1 #include "qemu/osdep.h"
2 #include "target/arm/idau.h"
6 #include "exec/gdbstub.h"
7 #include "exec/helper-proto.h"
8 #include "qemu/host-utils.h"
9 #include "sysemu/arch_init.h"
10 #include "sysemu/sysemu.h"
11 #include "qemu/bitops.h"
12 #include "qemu/crc32c.h"
13 #include "exec/exec-all.h"
14 #include "exec/cpu_ldst.h"
16 #include <zlib.h> /* For crc32 */
17 #include "exec/semihost.h"
18 #include "sysemu/kvm.h"
19 #include "fpu/softfloat.h"
20 #include "qemu/range.h"
22 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
24 #ifndef CONFIG_USER_ONLY
25 /* Cacheability and shareability attributes for a memory access */
26 typedef struct ARMCacheAttrs
{
27 unsigned int attrs
:8; /* as in the MAIR register encoding */
28 unsigned int shareability
:2; /* as in the SH field of the VMSAv8-64 PTEs */
31 static bool get_phys_addr(CPUARMState
*env
, target_ulong address
,
32 MMUAccessType access_type
, ARMMMUIdx mmu_idx
,
33 hwaddr
*phys_ptr
, MemTxAttrs
*attrs
, int *prot
,
34 target_ulong
*page_size
,
35 ARMMMUFaultInfo
*fi
, ARMCacheAttrs
*cacheattrs
);
37 static bool get_phys_addr_lpae(CPUARMState
*env
, target_ulong address
,
38 MMUAccessType access_type
, ARMMMUIdx mmu_idx
,
39 hwaddr
*phys_ptr
, MemTxAttrs
*txattrs
, int *prot
,
40 target_ulong
*page_size_ptr
,
41 ARMMMUFaultInfo
*fi
, ARMCacheAttrs
*cacheattrs
);
43 /* Security attributes for an address, as returned by v8m_security_lookup. */
44 typedef struct V8M_SAttributes
{
45 bool subpage
; /* true if these attrs don't cover the whole TARGET_PAGE */
54 static void v8m_security_lookup(CPUARMState
*env
, uint32_t address
,
55 MMUAccessType access_type
, ARMMMUIdx mmu_idx
,
56 V8M_SAttributes
*sattrs
);
59 static int vfp_gdb_get_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
63 /* VFP data registers are always little-endian. */
64 nregs
= arm_feature(env
, ARM_FEATURE_VFP3
) ? 32 : 16;
66 stq_le_p(buf
, *aa32_vfp_dreg(env
, reg
));
69 if (arm_feature(env
, ARM_FEATURE_NEON
)) {
70 /* Aliases for Q regs. */
73 uint64_t *q
= aa32_vfp_qreg(env
, reg
- 32);
75 stq_le_p(buf
+ 8, q
[1]);
79 switch (reg
- nregs
) {
80 case 0: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPSID
]); return 4;
81 case 1: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPSCR
]); return 4;
82 case 2: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPEXC
]); return 4;
87 static int vfp_gdb_set_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
91 nregs
= arm_feature(env
, ARM_FEATURE_VFP3
) ? 32 : 16;
93 *aa32_vfp_dreg(env
, reg
) = ldq_le_p(buf
);
96 if (arm_feature(env
, ARM_FEATURE_NEON
)) {
99 uint64_t *q
= aa32_vfp_qreg(env
, reg
- 32);
100 q
[0] = ldq_le_p(buf
);
101 q
[1] = ldq_le_p(buf
+ 8);
105 switch (reg
- nregs
) {
106 case 0: env
->vfp
.xregs
[ARM_VFP_FPSID
] = ldl_p(buf
); return 4;
107 case 1: env
->vfp
.xregs
[ARM_VFP_FPSCR
] = ldl_p(buf
); return 4;
108 case 2: env
->vfp
.xregs
[ARM_VFP_FPEXC
] = ldl_p(buf
) & (1 << 30); return 4;
113 static int aarch64_fpu_gdb_get_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
117 /* 128 bit FP register */
119 uint64_t *q
= aa64_vfp_qreg(env
, reg
);
121 stq_le_p(buf
+ 8, q
[1]);
126 stl_p(buf
, vfp_get_fpsr(env
));
130 stl_p(buf
, vfp_get_fpcr(env
));
137 static int aarch64_fpu_gdb_set_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
141 /* 128 bit FP register */
143 uint64_t *q
= aa64_vfp_qreg(env
, reg
);
144 q
[0] = ldq_le_p(buf
);
145 q
[1] = ldq_le_p(buf
+ 8);
150 vfp_set_fpsr(env
, ldl_p(buf
));
154 vfp_set_fpcr(env
, ldl_p(buf
));
161 static uint64_t raw_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
163 assert(ri
->fieldoffset
);
164 if (cpreg_field_is_64bit(ri
)) {
165 return CPREG_FIELD64(env
, ri
);
167 return CPREG_FIELD32(env
, ri
);
171 static void raw_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
174 assert(ri
->fieldoffset
);
175 if (cpreg_field_is_64bit(ri
)) {
176 CPREG_FIELD64(env
, ri
) = value
;
178 CPREG_FIELD32(env
, ri
) = value
;
182 static void *raw_ptr(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
184 return (char *)env
+ ri
->fieldoffset
;
187 uint64_t read_raw_cp_reg(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
189 /* Raw read of a coprocessor register (as needed for migration, etc). */
190 if (ri
->type
& ARM_CP_CONST
) {
191 return ri
->resetvalue
;
192 } else if (ri
->raw_readfn
) {
193 return ri
->raw_readfn(env
, ri
);
194 } else if (ri
->readfn
) {
195 return ri
->readfn(env
, ri
);
197 return raw_read(env
, ri
);
201 static void write_raw_cp_reg(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
204 /* Raw write of a coprocessor register (as needed for migration, etc).
205 * Note that constant registers are treated as write-ignored; the
206 * caller should check for success by whether a readback gives the
209 if (ri
->type
& ARM_CP_CONST
) {
211 } else if (ri
->raw_writefn
) {
212 ri
->raw_writefn(env
, ri
, v
);
213 } else if (ri
->writefn
) {
214 ri
->writefn(env
, ri
, v
);
216 raw_write(env
, ri
, v
);
220 static int arm_gdb_get_sysreg(CPUARMState
*env
, uint8_t *buf
, int reg
)
222 ARMCPU
*cpu
= arm_env_get_cpu(env
);
223 const ARMCPRegInfo
*ri
;
226 key
= cpu
->dyn_xml
.cpregs_keys
[reg
];
227 ri
= get_arm_cp_reginfo(cpu
->cp_regs
, key
);
229 if (cpreg_field_is_64bit(ri
)) {
230 return gdb_get_reg64(buf
, (uint64_t)read_raw_cp_reg(env
, ri
));
232 return gdb_get_reg32(buf
, (uint32_t)read_raw_cp_reg(env
, ri
));
238 static int arm_gdb_set_sysreg(CPUARMState
*env
, uint8_t *buf
, int reg
)
243 static bool raw_accessors_invalid(const ARMCPRegInfo
*ri
)
245 /* Return true if the regdef would cause an assertion if you called
246 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
247 * program bug for it not to have the NO_RAW flag).
248 * NB that returning false here doesn't necessarily mean that calling
249 * read/write_raw_cp_reg() is safe, because we can't distinguish "has
250 * read/write access functions which are safe for raw use" from "has
251 * read/write access functions which have side effects but has forgotten
252 * to provide raw access functions".
253 * The tests here line up with the conditions in read/write_raw_cp_reg()
254 * and assertions in raw_read()/raw_write().
256 if ((ri
->type
& ARM_CP_CONST
) ||
258 ((ri
->raw_writefn
|| ri
->writefn
) && (ri
->raw_readfn
|| ri
->readfn
))) {
264 bool write_cpustate_to_list(ARMCPU
*cpu
)
266 /* Write the coprocessor state from cpu->env to the (index,value) list. */
270 for (i
= 0; i
< cpu
->cpreg_array_len
; i
++) {
271 uint32_t regidx
= kvm_to_cpreg_id(cpu
->cpreg_indexes
[i
]);
272 const ARMCPRegInfo
*ri
;
274 ri
= get_arm_cp_reginfo(cpu
->cp_regs
, regidx
);
279 if (ri
->type
& ARM_CP_NO_RAW
) {
282 cpu
->cpreg_values
[i
] = read_raw_cp_reg(&cpu
->env
, ri
);
287 bool write_list_to_cpustate(ARMCPU
*cpu
)
292 for (i
= 0; i
< cpu
->cpreg_array_len
; i
++) {
293 uint32_t regidx
= kvm_to_cpreg_id(cpu
->cpreg_indexes
[i
]);
294 uint64_t v
= cpu
->cpreg_values
[i
];
295 const ARMCPRegInfo
*ri
;
297 ri
= get_arm_cp_reginfo(cpu
->cp_regs
, regidx
);
302 if (ri
->type
& ARM_CP_NO_RAW
) {
305 /* Write value and confirm it reads back as written
306 * (to catch read-only registers and partially read-only
307 * registers where the incoming migration value doesn't match)
309 write_raw_cp_reg(&cpu
->env
, ri
, v
);
310 if (read_raw_cp_reg(&cpu
->env
, ri
) != v
) {
317 static void add_cpreg_to_list(gpointer key
, gpointer opaque
)
319 ARMCPU
*cpu
= opaque
;
321 const ARMCPRegInfo
*ri
;
323 regidx
= *(uint32_t *)key
;
324 ri
= get_arm_cp_reginfo(cpu
->cp_regs
, regidx
);
326 if (!(ri
->type
& (ARM_CP_NO_RAW
|ARM_CP_ALIAS
))) {
327 cpu
->cpreg_indexes
[cpu
->cpreg_array_len
] = cpreg_to_kvm_id(regidx
);
328 /* The value array need not be initialized at this point */
329 cpu
->cpreg_array_len
++;
333 static void count_cpreg(gpointer key
, gpointer opaque
)
335 ARMCPU
*cpu
= opaque
;
337 const ARMCPRegInfo
*ri
;
339 regidx
= *(uint32_t *)key
;
340 ri
= get_arm_cp_reginfo(cpu
->cp_regs
, regidx
);
342 if (!(ri
->type
& (ARM_CP_NO_RAW
|ARM_CP_ALIAS
))) {
343 cpu
->cpreg_array_len
++;
347 static gint
cpreg_key_compare(gconstpointer a
, gconstpointer b
)
349 uint64_t aidx
= cpreg_to_kvm_id(*(uint32_t *)a
);
350 uint64_t bidx
= cpreg_to_kvm_id(*(uint32_t *)b
);
361 void init_cpreg_list(ARMCPU
*cpu
)
363 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
364 * Note that we require cpreg_tuples[] to be sorted by key ID.
369 keys
= g_hash_table_get_keys(cpu
->cp_regs
);
370 keys
= g_list_sort(keys
, cpreg_key_compare
);
372 cpu
->cpreg_array_len
= 0;
374 g_list_foreach(keys
, count_cpreg
, cpu
);
376 arraylen
= cpu
->cpreg_array_len
;
377 cpu
->cpreg_indexes
= g_new(uint64_t, arraylen
);
378 cpu
->cpreg_values
= g_new(uint64_t, arraylen
);
379 cpu
->cpreg_vmstate_indexes
= g_new(uint64_t, arraylen
);
380 cpu
->cpreg_vmstate_values
= g_new(uint64_t, arraylen
);
381 cpu
->cpreg_vmstate_array_len
= cpu
->cpreg_array_len
;
382 cpu
->cpreg_array_len
= 0;
384 g_list_foreach(keys
, add_cpreg_to_list
, cpu
);
386 assert(cpu
->cpreg_array_len
== arraylen
);
392 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
393 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
395 * access_el3_aa32ns: Used to check AArch32 register views.
396 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
398 static CPAccessResult
access_el3_aa32ns(CPUARMState
*env
,
399 const ARMCPRegInfo
*ri
,
402 bool secure
= arm_is_secure_below_el3(env
);
404 assert(!arm_el_is_aa64(env
, 3));
406 return CP_ACCESS_TRAP_UNCATEGORIZED
;
411 static CPAccessResult
access_el3_aa32ns_aa64any(CPUARMState
*env
,
412 const ARMCPRegInfo
*ri
,
415 if (!arm_el_is_aa64(env
, 3)) {
416 return access_el3_aa32ns(env
, ri
, isread
);
421 /* Some secure-only AArch32 registers trap to EL3 if used from
422 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
423 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
424 * We assume that the .access field is set to PL1_RW.
426 static CPAccessResult
access_trap_aa32s_el1(CPUARMState
*env
,
427 const ARMCPRegInfo
*ri
,
430 if (arm_current_el(env
) == 3) {
433 if (arm_is_secure_below_el3(env
)) {
434 return CP_ACCESS_TRAP_EL3
;
436 /* This will be EL1 NS and EL2 NS, which just UNDEF */
437 return CP_ACCESS_TRAP_UNCATEGORIZED
;
440 /* Check for traps to "powerdown debug" registers, which are controlled
443 static CPAccessResult
access_tdosa(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
446 int el
= arm_current_el(env
);
447 bool mdcr_el2_tdosa
= (env
->cp15
.mdcr_el2
& MDCR_TDOSA
) ||
448 (env
->cp15
.mdcr_el2
& MDCR_TDE
) ||
449 (env
->cp15
.hcr_el2
& HCR_TGE
);
451 if (el
< 2 && mdcr_el2_tdosa
&& !arm_is_secure_below_el3(env
)) {
452 return CP_ACCESS_TRAP_EL2
;
454 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TDOSA
)) {
455 return CP_ACCESS_TRAP_EL3
;
460 /* Check for traps to "debug ROM" registers, which are controlled
461 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
463 static CPAccessResult
access_tdra(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
466 int el
= arm_current_el(env
);
467 bool mdcr_el2_tdra
= (env
->cp15
.mdcr_el2
& MDCR_TDRA
) ||
468 (env
->cp15
.mdcr_el2
& MDCR_TDE
) ||
469 (env
->cp15
.hcr_el2
& HCR_TGE
);
471 if (el
< 2 && mdcr_el2_tdra
&& !arm_is_secure_below_el3(env
)) {
472 return CP_ACCESS_TRAP_EL2
;
474 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TDA
)) {
475 return CP_ACCESS_TRAP_EL3
;
480 /* Check for traps to general debug registers, which are controlled
481 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
483 static CPAccessResult
access_tda(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
486 int el
= arm_current_el(env
);
487 bool mdcr_el2_tda
= (env
->cp15
.mdcr_el2
& MDCR_TDA
) ||
488 (env
->cp15
.mdcr_el2
& MDCR_TDE
) ||
489 (env
->cp15
.hcr_el2
& HCR_TGE
);
491 if (el
< 2 && mdcr_el2_tda
&& !arm_is_secure_below_el3(env
)) {
492 return CP_ACCESS_TRAP_EL2
;
494 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TDA
)) {
495 return CP_ACCESS_TRAP_EL3
;
500 /* Check for traps to performance monitor registers, which are controlled
501 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
503 static CPAccessResult
access_tpm(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
506 int el
= arm_current_el(env
);
508 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TPM
)
509 && !arm_is_secure_below_el3(env
)) {
510 return CP_ACCESS_TRAP_EL2
;
512 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TPM
)) {
513 return CP_ACCESS_TRAP_EL3
;
518 static void dacr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
520 ARMCPU
*cpu
= arm_env_get_cpu(env
);
522 raw_write(env
, ri
, value
);
523 tlb_flush(CPU(cpu
)); /* Flush TLB as domain not tracked in TLB */
526 static void fcse_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
528 ARMCPU
*cpu
= arm_env_get_cpu(env
);
530 if (raw_read(env
, ri
) != value
) {
531 /* Unlike real hardware the qemu TLB uses virtual addresses,
532 * not modified virtual addresses, so this causes a TLB flush.
535 raw_write(env
, ri
, value
);
539 static void contextidr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
542 ARMCPU
*cpu
= arm_env_get_cpu(env
);
544 if (raw_read(env
, ri
) != value
&& !arm_feature(env
, ARM_FEATURE_PMSA
)
545 && !extended_addresses_enabled(env
)) {
546 /* For VMSA (when not using the LPAE long descriptor page table
547 * format) this register includes the ASID, so do a TLB flush.
548 * For PMSA it is purely a process ID and no action is needed.
552 raw_write(env
, ri
, value
);
555 static void tlbiall_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
558 /* Invalidate all (TLBIALL) */
559 ARMCPU
*cpu
= arm_env_get_cpu(env
);
564 static void tlbimva_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
567 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
568 ARMCPU
*cpu
= arm_env_get_cpu(env
);
570 tlb_flush_page(CPU(cpu
), value
& TARGET_PAGE_MASK
);
573 static void tlbiasid_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
576 /* Invalidate by ASID (TLBIASID) */
577 ARMCPU
*cpu
= arm_env_get_cpu(env
);
582 static void tlbimvaa_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
585 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
586 ARMCPU
*cpu
= arm_env_get_cpu(env
);
588 tlb_flush_page(CPU(cpu
), value
& TARGET_PAGE_MASK
);
591 /* IS variants of TLB operations must affect all cores */
592 static void tlbiall_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
595 CPUState
*cs
= ENV_GET_CPU(env
);
597 tlb_flush_all_cpus_synced(cs
);
600 static void tlbiasid_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
603 CPUState
*cs
= ENV_GET_CPU(env
);
605 tlb_flush_all_cpus_synced(cs
);
608 static void tlbimva_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
611 CPUState
*cs
= ENV_GET_CPU(env
);
613 tlb_flush_page_all_cpus_synced(cs
, value
& TARGET_PAGE_MASK
);
616 static void tlbimvaa_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
619 CPUState
*cs
= ENV_GET_CPU(env
);
621 tlb_flush_page_all_cpus_synced(cs
, value
& TARGET_PAGE_MASK
);
624 static void tlbiall_nsnh_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
627 CPUState
*cs
= ENV_GET_CPU(env
);
629 tlb_flush_by_mmuidx(cs
,
630 ARMMMUIdxBit_S12NSE1
|
631 ARMMMUIdxBit_S12NSE0
|
635 static void tlbiall_nsnh_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
638 CPUState
*cs
= ENV_GET_CPU(env
);
640 tlb_flush_by_mmuidx_all_cpus_synced(cs
,
641 ARMMMUIdxBit_S12NSE1
|
642 ARMMMUIdxBit_S12NSE0
|
646 static void tlbiipas2_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
649 /* Invalidate by IPA. This has to invalidate any structures that
650 * contain only stage 2 translation information, but does not need
651 * to apply to structures that contain combined stage 1 and stage 2
652 * translation information.
653 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
655 CPUState
*cs
= ENV_GET_CPU(env
);
658 if (!arm_feature(env
, ARM_FEATURE_EL2
) || !(env
->cp15
.scr_el3
& SCR_NS
)) {
662 pageaddr
= sextract64(value
<< 12, 0, 40);
664 tlb_flush_page_by_mmuidx(cs
, pageaddr
, ARMMMUIdxBit_S2NS
);
667 static void tlbiipas2_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
670 CPUState
*cs
= ENV_GET_CPU(env
);
673 if (!arm_feature(env
, ARM_FEATURE_EL2
) || !(env
->cp15
.scr_el3
& SCR_NS
)) {
677 pageaddr
= sextract64(value
<< 12, 0, 40);
679 tlb_flush_page_by_mmuidx_all_cpus_synced(cs
, pageaddr
,
683 static void tlbiall_hyp_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
686 CPUState
*cs
= ENV_GET_CPU(env
);
688 tlb_flush_by_mmuidx(cs
, ARMMMUIdxBit_S1E2
);
691 static void tlbiall_hyp_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
694 CPUState
*cs
= ENV_GET_CPU(env
);
696 tlb_flush_by_mmuidx_all_cpus_synced(cs
, ARMMMUIdxBit_S1E2
);
699 static void tlbimva_hyp_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
702 CPUState
*cs
= ENV_GET_CPU(env
);
703 uint64_t pageaddr
= value
& ~MAKE_64BIT_MASK(0, 12);
705 tlb_flush_page_by_mmuidx(cs
, pageaddr
, ARMMMUIdxBit_S1E2
);
708 static void tlbimva_hyp_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
711 CPUState
*cs
= ENV_GET_CPU(env
);
712 uint64_t pageaddr
= value
& ~MAKE_64BIT_MASK(0, 12);
714 tlb_flush_page_by_mmuidx_all_cpus_synced(cs
, pageaddr
,
718 static const ARMCPRegInfo cp_reginfo
[] = {
719 /* Define the secure and non-secure FCSE identifier CP registers
720 * separately because there is no secure bank in V8 (no _EL3). This allows
721 * the secure register to be properly reset and migrated. There is also no
722 * v8 EL1 version of the register so the non-secure instance stands alone.
725 .cp
= 15, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 0,
726 .access
= PL1_RW
, .secure
= ARM_CP_SECSTATE_NS
,
727 .fieldoffset
= offsetof(CPUARMState
, cp15
.fcseidr_ns
),
728 .resetvalue
= 0, .writefn
= fcse_write
, .raw_writefn
= raw_write
, },
729 { .name
= "FCSEIDR_S",
730 .cp
= 15, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 0,
731 .access
= PL1_RW
, .secure
= ARM_CP_SECSTATE_S
,
732 .fieldoffset
= offsetof(CPUARMState
, cp15
.fcseidr_s
),
733 .resetvalue
= 0, .writefn
= fcse_write
, .raw_writefn
= raw_write
, },
734 /* Define the secure and non-secure context identifier CP registers
735 * separately because there is no secure bank in V8 (no _EL3). This allows
736 * the secure register to be properly reset and migrated. In the
737 * non-secure case, the 32-bit register will have reset and migration
738 * disabled during registration as it is handled by the 64-bit instance.
740 { .name
= "CONTEXTIDR_EL1", .state
= ARM_CP_STATE_BOTH
,
741 .opc0
= 3, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 1,
742 .access
= PL1_RW
, .secure
= ARM_CP_SECSTATE_NS
,
743 .fieldoffset
= offsetof(CPUARMState
, cp15
.contextidr_el
[1]),
744 .resetvalue
= 0, .writefn
= contextidr_write
, .raw_writefn
= raw_write
, },
745 { .name
= "CONTEXTIDR_S", .state
= ARM_CP_STATE_AA32
,
746 .cp
= 15, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 1,
747 .access
= PL1_RW
, .secure
= ARM_CP_SECSTATE_S
,
748 .fieldoffset
= offsetof(CPUARMState
, cp15
.contextidr_s
),
749 .resetvalue
= 0, .writefn
= contextidr_write
, .raw_writefn
= raw_write
, },
753 static const ARMCPRegInfo not_v8_cp_reginfo
[] = {
754 /* NB: Some of these registers exist in v8 but with more precise
755 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
757 /* MMU Domain access control / MPU write buffer control */
759 .cp
= 15, .opc1
= CP_ANY
, .crn
= 3, .crm
= CP_ANY
, .opc2
= CP_ANY
,
760 .access
= PL1_RW
, .resetvalue
= 0,
761 .writefn
= dacr_write
, .raw_writefn
= raw_write
,
762 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.dacr_s
),
763 offsetoflow32(CPUARMState
, cp15
.dacr_ns
) } },
764 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
765 * For v6 and v5, these mappings are overly broad.
767 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 0,
768 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
769 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 1,
770 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
771 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 4,
772 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
773 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 8,
774 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
775 /* Cache maintenance ops; some of this space may be overridden later. */
776 { .name
= "CACHEMAINT", .cp
= 15, .crn
= 7, .crm
= CP_ANY
,
777 .opc1
= 0, .opc2
= CP_ANY
, .access
= PL1_W
,
778 .type
= ARM_CP_NOP
| ARM_CP_OVERRIDE
},
782 static const ARMCPRegInfo not_v6_cp_reginfo
[] = {
783 /* Not all pre-v6 cores implemented this WFI, so this is slightly
786 { .name
= "WFI_v5", .cp
= 15, .crn
= 7, .crm
= 8, .opc1
= 0, .opc2
= 2,
787 .access
= PL1_W
, .type
= ARM_CP_WFI
},
791 static const ARMCPRegInfo not_v7_cp_reginfo
[] = {
792 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
793 * is UNPREDICTABLE; we choose to NOP as most implementations do).
795 { .name
= "WFI_v6", .cp
= 15, .crn
= 7, .crm
= 0, .opc1
= 0, .opc2
= 4,
796 .access
= PL1_W
, .type
= ARM_CP_WFI
},
797 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
798 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
799 * OMAPCP will override this space.
801 { .name
= "DLOCKDOWN", .cp
= 15, .crn
= 9, .crm
= 0, .opc1
= 0, .opc2
= 0,
802 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_data
),
804 { .name
= "ILOCKDOWN", .cp
= 15, .crn
= 9, .crm
= 0, .opc1
= 0, .opc2
= 1,
805 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_insn
),
807 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
808 { .name
= "DUMMY", .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 1, .opc2
= CP_ANY
,
809 .access
= PL1_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
811 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
812 * implementing it as RAZ means the "debug architecture version" bits
813 * will read as a reserved value, which should cause Linux to not try
814 * to use the debug hardware.
816 { .name
= "DBGDIDR", .cp
= 14, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 0,
817 .access
= PL0_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
818 /* MMU TLB control. Note that the wildcarding means we cover not just
819 * the unified TLB ops but also the dside/iside/inner-shareable variants.
821 { .name
= "TLBIALL", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
822 .opc1
= CP_ANY
, .opc2
= 0, .access
= PL1_W
, .writefn
= tlbiall_write
,
823 .type
= ARM_CP_NO_RAW
},
824 { .name
= "TLBIMVA", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
825 .opc1
= CP_ANY
, .opc2
= 1, .access
= PL1_W
, .writefn
= tlbimva_write
,
826 .type
= ARM_CP_NO_RAW
},
827 { .name
= "TLBIASID", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
828 .opc1
= CP_ANY
, .opc2
= 2, .access
= PL1_W
, .writefn
= tlbiasid_write
,
829 .type
= ARM_CP_NO_RAW
},
830 { .name
= "TLBIMVAA", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
831 .opc1
= CP_ANY
, .opc2
= 3, .access
= PL1_W
, .writefn
= tlbimvaa_write
,
832 .type
= ARM_CP_NO_RAW
},
833 { .name
= "PRRR", .cp
= 15, .crn
= 10, .crm
= 2,
834 .opc1
= 0, .opc2
= 0, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
835 { .name
= "NMRR", .cp
= 15, .crn
= 10, .crm
= 2,
836 .opc1
= 0, .opc2
= 1, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
840 static void cpacr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
845 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
846 if (!arm_feature(env
, ARM_FEATURE_V8
)) {
847 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
848 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
849 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
851 if (arm_feature(env
, ARM_FEATURE_VFP
)) {
852 /* VFP coprocessor: cp10 & cp11 [23:20] */
853 mask
|= (1 << 31) | (1 << 30) | (0xf << 20);
855 if (!arm_feature(env
, ARM_FEATURE_NEON
)) {
856 /* ASEDIS [31] bit is RAO/WI */
860 /* VFPv3 and upwards with NEON implement 32 double precision
861 * registers (D0-D31).
863 if (!arm_feature(env
, ARM_FEATURE_NEON
) ||
864 !arm_feature(env
, ARM_FEATURE_VFP3
)) {
865 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
871 env
->cp15
.cpacr_el1
= value
;
874 static void cpacr_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
876 /* Call cpacr_write() so that we reset with the correct RAO bits set
877 * for our CPU features.
879 cpacr_write(env
, ri
, 0);
882 static CPAccessResult
cpacr_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
885 if (arm_feature(env
, ARM_FEATURE_V8
)) {
886 /* Check if CPACR accesses are to be trapped to EL2 */
887 if (arm_current_el(env
) == 1 &&
888 (env
->cp15
.cptr_el
[2] & CPTR_TCPAC
) && !arm_is_secure(env
)) {
889 return CP_ACCESS_TRAP_EL2
;
890 /* Check if CPACR accesses are to be trapped to EL3 */
891 } else if (arm_current_el(env
) < 3 &&
892 (env
->cp15
.cptr_el
[3] & CPTR_TCPAC
)) {
893 return CP_ACCESS_TRAP_EL3
;
900 static CPAccessResult
cptr_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
903 /* Check if CPTR accesses are set to trap to EL3 */
904 if (arm_current_el(env
) == 2 && (env
->cp15
.cptr_el
[3] & CPTR_TCPAC
)) {
905 return CP_ACCESS_TRAP_EL3
;
911 static const ARMCPRegInfo v6_cp_reginfo
[] = {
912 /* prefetch by MVA in v6, NOP in v7 */
913 { .name
= "MVA_prefetch",
914 .cp
= 15, .crn
= 7, .crm
= 13, .opc1
= 0, .opc2
= 1,
915 .access
= PL1_W
, .type
= ARM_CP_NOP
},
916 /* We need to break the TB after ISB to execute self-modifying code
917 * correctly and also to take any pending interrupts immediately.
918 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
920 { .name
= "ISB", .cp
= 15, .crn
= 7, .crm
= 5, .opc1
= 0, .opc2
= 4,
921 .access
= PL0_W
, .type
= ARM_CP_NO_RAW
, .writefn
= arm_cp_write_ignore
},
922 { .name
= "DSB", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 4,
923 .access
= PL0_W
, .type
= ARM_CP_NOP
},
924 { .name
= "DMB", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 5,
925 .access
= PL0_W
, .type
= ARM_CP_NOP
},
926 { .name
= "IFAR", .cp
= 15, .crn
= 6, .crm
= 0, .opc1
= 0, .opc2
= 2,
928 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.ifar_s
),
929 offsetof(CPUARMState
, cp15
.ifar_ns
) },
931 /* Watchpoint Fault Address Register : should actually only be present
932 * for 1136, 1176, 11MPCore.
934 { .name
= "WFAR", .cp
= 15, .crn
= 6, .crm
= 0, .opc1
= 0, .opc2
= 1,
935 .access
= PL1_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0, },
936 { .name
= "CPACR", .state
= ARM_CP_STATE_BOTH
, .opc0
= 3,
937 .crn
= 1, .crm
= 0, .opc1
= 0, .opc2
= 2, .accessfn
= cpacr_access
,
938 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.cpacr_el1
),
939 .resetfn
= cpacr_reset
, .writefn
= cpacr_write
},
943 /* Definitions for the PMU registers */
944 #define PMCRN_MASK 0xf800
945 #define PMCRN_SHIFT 11
950 static inline uint32_t pmu_num_counters(CPUARMState
*env
)
952 return (env
->cp15
.c9_pmcr
& PMCRN_MASK
) >> PMCRN_SHIFT
;
955 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
956 static inline uint64_t pmu_counter_mask(CPUARMState
*env
)
958 return (1 << 31) | ((1 << pmu_num_counters(env
)) - 1);
961 static CPAccessResult
pmreg_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
964 /* Performance monitor registers user accessibility is controlled
965 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
966 * trapping to EL2 or EL3 for other accesses.
968 int el
= arm_current_el(env
);
970 if (el
== 0 && !(env
->cp15
.c9_pmuserenr
& 1)) {
971 return CP_ACCESS_TRAP
;
973 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TPM
)
974 && !arm_is_secure_below_el3(env
)) {
975 return CP_ACCESS_TRAP_EL2
;
977 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TPM
)) {
978 return CP_ACCESS_TRAP_EL3
;
984 static CPAccessResult
pmreg_access_xevcntr(CPUARMState
*env
,
985 const ARMCPRegInfo
*ri
,
988 /* ER: event counter read trap control */
989 if (arm_feature(env
, ARM_FEATURE_V8
)
990 && arm_current_el(env
) == 0
991 && (env
->cp15
.c9_pmuserenr
& (1 << 3)) != 0
996 return pmreg_access(env
, ri
, isread
);
999 static CPAccessResult
pmreg_access_swinc(CPUARMState
*env
,
1000 const ARMCPRegInfo
*ri
,
1003 /* SW: software increment write trap control */
1004 if (arm_feature(env
, ARM_FEATURE_V8
)
1005 && arm_current_el(env
) == 0
1006 && (env
->cp15
.c9_pmuserenr
& (1 << 1)) != 0
1008 return CP_ACCESS_OK
;
1011 return pmreg_access(env
, ri
, isread
);
1014 #ifndef CONFIG_USER_ONLY
1016 static CPAccessResult
pmreg_access_selr(CPUARMState
*env
,
1017 const ARMCPRegInfo
*ri
,
1020 /* ER: event counter read trap control */
1021 if (arm_feature(env
, ARM_FEATURE_V8
)
1022 && arm_current_el(env
) == 0
1023 && (env
->cp15
.c9_pmuserenr
& (1 << 3)) != 0) {
1024 return CP_ACCESS_OK
;
1027 return pmreg_access(env
, ri
, isread
);
1030 static CPAccessResult
pmreg_access_ccntr(CPUARMState
*env
,
1031 const ARMCPRegInfo
*ri
,
1034 /* CR: cycle counter read trap control */
1035 if (arm_feature(env
, ARM_FEATURE_V8
)
1036 && arm_current_el(env
) == 0
1037 && (env
->cp15
.c9_pmuserenr
& (1 << 2)) != 0
1039 return CP_ACCESS_OK
;
1042 return pmreg_access(env
, ri
, isread
);
1045 static inline bool arm_ccnt_enabled(CPUARMState
*env
)
1047 /* This does not support checking PMCCFILTR_EL0 register */
1049 if (!(env
->cp15
.c9_pmcr
& PMCRE
) || !(env
->cp15
.c9_pmcnten
& (1 << 31))) {
1056 void pmccntr_sync(CPUARMState
*env
)
1058 uint64_t temp_ticks
;
1060 temp_ticks
= muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
),
1061 ARM_CPU_FREQ
, NANOSECONDS_PER_SECOND
);
1063 if (env
->cp15
.c9_pmcr
& PMCRD
) {
1064 /* Increment once every 64 processor clock cycles */
1068 if (arm_ccnt_enabled(env
)) {
1069 env
->cp15
.c15_ccnt
= temp_ticks
- env
->cp15
.c15_ccnt
;
1073 static void pmcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1078 if (value
& PMCRC
) {
1079 /* The counter has been reset */
1080 env
->cp15
.c15_ccnt
= 0;
1083 /* only the DP, X, D and E bits are writable */
1084 env
->cp15
.c9_pmcr
&= ~0x39;
1085 env
->cp15
.c9_pmcr
|= (value
& 0x39);
1090 static uint64_t pmccntr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1092 uint64_t total_ticks
;
1094 if (!arm_ccnt_enabled(env
)) {
1095 /* Counter is disabled, do not change value */
1096 return env
->cp15
.c15_ccnt
;
1099 total_ticks
= muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
),
1100 ARM_CPU_FREQ
, NANOSECONDS_PER_SECOND
);
1102 if (env
->cp15
.c9_pmcr
& PMCRD
) {
1103 /* Increment once every 64 processor clock cycles */
1106 return total_ticks
- env
->cp15
.c15_ccnt
;
1109 static void pmselr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1112 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1113 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
1114 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
1117 env
->cp15
.c9_pmselr
= value
& 0x1f;
1120 static void pmccntr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1123 uint64_t total_ticks
;
1125 if (!arm_ccnt_enabled(env
)) {
1126 /* Counter is disabled, set the absolute value */
1127 env
->cp15
.c15_ccnt
= value
;
1131 total_ticks
= muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
),
1132 ARM_CPU_FREQ
, NANOSECONDS_PER_SECOND
);
1134 if (env
->cp15
.c9_pmcr
& PMCRD
) {
1135 /* Increment once every 64 processor clock cycles */
1138 env
->cp15
.c15_ccnt
= total_ticks
- value
;
1141 static void pmccntr_write32(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1144 uint64_t cur_val
= pmccntr_read(env
, NULL
);
1146 pmccntr_write(env
, ri
, deposit64(cur_val
, 0, 32, value
));
1149 #else /* CONFIG_USER_ONLY */
1151 void pmccntr_sync(CPUARMState
*env
)
1157 static void pmccfiltr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1161 env
->cp15
.pmccfiltr_el0
= value
& 0xfc000000;
1165 static void pmcntenset_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1168 value
&= pmu_counter_mask(env
);
1169 env
->cp15
.c9_pmcnten
|= value
;
1172 static void pmcntenclr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1175 value
&= pmu_counter_mask(env
);
1176 env
->cp15
.c9_pmcnten
&= ~value
;
1179 static void pmovsr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1182 env
->cp15
.c9_pmovsr
&= ~value
;
1185 static void pmxevtyper_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1188 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1189 * PMSELR value is equal to or greater than the number of implemented
1190 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1192 if (env
->cp15
.c9_pmselr
== 0x1f) {
1193 pmccfiltr_write(env
, ri
, value
);
1197 static uint64_t pmxevtyper_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1199 /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1200 * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
1202 if (env
->cp15
.c9_pmselr
== 0x1f) {
1203 return env
->cp15
.pmccfiltr_el0
;
1209 static void pmuserenr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1212 if (arm_feature(env
, ARM_FEATURE_V8
)) {
1213 env
->cp15
.c9_pmuserenr
= value
& 0xf;
1215 env
->cp15
.c9_pmuserenr
= value
& 1;
1219 static void pmintenset_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1222 /* We have no event counters so only the C bit can be changed */
1223 value
&= pmu_counter_mask(env
);
1224 env
->cp15
.c9_pminten
|= value
;
1227 static void pmintenclr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1230 value
&= pmu_counter_mask(env
);
1231 env
->cp15
.c9_pminten
&= ~value
;
1234 static void vbar_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1237 /* Note that even though the AArch64 view of this register has bits
1238 * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1239 * architectural requirements for bits which are RES0 only in some
1240 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1241 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1243 raw_write(env
, ri
, value
& ~0x1FULL
);
1246 static void scr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
1248 /* We only mask off bits that are RES0 both for AArch64 and AArch32.
1249 * For bits that vary between AArch32/64, code needs to check the
1250 * current execution mode before directly using the feature bit.
1252 uint32_t valid_mask
= SCR_AARCH64_MASK
| SCR_AARCH32_MASK
;
1254 if (!arm_feature(env
, ARM_FEATURE_EL2
)) {
1255 valid_mask
&= ~SCR_HCE
;
1257 /* On ARMv7, SMD (or SCD as it is called in v7) is only
1258 * supported if EL2 exists. The bit is UNK/SBZP when
1259 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1260 * when EL2 is unavailable.
1261 * On ARMv8, this bit is always available.
1263 if (arm_feature(env
, ARM_FEATURE_V7
) &&
1264 !arm_feature(env
, ARM_FEATURE_V8
)) {
1265 valid_mask
&= ~SCR_SMD
;
1269 /* Clear all-context RES0 bits. */
1270 value
&= valid_mask
;
1271 raw_write(env
, ri
, value
);
1274 static uint64_t ccsidr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1276 ARMCPU
*cpu
= arm_env_get_cpu(env
);
1278 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
1281 uint32_t index
= A32_BANKED_REG_GET(env
, csselr
,
1282 ri
->secure
& ARM_CP_SECSTATE_S
);
1284 return cpu
->ccsidr
[index
];
1287 static void csselr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1290 raw_write(env
, ri
, value
& 0xf);
1293 static uint64_t isr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1295 CPUState
*cs
= ENV_GET_CPU(env
);
1298 if (cs
->interrupt_request
& CPU_INTERRUPT_HARD
) {
1301 if (cs
->interrupt_request
& CPU_INTERRUPT_FIQ
) {
1304 /* External aborts are not possible in QEMU so A bit is always clear */
1308 static const ARMCPRegInfo v7_cp_reginfo
[] = {
1309 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1310 { .name
= "NOP", .cp
= 15, .crn
= 7, .crm
= 0, .opc1
= 0, .opc2
= 4,
1311 .access
= PL1_W
, .type
= ARM_CP_NOP
},
1312 /* Performance monitors are implementation defined in v7,
1313 * but with an ARM recommended set of registers, which we
1314 * follow (although we don't actually implement any counters)
1316 * Performance registers fall into three categories:
1317 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1318 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1319 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1320 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1321 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1323 { .name
= "PMCNTENSET", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 1,
1324 .access
= PL0_RW
, .type
= ARM_CP_ALIAS
,
1325 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmcnten
),
1326 .writefn
= pmcntenset_write
,
1327 .accessfn
= pmreg_access
,
1328 .raw_writefn
= raw_write
},
1329 { .name
= "PMCNTENSET_EL0", .state
= ARM_CP_STATE_AA64
,
1330 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 1,
1331 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1332 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmcnten
), .resetvalue
= 0,
1333 .writefn
= pmcntenset_write
, .raw_writefn
= raw_write
},
1334 { .name
= "PMCNTENCLR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 2,
1336 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmcnten
),
1337 .accessfn
= pmreg_access
,
1338 .writefn
= pmcntenclr_write
,
1339 .type
= ARM_CP_ALIAS
},
1340 { .name
= "PMCNTENCLR_EL0", .state
= ARM_CP_STATE_AA64
,
1341 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 2,
1342 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1343 .type
= ARM_CP_ALIAS
,
1344 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmcnten
),
1345 .writefn
= pmcntenclr_write
},
1346 { .name
= "PMOVSR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 3,
1348 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmovsr
),
1349 .accessfn
= pmreg_access
,
1350 .writefn
= pmovsr_write
,
1351 .raw_writefn
= raw_write
},
1352 { .name
= "PMOVSCLR_EL0", .state
= ARM_CP_STATE_AA64
,
1353 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 3,
1354 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1355 .type
= ARM_CP_ALIAS
,
1356 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmovsr
),
1357 .writefn
= pmovsr_write
,
1358 .raw_writefn
= raw_write
},
1359 /* Unimplemented so WI. */
1360 { .name
= "PMSWINC", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 4,
1361 .access
= PL0_W
, .accessfn
= pmreg_access_swinc
, .type
= ARM_CP_NOP
},
1362 #ifndef CONFIG_USER_ONLY
1363 { .name
= "PMSELR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 5,
1364 .access
= PL0_RW
, .type
= ARM_CP_ALIAS
,
1365 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmselr
),
1366 .accessfn
= pmreg_access_selr
, .writefn
= pmselr_write
,
1367 .raw_writefn
= raw_write
},
1368 { .name
= "PMSELR_EL0", .state
= ARM_CP_STATE_AA64
,
1369 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 5,
1370 .access
= PL0_RW
, .accessfn
= pmreg_access_selr
,
1371 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmselr
),
1372 .writefn
= pmselr_write
, .raw_writefn
= raw_write
, },
1373 { .name
= "PMCCNTR", .cp
= 15, .crn
= 9, .crm
= 13, .opc1
= 0, .opc2
= 0,
1374 .access
= PL0_RW
, .resetvalue
= 0, .type
= ARM_CP_ALIAS
| ARM_CP_IO
,
1375 .readfn
= pmccntr_read
, .writefn
= pmccntr_write32
,
1376 .accessfn
= pmreg_access_ccntr
},
1377 { .name
= "PMCCNTR_EL0", .state
= ARM_CP_STATE_AA64
,
1378 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 13, .opc2
= 0,
1379 .access
= PL0_RW
, .accessfn
= pmreg_access_ccntr
,
1381 .readfn
= pmccntr_read
, .writefn
= pmccntr_write
, },
1383 { .name
= "PMCCFILTR_EL0", .state
= ARM_CP_STATE_AA64
,
1384 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 15, .opc2
= 7,
1385 .writefn
= pmccfiltr_write
,
1386 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1388 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmccfiltr_el0
),
1390 { .name
= "PMXEVTYPER", .cp
= 15, .crn
= 9, .crm
= 13, .opc1
= 0, .opc2
= 1,
1391 .access
= PL0_RW
, .type
= ARM_CP_NO_RAW
, .accessfn
= pmreg_access
,
1392 .writefn
= pmxevtyper_write
, .readfn
= pmxevtyper_read
},
1393 { .name
= "PMXEVTYPER_EL0", .state
= ARM_CP_STATE_AA64
,
1394 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 13, .opc2
= 1,
1395 .access
= PL0_RW
, .type
= ARM_CP_NO_RAW
, .accessfn
= pmreg_access
,
1396 .writefn
= pmxevtyper_write
, .readfn
= pmxevtyper_read
},
1397 /* Unimplemented, RAZ/WI. */
1398 { .name
= "PMXEVCNTR", .cp
= 15, .crn
= 9, .crm
= 13, .opc1
= 0, .opc2
= 2,
1399 .access
= PL0_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0,
1400 .accessfn
= pmreg_access_xevcntr
},
1401 { .name
= "PMUSERENR", .cp
= 15, .crn
= 9, .crm
= 14, .opc1
= 0, .opc2
= 0,
1402 .access
= PL0_R
| PL1_RW
, .accessfn
= access_tpm
,
1403 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmuserenr
),
1405 .writefn
= pmuserenr_write
, .raw_writefn
= raw_write
},
1406 { .name
= "PMUSERENR_EL0", .state
= ARM_CP_STATE_AA64
,
1407 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 14, .opc2
= 0,
1408 .access
= PL0_R
| PL1_RW
, .accessfn
= access_tpm
, .type
= ARM_CP_ALIAS
,
1409 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmuserenr
),
1411 .writefn
= pmuserenr_write
, .raw_writefn
= raw_write
},
1412 { .name
= "PMINTENSET", .cp
= 15, .crn
= 9, .crm
= 14, .opc1
= 0, .opc2
= 1,
1413 .access
= PL1_RW
, .accessfn
= access_tpm
,
1414 .type
= ARM_CP_ALIAS
| ARM_CP_IO
,
1415 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pminten
),
1417 .writefn
= pmintenset_write
, .raw_writefn
= raw_write
},
1418 { .name
= "PMINTENSET_EL1", .state
= ARM_CP_STATE_AA64
,
1419 .opc0
= 3, .opc1
= 0, .crn
= 9, .crm
= 14, .opc2
= 1,
1420 .access
= PL1_RW
, .accessfn
= access_tpm
,
1422 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pminten
),
1423 .writefn
= pmintenset_write
, .raw_writefn
= raw_write
,
1424 .resetvalue
= 0x0 },
1425 { .name
= "PMINTENCLR", .cp
= 15, .crn
= 9, .crm
= 14, .opc1
= 0, .opc2
= 2,
1426 .access
= PL1_RW
, .accessfn
= access_tpm
, .type
= ARM_CP_ALIAS
,
1427 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pminten
),
1428 .writefn
= pmintenclr_write
, },
1429 { .name
= "PMINTENCLR_EL1", .state
= ARM_CP_STATE_AA64
,
1430 .opc0
= 3, .opc1
= 0, .crn
= 9, .crm
= 14, .opc2
= 2,
1431 .access
= PL1_RW
, .accessfn
= access_tpm
, .type
= ARM_CP_ALIAS
,
1432 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pminten
),
1433 .writefn
= pmintenclr_write
},
1434 { .name
= "CCSIDR", .state
= ARM_CP_STATE_BOTH
,
1435 .opc0
= 3, .crn
= 0, .crm
= 0, .opc1
= 1, .opc2
= 0,
1436 .access
= PL1_R
, .readfn
= ccsidr_read
, .type
= ARM_CP_NO_RAW
},
1437 { .name
= "CSSELR", .state
= ARM_CP_STATE_BOTH
,
1438 .opc0
= 3, .crn
= 0, .crm
= 0, .opc1
= 2, .opc2
= 0,
1439 .access
= PL1_RW
, .writefn
= csselr_write
, .resetvalue
= 0,
1440 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.csselr_s
),
1441 offsetof(CPUARMState
, cp15
.csselr_ns
) } },
1442 /* Auxiliary ID register: this actually has an IMPDEF value but for now
1443 * just RAZ for all cores:
1445 { .name
= "AIDR", .state
= ARM_CP_STATE_BOTH
,
1446 .opc0
= 3, .opc1
= 1, .crn
= 0, .crm
= 0, .opc2
= 7,
1447 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
1448 /* Auxiliary fault status registers: these also are IMPDEF, and we
1449 * choose to RAZ/WI for all cores.
1451 { .name
= "AFSR0_EL1", .state
= ARM_CP_STATE_BOTH
,
1452 .opc0
= 3, .opc1
= 0, .crn
= 5, .crm
= 1, .opc2
= 0,
1453 .access
= PL1_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
1454 { .name
= "AFSR1_EL1", .state
= ARM_CP_STATE_BOTH
,
1455 .opc0
= 3, .opc1
= 0, .crn
= 5, .crm
= 1, .opc2
= 1,
1456 .access
= PL1_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
1457 /* MAIR can just read-as-written because we don't implement caches
1458 * and so don't need to care about memory attributes.
1460 { .name
= "MAIR_EL1", .state
= ARM_CP_STATE_AA64
,
1461 .opc0
= 3, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 0,
1462 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.mair_el
[1]),
1464 { .name
= "MAIR_EL3", .state
= ARM_CP_STATE_AA64
,
1465 .opc0
= 3, .opc1
= 6, .crn
= 10, .crm
= 2, .opc2
= 0,
1466 .access
= PL3_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.mair_el
[3]),
1468 /* For non-long-descriptor page tables these are PRRR and NMRR;
1469 * regardless they still act as reads-as-written for QEMU.
1471 /* MAIR0/1 are defined separately from their 64-bit counterpart which
1472 * allows them to assign the correct fieldoffset based on the endianness
1473 * handled in the field definitions.
1475 { .name
= "MAIR0", .state
= ARM_CP_STATE_AA32
,
1476 .cp
= 15, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 0, .access
= PL1_RW
,
1477 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.mair0_s
),
1478 offsetof(CPUARMState
, cp15
.mair0_ns
) },
1479 .resetfn
= arm_cp_reset_ignore
},
1480 { .name
= "MAIR1", .state
= ARM_CP_STATE_AA32
,
1481 .cp
= 15, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 1, .access
= PL1_RW
,
1482 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.mair1_s
),
1483 offsetof(CPUARMState
, cp15
.mair1_ns
) },
1484 .resetfn
= arm_cp_reset_ignore
},
1485 { .name
= "ISR_EL1", .state
= ARM_CP_STATE_BOTH
,
1486 .opc0
= 3, .opc1
= 0, .crn
= 12, .crm
= 1, .opc2
= 0,
1487 .type
= ARM_CP_NO_RAW
, .access
= PL1_R
, .readfn
= isr_read
},
1488 /* 32 bit ITLB invalidates */
1489 { .name
= "ITLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 0,
1490 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiall_write
},
1491 { .name
= "ITLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 1,
1492 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimva_write
},
1493 { .name
= "ITLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 2,
1494 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiasid_write
},
1495 /* 32 bit DTLB invalidates */
1496 { .name
= "DTLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 0,
1497 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiall_write
},
1498 { .name
= "DTLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 1,
1499 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimva_write
},
1500 { .name
= "DTLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 2,
1501 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiasid_write
},
1502 /* 32 bit TLB invalidates */
1503 { .name
= "TLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 0,
1504 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiall_write
},
1505 { .name
= "TLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 1,
1506 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimva_write
},
1507 { .name
= "TLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 2,
1508 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiasid_write
},
1509 { .name
= "TLBIMVAA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 3,
1510 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimvaa_write
},
1514 static const ARMCPRegInfo v7mp_cp_reginfo
[] = {
1515 /* 32 bit TLB invalidates, Inner Shareable */
1516 { .name
= "TLBIALLIS", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 0,
1517 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiall_is_write
},
1518 { .name
= "TLBIMVAIS", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 1,
1519 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimva_is_write
},
1520 { .name
= "TLBIASIDIS", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 2,
1521 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
,
1522 .writefn
= tlbiasid_is_write
},
1523 { .name
= "TLBIMVAAIS", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 3,
1524 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
,
1525 .writefn
= tlbimvaa_is_write
},
1529 static void teecr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1536 static CPAccessResult
teehbr_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1539 if (arm_current_el(env
) == 0 && (env
->teecr
& 1)) {
1540 return CP_ACCESS_TRAP
;
1542 return CP_ACCESS_OK
;
1545 static const ARMCPRegInfo t2ee_cp_reginfo
[] = {
1546 { .name
= "TEECR", .cp
= 14, .crn
= 0, .crm
= 0, .opc1
= 6, .opc2
= 0,
1547 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, teecr
),
1549 .writefn
= teecr_write
},
1550 { .name
= "TEEHBR", .cp
= 14, .crn
= 1, .crm
= 0, .opc1
= 6, .opc2
= 0,
1551 .access
= PL0_RW
, .fieldoffset
= offsetof(CPUARMState
, teehbr
),
1552 .accessfn
= teehbr_access
, .resetvalue
= 0 },
1556 static const ARMCPRegInfo v6k_cp_reginfo
[] = {
1557 { .name
= "TPIDR_EL0", .state
= ARM_CP_STATE_AA64
,
1558 .opc0
= 3, .opc1
= 3, .opc2
= 2, .crn
= 13, .crm
= 0,
1560 .fieldoffset
= offsetof(CPUARMState
, cp15
.tpidr_el
[0]), .resetvalue
= 0 },
1561 { .name
= "TPIDRURW", .cp
= 15, .crn
= 13, .crm
= 0, .opc1
= 0, .opc2
= 2,
1563 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.tpidrurw_s
),
1564 offsetoflow32(CPUARMState
, cp15
.tpidrurw_ns
) },
1565 .resetfn
= arm_cp_reset_ignore
},
1566 { .name
= "TPIDRRO_EL0", .state
= ARM_CP_STATE_AA64
,
1567 .opc0
= 3, .opc1
= 3, .opc2
= 3, .crn
= 13, .crm
= 0,
1568 .access
= PL0_R
|PL1_W
,
1569 .fieldoffset
= offsetof(CPUARMState
, cp15
.tpidrro_el
[0]),
1571 { .name
= "TPIDRURO", .cp
= 15, .crn
= 13, .crm
= 0, .opc1
= 0, .opc2
= 3,
1572 .access
= PL0_R
|PL1_W
,
1573 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.tpidruro_s
),
1574 offsetoflow32(CPUARMState
, cp15
.tpidruro_ns
) },
1575 .resetfn
= arm_cp_reset_ignore
},
1576 { .name
= "TPIDR_EL1", .state
= ARM_CP_STATE_AA64
,
1577 .opc0
= 3, .opc1
= 0, .opc2
= 4, .crn
= 13, .crm
= 0,
1579 .fieldoffset
= offsetof(CPUARMState
, cp15
.tpidr_el
[1]), .resetvalue
= 0 },
1580 { .name
= "TPIDRPRW", .opc1
= 0, .cp
= 15, .crn
= 13, .crm
= 0, .opc2
= 4,
1582 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.tpidrprw_s
),
1583 offsetoflow32(CPUARMState
, cp15
.tpidrprw_ns
) },
1588 #ifndef CONFIG_USER_ONLY
1590 static CPAccessResult
gt_cntfrq_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1593 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
1594 * Writable only at the highest implemented exception level.
1596 int el
= arm_current_el(env
);
1600 if (!extract32(env
->cp15
.c14_cntkctl
, 0, 2)) {
1601 return CP_ACCESS_TRAP
;
1605 if (!isread
&& ri
->state
== ARM_CP_STATE_AA32
&&
1606 arm_is_secure_below_el3(env
)) {
1607 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
1608 return CP_ACCESS_TRAP_UNCATEGORIZED
;
1616 if (!isread
&& el
< arm_highest_el(env
)) {
1617 return CP_ACCESS_TRAP_UNCATEGORIZED
;
1620 return CP_ACCESS_OK
;
1623 static CPAccessResult
gt_counter_access(CPUARMState
*env
, int timeridx
,
1626 unsigned int cur_el
= arm_current_el(env
);
1627 bool secure
= arm_is_secure(env
);
1629 /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
1631 !extract32(env
->cp15
.c14_cntkctl
, timeridx
, 1)) {
1632 return CP_ACCESS_TRAP
;
1635 if (arm_feature(env
, ARM_FEATURE_EL2
) &&
1636 timeridx
== GTIMER_PHYS
&& !secure
&& cur_el
< 2 &&
1637 !extract32(env
->cp15
.cnthctl_el2
, 0, 1)) {
1638 return CP_ACCESS_TRAP_EL2
;
1640 return CP_ACCESS_OK
;
1643 static CPAccessResult
gt_timer_access(CPUARMState
*env
, int timeridx
,
1646 unsigned int cur_el
= arm_current_el(env
);
1647 bool secure
= arm_is_secure(env
);
1649 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1650 * EL0[PV]TEN is zero.
1653 !extract32(env
->cp15
.c14_cntkctl
, 9 - timeridx
, 1)) {
1654 return CP_ACCESS_TRAP
;
1657 if (arm_feature(env
, ARM_FEATURE_EL2
) &&
1658 timeridx
== GTIMER_PHYS
&& !secure
&& cur_el
< 2 &&
1659 !extract32(env
->cp15
.cnthctl_el2
, 1, 1)) {
1660 return CP_ACCESS_TRAP_EL2
;
1662 return CP_ACCESS_OK
;
1665 static CPAccessResult
gt_pct_access(CPUARMState
*env
,
1666 const ARMCPRegInfo
*ri
,
1669 return gt_counter_access(env
, GTIMER_PHYS
, isread
);
1672 static CPAccessResult
gt_vct_access(CPUARMState
*env
,
1673 const ARMCPRegInfo
*ri
,
1676 return gt_counter_access(env
, GTIMER_VIRT
, isread
);
1679 static CPAccessResult
gt_ptimer_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1682 return gt_timer_access(env
, GTIMER_PHYS
, isread
);
1685 static CPAccessResult
gt_vtimer_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1688 return gt_timer_access(env
, GTIMER_VIRT
, isread
);
1691 static CPAccessResult
gt_stimer_access(CPUARMState
*env
,
1692 const ARMCPRegInfo
*ri
,
1695 /* The AArch64 register view of the secure physical timer is
1696 * always accessible from EL3, and configurably accessible from
1699 switch (arm_current_el(env
)) {
1701 if (!arm_is_secure(env
)) {
1702 return CP_ACCESS_TRAP
;
1704 if (!(env
->cp15
.scr_el3
& SCR_ST
)) {
1705 return CP_ACCESS_TRAP_EL3
;
1707 return CP_ACCESS_OK
;
1710 return CP_ACCESS_TRAP
;
1712 return CP_ACCESS_OK
;
1714 g_assert_not_reached();
1718 static uint64_t gt_get_countervalue(CPUARMState
*env
)
1720 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) / GTIMER_SCALE
;
1723 static void gt_recalc_timer(ARMCPU
*cpu
, int timeridx
)
1725 ARMGenericTimer
*gt
= &cpu
->env
.cp15
.c14_timer
[timeridx
];
1728 /* Timer enabled: calculate and set current ISTATUS, irq, and
1729 * reset timer to when ISTATUS next has to change
1731 uint64_t offset
= timeridx
== GTIMER_VIRT
?
1732 cpu
->env
.cp15
.cntvoff_el2
: 0;
1733 uint64_t count
= gt_get_countervalue(&cpu
->env
);
1734 /* Note that this must be unsigned 64 bit arithmetic: */
1735 int istatus
= count
- offset
>= gt
->cval
;
1739 gt
->ctl
= deposit32(gt
->ctl
, 2, 1, istatus
);
1741 irqstate
= (istatus
&& !(gt
->ctl
& 2));
1742 qemu_set_irq(cpu
->gt_timer_outputs
[timeridx
], irqstate
);
1745 /* Next transition is when count rolls back over to zero */
1746 nexttick
= UINT64_MAX
;
1748 /* Next transition is when we hit cval */
1749 nexttick
= gt
->cval
+ offset
;
1751 /* Note that the desired next expiry time might be beyond the
1752 * signed-64-bit range of a QEMUTimer -- in this case we just
1753 * set the timer for as far in the future as possible. When the
1754 * timer expires we will reset the timer for any remaining period.
1756 if (nexttick
> INT64_MAX
/ GTIMER_SCALE
) {
1757 nexttick
= INT64_MAX
/ GTIMER_SCALE
;
1759 timer_mod(cpu
->gt_timer
[timeridx
], nexttick
);
1760 trace_arm_gt_recalc(timeridx
, irqstate
, nexttick
);
1762 /* Timer disabled: ISTATUS and timer output always clear */
1764 qemu_set_irq(cpu
->gt_timer_outputs
[timeridx
], 0);
1765 timer_del(cpu
->gt_timer
[timeridx
]);
1766 trace_arm_gt_recalc_disabled(timeridx
);
1770 static void gt_timer_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1773 ARMCPU
*cpu
= arm_env_get_cpu(env
);
1775 timer_del(cpu
->gt_timer
[timeridx
]);
1778 static uint64_t gt_cnt_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1780 return gt_get_countervalue(env
);
1783 static uint64_t gt_virt_cnt_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1785 return gt_get_countervalue(env
) - env
->cp15
.cntvoff_el2
;
1788 static void gt_cval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1792 trace_arm_gt_cval_write(timeridx
, value
);
1793 env
->cp15
.c14_timer
[timeridx
].cval
= value
;
1794 gt_recalc_timer(arm_env_get_cpu(env
), timeridx
);
1797 static uint64_t gt_tval_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1800 uint64_t offset
= timeridx
== GTIMER_VIRT
? env
->cp15
.cntvoff_el2
: 0;
1802 return (uint32_t)(env
->cp15
.c14_timer
[timeridx
].cval
-
1803 (gt_get_countervalue(env
) - offset
));
1806 static void gt_tval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1810 uint64_t offset
= timeridx
== GTIMER_VIRT
? env
->cp15
.cntvoff_el2
: 0;
1812 trace_arm_gt_tval_write(timeridx
, value
);
1813 env
->cp15
.c14_timer
[timeridx
].cval
= gt_get_countervalue(env
) - offset
+
1814 sextract64(value
, 0, 32);
1815 gt_recalc_timer(arm_env_get_cpu(env
), timeridx
);
1818 static void gt_ctl_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1822 ARMCPU
*cpu
= arm_env_get_cpu(env
);
1823 uint32_t oldval
= env
->cp15
.c14_timer
[timeridx
].ctl
;
1825 trace_arm_gt_ctl_write(timeridx
, value
);
1826 env
->cp15
.c14_timer
[timeridx
].ctl
= deposit64(oldval
, 0, 2, value
);
1827 if ((oldval
^ value
) & 1) {
1828 /* Enable toggled */
1829 gt_recalc_timer(cpu
, timeridx
);
1830 } else if ((oldval
^ value
) & 2) {
1831 /* IMASK toggled: don't need to recalculate,
1832 * just set the interrupt line based on ISTATUS
1834 int irqstate
= (oldval
& 4) && !(value
& 2);
1836 trace_arm_gt_imask_toggle(timeridx
, irqstate
);
1837 qemu_set_irq(cpu
->gt_timer_outputs
[timeridx
], irqstate
);
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

#else
/* In user-mode most of the generic timer registers are inaccessible;
 * however modern kernels (4.12+) allow access to cntvct_el0
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / GTIMER_SCALE;
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};

#endif
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * ATS1Hx always uses the 64bit format (not supported yet).
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & HCR_VM;
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
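
/* Illustrative note, not from the original source: field layout of the
 * 64-bit PAR assembled above for a successful translation (per the
 * VMSAv8-64 PAR_EL1 description):
 *
 *     bit 0        F     = 0 (no fault)
 *     bits 8:7     SH    = cacheattrs.shareability
 *     bit 9        NS    = !attrs.secure
 *     bit 11       LPAE  = 1 (marks the 64-bit format)
 *     bits 47:12   PA    = phys_addr & ~0xfff
 *     bits 63:56   ATTR  = cacheattrs.attrs (MAIR-style encoding)
 *
 * e.g. a non-secure page at 0x40001000 with attrs 0xff and shareability 0
 * yields par64 == 0xff00000040001a00 (0xa00 == NS | LPAE).
 */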
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
#endif
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
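
/* Illustrative note, not from the original source: the two helpers above
 * convert between the packed view (eight 2-bit AP fields in bits [15:0])
 * and the extended view (the same fields spread one nibble apart).  For
 * two regions with AP values 0b01 and 0b10:
 *
 *     packed   = 0b1001     (region 1 in bits [3:2], region 0 in [1:0])
 *     extended = 0b100001   (region 1 in bits [5:4], region 0 in [1:0])
 *
 * and simple_mpu_ap_bits(extended_mpu_ap_bits(x)) == x for any 16-bit x,
 * since both loops walk the same eight fields in step.
 */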
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
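
/* Illustrative worked example, not from the original source: with the
 * short-descriptor format, TTBCR.N (maskshift) splits the VA space
 * between TTBR0 and TTBR1 and shrinks the TTBR0 table.  For N = 2:
 *
 *     mask      = ~(0xffffffffu >> 2) = 0xc0000000
 *     base_mask = ~(0x3fffu >> 2)     = 0xfffff000
 *
 * so any VA with a bit set in 0xc0000000 (i.e. at or above 1GB) is
 * translated via TTBR1, and the TTBR0 table base only needs 4K alignment
 * because the first-level table is a quarter of its N = 0 size.
 */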
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}

static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* 64 bit accesses to the TTBRs can change the ASID and so we
     * must flush the TLB.
     */
    if (cpreg_field_is_64bit(ri)) {
        ARMCPU *cpu = arm_env_get_cpu(env);

        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0 |
                            ARMMMUIdxBit_S2NS);
        raw_write(env, ri, value);
    }
}
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}

static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}

static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0 |
                                ARMMMUIdxBit_S2NS);
        } else {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0);
        }
    }
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);
    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else if (has_el2) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0 |
                                            ARMMMUIdxBit_S2NS);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S1SE1 |
                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S12NSE1 |
                                 ARMMMUIdxBit_S12NSE0);
    }
}
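
/* Illustrative worked example, not from the original source: the TLBI VA
 * argument carries VA[55:12] in its low bits, so the page address is
 * rebuilt by shifting left 12 and sign-extending from bit 55, matching
 * the canonical form of AArch64 addresses:
 *
 *     VA                     = 0xffff800000001000
 *     TLBI value[43:0]       = VA[55:12] = 0xff800000001
 *     value << 12            = 0x00ff800000001000
 *     sextract64(.., 0, 56)  = 0xffff800000001000  (bit 55 replicated up)
 */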
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    bool sec = arm_is_secure_below_el3(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (sec) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S1SE1 |
                                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S12NSE1 |
                                                 ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* We don't implement EL2, so the only control on DC ZVA is the
     * bit in the SCTLR which can prohibit access for EL0.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}

static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented.  */
    /* This may enable/disable the MMU, so do a TLB flush.  */
    tlb_flush(CPU(cpu));
}

static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
3420 static const ARMCPRegInfo v8_cp_reginfo
[] = {
3421 /* Minimal set of EL0-visible registers. This will need to be expanded
3422 * significantly for system emulation of AArch64 CPUs.
3424 { .name
= "NZCV", .state
= ARM_CP_STATE_AA64
,
3425 .opc0
= 3, .opc1
= 3, .opc2
= 0, .crn
= 4, .crm
= 2,
3426 .access
= PL0_RW
, .type
= ARM_CP_NZCV
},
3427 { .name
= "DAIF", .state
= ARM_CP_STATE_AA64
,
3428 .opc0
= 3, .opc1
= 3, .opc2
= 1, .crn
= 4, .crm
= 2,
3429 .type
= ARM_CP_NO_RAW
,
3430 .access
= PL0_RW
, .accessfn
= aa64_daif_access
,
3431 .fieldoffset
= offsetof(CPUARMState
, daif
),
3432 .writefn
= aa64_daif_write
, .resetfn
= arm_cp_reset_ignore
},
3433 { .name
= "FPCR", .state
= ARM_CP_STATE_AA64
,
3434 .opc0
= 3, .opc1
= 3, .opc2
= 0, .crn
= 4, .crm
= 4,
3435 .access
= PL0_RW
, .type
= ARM_CP_FPU
| ARM_CP_SUPPRESS_TB_END
,
3436 .readfn
= aa64_fpcr_read
, .writefn
= aa64_fpcr_write
},
3437 { .name
= "FPSR", .state
= ARM_CP_STATE_AA64
,
3438 .opc0
= 3, .opc1
= 3, .opc2
= 1, .crn
= 4, .crm
= 4,
3439 .access
= PL0_RW
, .type
= ARM_CP_FPU
| ARM_CP_SUPPRESS_TB_END
,
3440 .readfn
= aa64_fpsr_read
, .writefn
= aa64_fpsr_write
},
3441 { .name
= "DCZID_EL0", .state
= ARM_CP_STATE_AA64
,
3442 .opc0
= 3, .opc1
= 3, .opc2
= 7, .crn
= 0, .crm
= 0,
3443 .access
= PL0_R
, .type
= ARM_CP_NO_RAW
,
3444 .readfn
= aa64_dczid_read
},
3445 { .name
= "DC_ZVA", .state
= ARM_CP_STATE_AA64
,
3446 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 4, .opc2
= 1,
3447 .access
= PL0_W
, .type
= ARM_CP_DC_ZVA
,
3448 #ifndef CONFIG_USER_ONLY
3449 /* Avoid overhead of an access check that always passes in user-mode */
3450 .accessfn
= aa64_zva_access
,
3453 { .name
= "CURRENTEL", .state
= ARM_CP_STATE_AA64
,
3454 .opc0
= 3, .opc1
= 0, .opc2
= 2, .crn
= 4, .crm
= 2,
3455 .access
= PL1_R
, .type
= ARM_CP_CURRENTEL
},
3456 /* Cache ops: all NOPs since we don't emulate caches */
3457 { .name
= "IC_IALLUIS", .state
= ARM_CP_STATE_AA64
,
3458 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 1, .opc2
= 0,
3459 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3460 { .name
= "IC_IALLU", .state
= ARM_CP_STATE_AA64
,
3461 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 5, .opc2
= 0,
3462 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3463 { .name
= "IC_IVAU", .state
= ARM_CP_STATE_AA64
,
3464 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 5, .opc2
= 1,
3465 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3466 .accessfn
= aa64_cacheop_access
},
3467 { .name
= "DC_IVAC", .state
= ARM_CP_STATE_AA64
,
3468 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 6, .opc2
= 1,
3469 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3470 { .name
= "DC_ISW", .state
= ARM_CP_STATE_AA64
,
3471 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 6, .opc2
= 2,
3472 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3473 { .name
= "DC_CVAC", .state
= ARM_CP_STATE_AA64
,
3474 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 10, .opc2
= 1,
3475 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3476 .accessfn
= aa64_cacheop_access
},
3477 { .name
= "DC_CSW", .state
= ARM_CP_STATE_AA64
,
3478 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 10, .opc2
= 2,
3479 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3480 { .name
= "DC_CVAU", .state
= ARM_CP_STATE_AA64
,
3481 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 11, .opc2
= 1,
3482 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3483 .accessfn
= aa64_cacheop_access
},
3484 { .name
= "DC_CIVAC", .state
= ARM_CP_STATE_AA64
,
3485 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 14, .opc2
= 1,
3486 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3487 .accessfn
= aa64_cacheop_access
},
3488 { .name
= "DC_CISW", .state
= ARM_CP_STATE_AA64
,
3489 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 14, .opc2
= 2,
3490 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3491 /* TLBI operations */
3492 { .name
= "TLBI_VMALLE1IS", .state
= ARM_CP_STATE_AA64
,
3493 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 0,
3494 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3495 .writefn
= tlbi_aa64_vmalle1is_write
},
3496 { .name
= "TLBI_VAE1IS", .state
= ARM_CP_STATE_AA64
,
3497 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 1,
3498 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3499 .writefn
= tlbi_aa64_vae1is_write
},
3500 { .name
= "TLBI_ASIDE1IS", .state
= ARM_CP_STATE_AA64
,
3501 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 2,
3502 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3503 .writefn
= tlbi_aa64_vmalle1is_write
},
3504 { .name
= "TLBI_VAAE1IS", .state
= ARM_CP_STATE_AA64
,
3505 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 3,
3506 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3507 .writefn
= tlbi_aa64_vae1is_write
},
3508 { .name
= "TLBI_VALE1IS", .state
= ARM_CP_STATE_AA64
,
3509 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 5,
3510 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3511 .writefn
= tlbi_aa64_vae1is_write
},
3512 { .name
= "TLBI_VAALE1IS", .state
= ARM_CP_STATE_AA64
,
3513 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 7,
3514 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3515 .writefn
= tlbi_aa64_vae1is_write
},
3516 { .name
= "TLBI_VMALLE1", .state
= ARM_CP_STATE_AA64
,
3517 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 0,
3518 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3519 .writefn
= tlbi_aa64_vmalle1_write
},
3520 { .name
= "TLBI_VAE1", .state
= ARM_CP_STATE_AA64
,
3521 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 1,
3522 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3523 .writefn
= tlbi_aa64_vae1_write
},
3524 { .name
= "TLBI_ASIDE1", .state
= ARM_CP_STATE_AA64
,
3525 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 2,
3526 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3527 .writefn
= tlbi_aa64_vmalle1_write
},
3528 { .name
= "TLBI_VAAE1", .state
= ARM_CP_STATE_AA64
,
3529 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 3,
3530 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3531 .writefn
= tlbi_aa64_vae1_write
},
3532 { .name
= "TLBI_VALE1", .state
= ARM_CP_STATE_AA64
,
3533 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 5,
3534 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3535 .writefn
= tlbi_aa64_vae1_write
},
3536 { .name
= "TLBI_VAALE1", .state
= ARM_CP_STATE_AA64
,
3537 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 7,
3538 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3539 .writefn
= tlbi_aa64_vae1_write
},
3540 { .name
= "TLBI_IPAS2E1IS", .state
= ARM_CP_STATE_AA64
,
3541 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 0, .opc2
= 1,
3542 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3543 .writefn
= tlbi_aa64_ipas2e1is_write
},
3544 { .name
= "TLBI_IPAS2LE1IS", .state
= ARM_CP_STATE_AA64
,
3545 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 0, .opc2
= 5,
3546 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3547 .writefn
= tlbi_aa64_ipas2e1is_write
},
3548 { .name
= "TLBI_ALLE1IS", .state
= ARM_CP_STATE_AA64
,
3549 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 3, .opc2
= 4,
3550 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3551 .writefn
= tlbi_aa64_alle1is_write
},
3552 { .name
= "TLBI_VMALLS12E1IS", .state
= ARM_CP_STATE_AA64
,
3553 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 3, .opc2
= 6,
3554 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3555 .writefn
= tlbi_aa64_alle1is_write
},
3556 { .name
= "TLBI_IPAS2E1", .state
= ARM_CP_STATE_AA64
,
3557 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 4, .opc2
= 1,
3558 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3559 .writefn
= tlbi_aa64_ipas2e1_write
},
3560 { .name
= "TLBI_IPAS2LE1", .state
= ARM_CP_STATE_AA64
,
3561 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 4, .opc2
= 5,
3562 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3563 .writefn
= tlbi_aa64_ipas2e1_write
},
3564 { .name
= "TLBI_ALLE1", .state
= ARM_CP_STATE_AA64
,
3565 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 7, .opc2
= 4,
3566 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3567 .writefn
= tlbi_aa64_alle1_write
},
3568 { .name
= "TLBI_VMALLS12E1", .state
= ARM_CP_STATE_AA64
,
3569 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 7, .opc2
= 6,
3570 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3571 .writefn
= tlbi_aa64_alle1is_write
},
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
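    /* Illustrative guest-side usage (not QEMU code): a kernel typically
     * performs a software translation by issuing an AT operation and then
     * reading the result, including any fault syndrome, back from PAR_EL1:
     *
     *     asm volatile("at s1e1r, %0" : : "r" (va));
     *     asm volatile("isb");
     *     asm volatile("mrs %0, par_el1" : "=r" (par));
     *
     * In this implementation ats_write64() runs the translation-table walk
     * synchronously and deposits the result in cp15.par_el[1], so the
     * subsequent PAR_EL1 read simply returns that field.
     */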
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
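    /* The cache maintenance operations above can be ARM_CP_NOP because the
     * emulated CPU presents fully coherent caches to the guest: TCG does
     * not model cache contents, so clean/invalidate by VA, set/way or
     * branch-predictor maintenance has no visible effect beyond the
     * permission checking that the reginfo framework already performs.
     */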
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
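    /* DACR is an example of a banked register: .bank_fieldoffsets supplies
     * the {secure, non-secure} backing fields and the reginfo core picks
     * the right one from the current security state; offsetoflow32() maps
     * the 32-bit view onto the low half of the underlying state field.
     */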
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};

/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_NO_RAW,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_CONST,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    hcr_write(env, NULL, value);
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    hcr_write(env, NULL, value);
}
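
/* deposit64(x, pos, len, val) returns x with bits [pos, pos + len) replaced
 * by the low 'len' bits of val; for example (values purely illustrative):
 *
 *     deposit64(0x1111222233334444ULL, 32, 32, 0xAAAABBBBULL)
 *         == 0xAAAABBBB33334444ULL
 *
 * so hcr_writehigh() and hcr_writelow() each rewrite one 32-bit half of
 * hcr_el2 while preserving the other, then funnel through hcr_write() so
 * the RES0 masking and TLB-flush logic live in exactly one place.
 */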

static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};

static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
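
/* Summarising nsacr_access() as a truth table (illustrative):
 *   from EL3                -> CP_ACCESS_OK              (full RW)
 *   from Secure EL1         -> CP_ACCESS_TRAP_EL3
 *   from NS EL1/EL2, read   -> CP_ACCESS_OK
 *   from NS EL1/EL2, write  -> CP_ACCESS_TRAP_UNCATEGORIZED (UNDEF)
 */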

static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};

static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}

static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Return the exception level to which SVE-disabled exceptions should
 * be taken, or 0 if SVE is enabled.
 */
static int sve_exception_el(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    unsigned current_el = arm_current_el(env);

    /* The CPACR.ZEN controls traps to EL1:
     * 0, 2 : trap EL0 and EL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    switch (extract32(env->cp15.cpacr_el1, 16, 2)) {
    default:
        if (current_el <= 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        break;
    case 1:
        if (current_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* Similarly for CPACR.FPEN, after having checked ZEN.  */
    switch (extract32(env->cp15.cpacr_el1, 20, 2)) {
    default:
        if (current_el <= 1) {
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        break;
    case 1:
        if (current_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* CPTR_EL2.  Check both TZ and TFP.  */
    if (current_el <= 2
        && (env->cp15.cptr_el[2] & (CPTR_TFP | CPTR_TZ))
        && !arm_is_secure_below_el3(env)) {
        return 2;
    }

    /* CPTR_EL3.  Check both EZ and TFP.  */
    if (!(env->cp15.cptr_el[3] & CPTR_EZ)
        || (env->cp15.cptr_el[3] & CPTR_TFP)) {
        return 3;
    }
#endif
    return 0;
}
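
/* CPACR_EL1.ZEN is bits [17:16] and CPACR_EL1.FPEN is bits [21:20], which
 * is what the extract32(..., 16, 2) and extract32(..., 20, 2) calls above
 * pull out. As a worked example (value chosen for illustration), a
 * CPACR_EL1 of 0x00300000 has FPEN == 3 (FP/SIMD never trapped) but
 * ZEN == 0, so SVE accesses from EL0 or EL1 still trap.
 */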

static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    /* Bits other than [3:0] are RAZ/WI.  */
    raw_write(env, ri, value & 0xf);
}
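
/* ZCR_ELx.LEN lives in bits [3:0]; architecturally the effective SVE
 * vector length is (LEN + 1) * 128 bits. So, for example, a guest write
 * of 0x101 stores just 0x1 (a request for 256-bit vectors) and everything
 * above bit 3 reads back as zero.
 */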

static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
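
/* Worked example (illustrative values): a wcr with BAS == 0b00111100 gives
 * basstart == ctz32(0x3c) == 2 and len == cto32(0x3c >> 2) == 4, so the
 * watchpoint covers the four bytes wvr + 2 .. wvr + 5, exactly the bytes
 * whose select bits were set.
 */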

void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}

static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
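
/* sextract64(value, 0, 49) sign-extends bit 48 up through bit 63: e.g. an
 * input of 0x0001000000000000 (only bit 48 set) is stored back as
 * 0xffff000000000000, modelling DBGWVR bits [63:49] reading as copies of
 * bit [48]; the & ~3ULL then clears the RES0 bits [1:0].
 */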

void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}

void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}

static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
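
/* Because BAS[1] mirrors BAS[0] and BAS[3] mirrors BAS[2], a guest write
 * of e.g. BAS = 0b0110 is stored as 0b1100: only the patterns 0b0000,
 * 0b0011, 0b1100 and 0b1111 can ever reach the register, which is what
 * lets hw_breakpoint_update() enumerate just those four cases.
 */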

static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
     * of the debug registers such as number of breakpoints;
     * check that if they both exist then they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}

/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr1 = cpu->id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr0 = cpu->id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
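
/* The GIC system-register interface is advertised in ID_PFR1 bits [31:28]
 * and ID_AA64PFR0_EL1 bits [27:24], hence the 1 << 28 and 1 << 24 above:
 * each sets the corresponding field to 1 (GIC system registers supported),
 * but only once a GICv3 has actually attached its state.
 */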
4854 void register_cp_regs_for_features(ARMCPU
*cpu
)
4856 /* Register all the coprocessor registers based on feature bits */
4857 CPUARMState
*env
= &cpu
->env
;
4858 if (arm_feature(env
, ARM_FEATURE_M
)) {
4859 /* M profile has no coprocessor registers */
4863 define_arm_cp_regs(cpu
, cp_reginfo
);
4864 if (!arm_feature(env
, ARM_FEATURE_V8
)) {
4865 /* Must go early as it is full of wildcards that may be
4866 * overridden by later definitions.
4868 define_arm_cp_regs(cpu
, not_v8_cp_reginfo
);
4871 if (arm_feature(env
, ARM_FEATURE_V6
)) {
4872 /* The ID registers all have impdef reset values */
4873 ARMCPRegInfo v6_idregs
[] = {
4874 { .name
= "ID_PFR0", .state
= ARM_CP_STATE_BOTH
,
4875 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 0,
4876 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4877 .resetvalue
= cpu
->id_pfr0
},
4878 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
4879 * the value of the GIC field until after we define these regs.
4881 { .name
= "ID_PFR1", .state
= ARM_CP_STATE_BOTH
,
4882 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 1,
4883 .access
= PL1_R
, .type
= ARM_CP_NO_RAW
,
4884 .readfn
= id_pfr1_read
,
4885 .writefn
= arm_cp_write_ignore
},
4886 { .name
= "ID_DFR0", .state
= ARM_CP_STATE_BOTH
,
4887 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 2,
4888 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4889 .resetvalue
= cpu
->id_dfr0
},
4890 { .name
= "ID_AFR0", .state
= ARM_CP_STATE_BOTH
,
4891 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 3,
4892 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4893 .resetvalue
= cpu
->id_afr0
},
4894 { .name
= "ID_MMFR0", .state
= ARM_CP_STATE_BOTH
,
4895 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 4,
4896 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4897 .resetvalue
= cpu
->id_mmfr0
},
4898 { .name
= "ID_MMFR1", .state
= ARM_CP_STATE_BOTH
,
4899 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 5,
4900 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4901 .resetvalue
= cpu
->id_mmfr1
},
4902 { .name
= "ID_MMFR2", .state
= ARM_CP_STATE_BOTH
,
4903 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 6,
4904 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4905 .resetvalue
= cpu
->id_mmfr2
},
4906 { .name
= "ID_MMFR3", .state
= ARM_CP_STATE_BOTH
,
4907 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 7,
4908 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4909 .resetvalue
= cpu
->id_mmfr3
},
4910 { .name
= "ID_ISAR0", .state
= ARM_CP_STATE_BOTH
,
4911 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 0,
4912 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4913 .resetvalue
= cpu
->id_isar0
},
4914 { .name
= "ID_ISAR1", .state
= ARM_CP_STATE_BOTH
,
4915 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 1,
4916 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4917 .resetvalue
= cpu
->id_isar1
},
4918 { .name
= "ID_ISAR2", .state
= ARM_CP_STATE_BOTH
,
4919 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 2,
4920 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4921 .resetvalue
= cpu
->id_isar2
},
4922 { .name
= "ID_ISAR3", .state
= ARM_CP_STATE_BOTH
,
4923 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 3,
4924 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4925 .resetvalue
= cpu
->id_isar3
},
4926 { .name
= "ID_ISAR4", .state
= ARM_CP_STATE_BOTH
,
4927 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 4,
4928 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4929 .resetvalue
= cpu
->id_isar4
},
4930 { .name
= "ID_ISAR5", .state
= ARM_CP_STATE_BOTH
,
4931 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 5,
4932 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4933 .resetvalue
= cpu
->id_isar5
},
4934 { .name
= "ID_MMFR4", .state
= ARM_CP_STATE_BOTH
,
4935 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 6,
4936 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4937 .resetvalue
= cpu
->id_mmfr4
},
4938 { .name
= "ID_ISAR6", .state
= ARM_CP_STATE_BOTH
,
4939 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 7,
4940 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4941 .resetvalue
= cpu
->id_isar6
},
4944 define_arm_cp_regs(cpu
, v6_idregs
);
4945 define_arm_cp_regs(cpu
, v6_cp_reginfo
);
4947 define_arm_cp_regs(cpu
, not_v6_cp_reginfo
);
4949 if (arm_feature(env
, ARM_FEATURE_V6K
)) {
4950 define_arm_cp_regs(cpu
, v6k_cp_reginfo
);
4952 if (arm_feature(env
, ARM_FEATURE_V7MP
) &&
4953 !arm_feature(env
, ARM_FEATURE_PMSA
)) {
4954 define_arm_cp_regs(cpu
, v7mp_cp_reginfo
);
4956 if (arm_feature(env
, ARM_FEATURE_V7
)) {
4957 /* v7 performance monitor control register: same implementor
4958 * field as main ID register, and we implement only the cycle
4961 #ifndef CONFIG_USER_ONLY
4962 ARMCPRegInfo pmcr
= {
4963 .name
= "PMCR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 0,
4965 .type
= ARM_CP_IO
| ARM_CP_ALIAS
,
4966 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmcr
),
4967 .accessfn
= pmreg_access
, .writefn
= pmcr_write
,
4968 .raw_writefn
= raw_write
,
4970 ARMCPRegInfo pmcr64
= {
4971 .name
= "PMCR_EL0", .state
= ARM_CP_STATE_AA64
,
4972 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 0,
4973 .access
= PL0_RW
, .accessfn
= pmreg_access
,
4975 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmcr
),
4976 .resetvalue
= cpu
->midr
& 0xff000000,
4977 .writefn
= pmcr_write
, .raw_writefn
= raw_write
,
4979 define_one_arm_cp_reg(cpu
, &pmcr
);
4980 define_one_arm_cp_reg(cpu
, &pmcr64
);
4982 ARMCPRegInfo clidr
= {
4983 .name
= "CLIDR", .state
= ARM_CP_STATE_BOTH
,
4984 .opc0
= 3, .crn
= 0, .crm
= 0, .opc1
= 1, .opc2
= 1,
4985 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= cpu
->clidr
4987 define_one_arm_cp_reg(cpu
, &clidr
);
4988 define_arm_cp_regs(cpu
, v7_cp_reginfo
);
4989 define_debug_regs(cpu
);
4991 define_arm_cp_regs(cpu
, not_v7_cp_reginfo
);
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
             * know the right value for the GIC field until after we
             * define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64pfr1 },
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
            if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
            }
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
              .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
              .name = "MPUIR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .type = ARM_CP_CONST,
              .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
            ARMCPRegInfo hactlr2_reginfo = {
                .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
                .access = PL2_RW, .type = ARM_CP_CONST,
                .resetvalue = 0
            };
            define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }

    if (arm_feature(env, ARM_FEATURE_SVE)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }
}
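/* Expose the FP/Neon register state and the dynamically generated system
 * register XML to the gdbstub. The register counts passed below (34, 51,
 * 35, 19) must match the number of registers described by the named XML
 * files: e.g. 34 is the 32 AArch64 V registers plus FPSR and FPCR, and
 * 19 is 16 VFP D registers plus FPSID/FPSCR/FPEXC.
 */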
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_xml(cs),
                             "system-registers.xml", 0);
}
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n",
                      name);
    g_free(name);
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
}
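/* Helpers for the QMP query-cpu-definitions command: build one
 * CpuDefinitionInfo per registered ARM CPU class, stripping the
 * "-arm-cpu" suffix from the QOM type name to get the user-visible name.
 */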
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
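/* Note: the cp_regs hash table is keyed by a 32-bit encoding of the
 * register's coprocessor/op0 number, crn, crm and opcode fields (see the
 * ENCODE_CP_REG and ENCODE_AA64_CP_REG macros), so a wildcarded reginfo
 * expands into one hash table entry per concrete encoding.
 */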
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);
    /* Reset the secure state to the specific incoming state.  This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank.  This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of AArch32 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->cp15.hcr_el2 & HCR_TGE) &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            !arm_is_secure_below_el3(env)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
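/* Reassemble the architectural CPSR value from the NZCV, Q, IT, GE,
 * Thumb and AIF fields that QEMU keeps in separate CPUARMState fields
 * for efficiency.
 */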
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
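/* Write 'val' to the CPSR, updating only the bits selected by 'mask'.
 * write_type distinguishes raw migration writes, gdbstub writes and
 * guest instruction writes; the latter receive the architectural
 * permission and mode-switch checks below.
 */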
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                env->uncached_cpsr |= CPSR_IL;
            }
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}
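/* ARM UDIV/SDIV return 0 for division by zero (when divide-by-zero
 * trapping is not in effect), and SDIV of INT_MIN by -1 wraps to
 * INT_MIN; the helpers below implement those semantics.
 */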
int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
      return 0;
    if (num == INT_MIN && den == -1)
      return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
      return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
#if defined(CONFIG_USER_ONLY)

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
    return 0;
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}
void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
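/* Full-system version: switch between CPU modes, swapping the banked
 * copies of r13, r14 and SPSR (and the FIQ-banked r8-r12) in and out of
 * the active register file.
 */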
void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contains a target of EL1.  This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    int rw;
    int scr;
    int hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    int is64 = arm_feature(env, ARM_FEATURE_AARCH64);

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = arm_hcr_el2_imo(env);
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = arm_hcr_el2_fmo(env);
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = arm_hcr_el2_amo(env);
        break;
    };

    /* If HCR.TGE is set then HCR is treated as being 1 */
    hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}
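/* Store a 32-bit word to the stack during v7M exception entry stacking.
 * Returns true on success; on an MPU/SAU or bus error it updates the
 * fault status registers and (unless ignfault suppresses it) pends the
 * derived exception, returning false.
 */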
static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, bool ignfault)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during stacking\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception. Even in this
     * case we will still update the fault status registers.
     */
    if (!ignfault) {
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
    }
    return false;
}
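/* Counterpart of v7m_stack_write(): load a 32-bit word for v7M exception
 * return unstacking, pending a MemManage, BusFault or SecureFault and
 * returning false on failure.
 */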
static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}
/* Return true if we're using the process stack pointer (not the MSP) */
static bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/* Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}
void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /* Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}
/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /* All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /* This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
}
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /* target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data(env, sp, nextinst);
    cpu_stl_data(env, sp + 4, saved_psr);

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /* Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
}
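/* Illustrative layout of the two-word frame pushed by BLXNS above (restated
 * from the code, not additional architectural detail):
 *   sp + 4 : saved partial xPSR (exception number, plus SFPA from CONTROL_S)
 *   sp + 0 : return address (insn after the BLXNS, bit 0 set for Thumb)
 * LR is then loaded with the FNC_RETURN magic value 0xfeffffff.
 */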
static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /* Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
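/* Quick reference for the lookup above (derived from the code, shown here
 * for convenience; "current" means the requested state matches
 * env->v7m.secure, and "wanted PSP" means threadmode && spsel):
 *   current state, wanted SP is the live SP     -> &env->regs[13]
 *   current state, wanted SP is the other SP    -> &env->v7m.other_sp
 *   other state, PSP wanted                     -> &env->v7m.other_ss_psp
 *   other state, MSP wanted                     -> &env->v7m.other_ss_msp
 */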
static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /* We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /* NS access to S memory */
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /* All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a MemManage or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     */
    exc_secure = targets_secure ||
        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}
static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /* For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
    } else {
        mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
        frame_sp_p = &env->regs[13];
    }

    frameptr = *frame_sp_p - 0x28;

    /* Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
                        ignore_faults);

    /* Update SP regardless of whether any of the stack accesses failed.
     * When we implement v8M stack limit checking then this attempt to
     * update SP might also fail and result in a derived exception.
     */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
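/* For reference, a sketch of the 0x28-byte v8M callee-saves frame as the
 * writes above lay it out (offset 0x04 is left unwritten):
 *   0x00 integrity signature 0xfefa125b
 *   0x08 r4   0x0c r5   0x10 r6   0x14 r7
 *   0x18 r8   0x1c r9   0x20 r10  0x24 r11
 */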
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /* Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /* The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /* We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /* We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /* Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                /* Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;

                for (i = 0; i < 13; i++) {
                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /* Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /* Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
static bool v7m_push_stack(ARMCPU *cpu)
{
    /* Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    frameptr -= 0x20;

    /* Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);

    /* Update SP regardless of whether any of the stack accesses failed.
     * When we implement v8M stack limit checking then this attempt to
     * update SP might also fail and result in a derived exception.
     */
    env->regs[13] = frameptr;

    return !stacked_ok;
}
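/* For reference, the 0x20-byte basic exception frame written above:
 *   0x00 r0   0x04 r1   0x08 r2   0x0c r3
 *   0x10 r12  0x14 lr   0x18 pc   0x1c xPSR (SPREALIGN set if we padded)
 */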
static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;

    /* If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /* In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15] | (env->thumb ? 1 : 0);

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = 1;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /* Auto-clear FAULTMASK on return from other than NMI.
         * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
         * value of the ES bit in the exception return value indicates
         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
                env->v7m.faultmask[exc_secure] = 0;
            }
        } else {
            env->v7m.faultmask[M_REG_NS] = 0;
        }
    }

    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
                                     exc_secure)) {
    case -1:
        /* attempt to exit an exception that isn't active */
        ufault = true;
        break;
    case 0:
        /* still an irq active now */
        break;
    case 1:
        /* we returned to base exception level, no nesting.
         * (In the pseudocode this is written using "NestedActivation != 1"
         * where we have 'rettobase == false'.)
         */
        rettobase = true;
        break;
    default:
        g_assert_not_reached();
    }

    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        (excret & R_V7M_EXCRET_S_MASK);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
             * we choose to take the UsageFault.
             */
            if ((excret & R_V7M_EXCRET_S_MASK) ||
                (excret & R_V7M_EXCRET_ES_MASK) ||
                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
                ufault = true;
            }
        }
        if (excret & R_V7M_EXCRET_RES0_MASK) {
            ufault = true;
        }
    } else {
        /* For v7M we only recognize certain combinations of the low bits */
        switch (excret & 0xf) {
        case 1: /* Return to Handler */
            break;
        case 13: /* Return to Thread using Process stack */
        case 9: /* Return to Thread using Main stack */
            /* We only need to check NONBASETHRDENA for v7M, because in
             * v8M this bit does not exist (it is RES1).
             */
            if (!rettobase &&
                !(env->v7m.ccr[env->v7m.secure] &
                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
                ufault = true;
            }
            break;
        default:
            ufault = true;
        }
    }

    /*
     * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
     * Handler mode (and will be until we write the new XPSR.Interrupt
     * field) this does not switch around the current stack pointer.
     * We must do this before we do any kind of tailchaining, including
     * for the derived exceptions on integrity check failures, or we will
     * give the guest an incorrect EXCRET.SPSEL value on exception entry.
     */
    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);

    if (sfault) {
        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                      "stackframe: failed EXC_RETURN.ES validity check\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    if (ufault) {
        /* Bad exception return: instead of popping the exception
         * stack, directly take a usage fault on the current stack.
         */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                      "stackframe: failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    /*
     * Tailchaining: if there is currently a pending exception that
     * is high enough priority to preempt execution at the level we're
     * about to return to, then just directly take that exception now,
     * avoiding an unstack-and-then-stack. Note that now we have
     * deactivated the previous exception by calling armv7m_nvic_complete_irq()
     * our current execution priority is already the execution priority we are
     * returning to -- none of the state we would unstack or set based on
     * the EXCRET value affects it.
     */
    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    switch_v7m_security_state(env, return_to_secure);

    {
        /* The stack pointer we should be reading the exception frame from
         * depends on bits in the magic exception return type value (and
         * for v8M isn't necessarily the stack pointer we will eventually
         * end up resuming execution with). Get a pointer to the location
         * in the CPU state struct where the SP we need is currently being
         * stored; we will use and modify it in place.
         * We use this limited C variable scope so we don't accidentally
         * use 'frame_sp_p' after we do something that makes it invalid.
         */
        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
                                              return_to_secure,
                                              !return_to_handler,
                                              return_to_sp_process);
        uint32_t frameptr = *frame_sp_p;
        bool pop_ok = true;
        ARMMMUIdx mmu_idx;
        bool return_to_priv = return_to_handler ||
            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
                                                        return_to_priv);

        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "M profile exception return with non-8-aligned SP "
                          "for destination state is UNPREDICTABLE\n");
        }

        /* Do we need to pop callee-saved registers? */
        if (return_to_secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
            uint32_t expected_sig = 0xfefa125b;
            uint32_t actual_sig;

            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);

            if (pop_ok && expected_sig != actual_sig) {
                /* Take a SecureFault on the current stack */
                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                              "stackframe: failed exception return integrity "
                              "signature check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }

            pop_ok = pop_ok &&
                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);

            frameptr += 0x28;
        }

        /* Pop registers */
        pop_ok = pop_ok &&
            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);

        if (!pop_ok) {
            /* v7m_stack_read() pended a fault, so take it (as a tail
             * chained exception on the same stack frame)
             */
            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
            v7m_exception_taken(cpu, excret, true, false);
            return;
        }

        /* Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
         * assume the r15 in the stack frame should be a Thumb-style "lsbit
         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
         * complain about the badly behaved guest.
         */
        if (env->regs[15] & 1) {
            env->regs[15] &= ~1U;
            if (!arm_feature(env, ARM_FEATURE_V8)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "M profile return from interrupt with misaligned "
                              "PC is UNPREDICTABLE on v7M\n");
            }
        }

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* For v8M we have to check whether the xPSR exception field
             * matches the EXCRET value for return to handler/thread
             * before we commit to changing the SP and xPSR.
             */
            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
            if (return_to_handler != will_be_handler) {
                /* Take an INVPC UsageFault on the current stack.
                 * By this point we will have switched to the security state
                 * for the background state, so this UsageFault will target
                 * that state.
                 */
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                        env->v7m.secure);
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                              "stackframe: failed exception return integrity "
                              "check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }
        }

        /* Commit to consuming the stack frame */
        frameptr += 0x20;
        /* Undo stack alignment (the SPREALIGN bit indicates that the original
         * pre-exception SP was not 8-aligned and we added a padding word to
         * align it, so we undo this by ORing in the bit that increases it
         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
         * would work too but a logical OR is how the pseudocode specifies it.)
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;
    }
    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
    xpsr_write(env, xpsr, ~XPSR_SPREALIGN);

    /* The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /* Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        bool ignore_stackfaults;

        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        ignore_stackfaults = v7m_push_stack(cpu);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
static bool do_v7m_function_return(ARMCPU *cpu)
{
    /* v8M security extensions magic function return.
     * We may either:
     *  (1) throw an exception (longjump)
     *  (2) return true if we successfully handled the function return
     *  (3) return false if we failed a consistency check and have
     *      pended a UsageFault that needs to be taken now
     *
     * At this point the magic return value is split between env->regs[15]
     * and env->thumb. We don't bother to reconstitute it because we don't
     * need it (all values are handled the same way).
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        TCGMemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /* These loads may throw an exception (for MPU faults). We want to
         * do them as secure, so work out what MMU index that is.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on new IPSR */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}
static void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                               uint32_t addr, uint16_t *insn)
{
    /* Load a 16-bit portion of a v7M instruction, returning true on success,
     * or false on failure (in which case we will have pended the appropriate
     * exception).
     * We need to do the instruction fetch's MPU and SAU checks
     * like this because there is no MMU index that would allow
     * doing the load with a single function call. Instead we must
     * first check that the security attributes permit the load
     * and that they don't mismatch on the two halves of the instruction,
     * and then we do the load as a secure load (ie using the security
     * attributes of the address, not the CPU, as architecturally required).
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;

    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /* This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
        /* the MPU lookup failed */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
                                  attrs, &txres);
    if (txres != MEMTX_OK) {
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /* Check whether this attempt to execute code in a Secure & NS-Callable
     * memory region is for an SG instruction; if so, then emulate the
     * effect of the SG instruction and return true. Otherwise pend
     * the correct kind of exception and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /* We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
        return false;
    }

    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
    }

    /* OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);
    env->regs[14] &= ~1;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4; /* Skip the SG insn */
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs->exception_index);

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
        break;
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /* Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /* Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
             * the SG instruction have the same security attributes.)
             * Everything else must generate an INVEP SecureFault, so we
             * emulate the SG instruction here.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /* Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /* All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_BKPT:
        if (semihosting_enabled()) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                qemu_log_mask(CPU_LOG_INT,
                              "...handling as semihosting call 0x%x\n",
                              env->regs[0]);
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK;
        /* The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}
/* Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /* Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
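/* Summary of the AArch32 -> AArch64 register mapping implemented above
 * (restated from the code for reference):
 *   x0-x7   <- r0-r7            x8-x12  <- r8-r12 (usr bank if in FIQ)
 *   x13/x14 <- usr SP/LR        x15     <- hyp SP
 *   x16/x17 <- irq LR/SP        x18/x19 <- svc LR/SP
 *   x20/x21 <- abt LR/SP        x22/x23 <- und LR/SP
 *   x24-x30 <- fiq r8-r14       pc      <- r15
 */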
/* Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /* Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * registers.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /* HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);
    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
}
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x14;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    /* TODO: Vectored interrupt controller.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);

    if (arm_current_el(env) < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
static inline bool check_for_semihosting(CPUState *cs)
{
    /* Check whether this exception is a semihosting call; if so
     * then handle it and return true; otherwise return false.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            /* This is always the 64-bit semihosting exception.
             * The "is this usermode" and "is semihosting enabled"
             * checks have been done at translate time.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...handling as semihosting call 0x%" PRIx64 "\n",
                          env->xregs[0]);
            env->xregs[0] = do_arm_semihosting(env);
            return true;
        }
        return false;
    } else {
        uint32_t imm;

        /* Only intercept calls from privileged modes, to provide some
         * semblance of security.
         */
        if (cs->exception_index != EXCP_SEMIHOST &&
            (!semihosting_enabled() ||
             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
            return false;
        }

        switch (cs->exception_index) {
        case EXCP_SEMIHOST:
            /* This is always a semihosting call; the "is this usermode"
             * and "is semihosting enabled" checks have been done at
             * translate time.
             */
            break;
        case EXCP_SWI:
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    break;
                }
            } else {
                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
                    & 0xffffff;
                if (imm == 0x123456) {
                    break;
                }
            }
            return false;
        case EXCP_BKPT:
            /* See if this is a semihosting syscall.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    env->regs[15] += 2;
                    break;
                }
            }
            return false;
        default:
            return false;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return true;
    }
}
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      env->exception.syndrome >> ARM_EL_EC_SHIFT,
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /* Semihosting semantics depend on the register width of the
     * code that caused the exception, not the target exception level,
     * so must be handled here.
     */
    if (check_for_semihosting(cs)) {
        return;
    }

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_S1E2:
        return 2;
    case ARMMMUIdx_S1E3:
        return 3;
    case ARMMMUIdx_S1SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_S2NS) {
        return (env->cp15.hcr_el2 & HCR_VM) == 0;
    }

    if (env->cp15.hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
    }
    return mmu_idx;
}

/* Returns TBI0 value for current regime el */
uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    TCR *tcr;
    uint32_t el;

    /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
     * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
     */
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    tcr = regime_tcr(env, mmu_idx);
    el = regime_el(env, mmu_idx);

    if (el > 1) {
        return extract64(tcr->raw_tcr, 20, 1);
    } else {
        return extract64(tcr->raw_tcr, 37, 1);
    }
}
8656 /* Returns TBI1 value for current regime el */
8657 uint32_t arm_regime_tbi1(CPUARMState
*env
, ARMMMUIdx mmu_idx
)
8662 /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
8663 * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
8665 mmu_idx
= stage_1_mmu_idx(mmu_idx
);
8667 tcr
= regime_tcr(env
, mmu_idx
);
8668 el
= regime_el(env
, mmu_idx
);
8673 return extract64(tcr
->raw_tcr
, 38, 1);
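/* Illustrative note (assumption drawn from the helpers above, not original
 * source text): for the EL1&0 regime there are two top-byte-ignore controls,
 * TBI0 (TCR bit 37) for TTBR0-region addresses and TBI1 (TCR bit 38) for
 * TTBR1-region addresses, while EL2/EL3 regimes have only a single TBI
 * control at TCR bit 20.
 */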
/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
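/* Summary of the simple AP decoding above (derived directly from the
 * switch; added here as an illustrative aid):
 *   AP[2:1] = 0 -> privileged read/write, unprivileged no access
 *   AP[2:1] = 1 -> read/write at any privilege
 *   AP[2:1] = 2 -> privileged read-only, unprivileged no access
 *   AP[2:1] = 3 -> read-only at any privilege
 */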
static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}
/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
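/* Worked example (illustrative, derived from the index arithmetic above):
 * VA bits [31:20] index the level 1 table and each entry is 4 bytes, so
 * (address >> 18) & 0x3ffc is equivalent to ((address >> 20) & 0xfff) * 4.
 * For address 0x12345678 this selects entry 0x123, i.e. table offset 0x48c.
 */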
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, NULL);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1MB section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
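/* Worked example (illustrative): with a 4KB granule (stride == 9,
 * grainsize == 12) and a 40-bit IPA, a suggested starting level of 1 gives
 * startsizecheck = 40 - ((3 - 1) * 9 + 12) = 10, which falls inside the
 * permitted 1..13 range, so that setup would be accepted.
 */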
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
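/* Worked example (illustrative, following the conversion above): stage 2
 * attrs 0xf (Normal, Write-Back inner and outer) become MAIR-style 0xff,
 * i.e. Write-Back with R/W-allocate hints added; with HCR_EL2.CD set the
 * same input would instead yield 0x44 (Normal Non-cacheable).
 */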
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    uint32_t epd = 0;
    int32_t t0sz, t1sz;
    uint32_t tg;
    uint64_t ttbr;
    int ttbr_select;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride = 9;
    int32_t addrsize;
    int inputsize;
    int32_t tbi = 0;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid = true;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        level = 0;
        addrsize = 64;
        if (el > 1) {
            if (mmu_idx != ARMMMUIdx_S2NS) {
                tbi = extract64(tcr->raw_tcr, 20, 1);
            }
        } else {
            if (extract64(address, 55, 1)) {
                tbi = extract64(tcr->raw_tcr, 38, 1);
            } else {
                tbi = extract64(tcr->raw_tcr, 37, 1);
            }
        }
        tbi *= 8;

        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
         * invalid.
         */
        if (el > 1) {
            ttbr1_valid = false;
        }
    } else {
        level = 1;
        addrsize = 32;
        /* There is no TTBR1 for EL2 */
        if (el == 2) {
            ttbr1_valid = false;
        }
    }

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    if (aarch64) {
        /* AArch64 translation.  */
        t0sz = extract32(tcr->raw_tcr, 0, 6);
        t0sz = MIN(t0sz, 39);
        t0sz = MAX(t0sz, 16);
    } else if (mmu_idx != ARMMMUIdx_S2NS) {
        /* AArch32 stage 1 translation.  */
        t0sz = extract32(tcr->raw_tcr, 0, 3);
    } else {
        /* AArch32 stage 2 translation.  */
        bool sext = extract32(tcr->raw_tcr, 4, 1);
        bool sign = extract32(tcr->raw_tcr, 3, 1);
        /* Address size is 40-bit for a stage 2 translation,
         * and t0sz can be negative (from -8 to 7),
         * so we need to adjust it to use the TTBR selecting logic below.
         */
        addrsize = 40;
        t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8;

        /* If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
    }
    t1sz = extract32(tcr->raw_tcr, 16, 6);
    if (aarch64) {
        t1sz = MIN(t1sz, 39);
        t1sz = MAX(t1sz, 16);
    }
    if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (ttbr1_valid && t1sz &&
               !extract64(~address, addrsize - t1sz, t1sz - tbi)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz && ttbr1_valid) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = ARMFault_Translation;
        goto do_fault;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = regime_ttbr(env, mmu_idx, 0);
        if (el < 2) {
            epd = extract32(tcr->raw_tcr, 7, 1);
        }
        inputsize = addrsize - t0sz;

        tg = extract32(tcr->raw_tcr, 14, 2);
        if (tg == 1) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 2) { /* 16KB pages */
            stride = 11;
        }
    } else {
        /* We should only be here if TTBR1 is valid */
        assert(ttbr1_valid);

        ttbr = regime_ttbr(env, mmu_idx, 1);
        epd = extract32(tcr->raw_tcr, 23, 1);
        inputsize = addrsize - t1sz;

        tg = extract32(tcr->raw_tcr, 30, 2);
        if (tg == 3) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 1) { /* 16KB pages */
            stride = 11;
        }
    }

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
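        /* Worked example (illustrative): a 48-bit inputsize with a 4KB
         * granule (stride == 9) gives level = 4 - (48 - 4) / 9 = 4 - 4 = 0,
         * while a 39-bit inputsize gives level = 4 - 35 / 9 = 4 - 3 = 1.
         */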
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        attrs |= nstable << 3; /* NS */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_S2NS) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
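/* Example (illustrative): 0xe000e000 satisfies both predicates above, since
 * extract32(addr, 20, 12) == 0xe00 places it in the PPB and
 * extract32(addr, 29, 3) == 0x7 places it in the system region, while
 * 0xe1000000 is in the system region but outside the PPB.
 */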
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* an MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
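/* Worked example for the subregion handling above (illustrative): a
 * 256-byte region has rsize == 8 after the increment, so rsize -= 3 gives
 * 32-byte (2^5) subregions and ((address - base) >> 5) & 0x7 selects which
 * of the eight SRD bits governs the access.
 */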
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /* The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
                              int *prot, bool *is_subpage,
                              ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
        hit = true;
    } else {
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (hit) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    /*
     * TODO: this is a temporary hack to ignore the fact that the SAU region
     * is smaller than a page if this is an executable region. We never
     * supported small MPU regions, but we did (accidentally) allow small
     * SAU regions, and if we now made small SAU regions not be executable
     * then this would break previously working guest code. We can't
     * remove this until/unless we implement support for execution from
     * small regions.
     */
    if (*prot & PAGE_EXEC) {
        sattrs.subpage = false;
    }
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
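/* Worked example (illustrative): a c6_region[] value whose size field
 * ((base >> 1) & 0x1f) is 11 gives mask = (1 << 12) - 1 = 0xfff, i.e. a
 * 4KB region, and the (base ^ address) & ~mask comparison then ignores
 * those low 12 bits when matching.
 */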
/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
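/* Worked example (illustrative): combining stage 1 attrs 0xff (Normal
 * Write-Back, R/W-allocate) with stage 2 attrs 0x44 (Normal Non-cacheable)
 * takes the "non-cacheable has precedence" arm of
 * combine_cacheattr_nibble() for both halves, giving 0x44, which the rule
 * above then forces to Outer Shareable.
 */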
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early.  */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation.  */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms.  */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
/* Walk the page table and (if the mapping exists) add the page
 * to the TLB. Return false on success, or true on failure. Populate
 * fsr with ARM DFSR/IFSR fault register format value on failure.
 */
bool arm_tlb_fill(CPUState *cs, vaddr address,
                  MMUAccessType access_type, int mmu_idx,
                  ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, address, access_type,
                        core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
                        &attrs, &prot, &page_size, fi, NULL);
    if (!ret) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return 0;
    }

    return ret;
}

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    uint32_t mask;
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        mask = 0;
        if ((reg & 1) && el) {
            mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
        }
        if (!(reg & 4)) {
            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        }
        /* EPSR reads as zero */
        return xpsr_read(env) & mask;
    case 20: /* CONTROL */
        return env->v7m.control[env->v7m.secure];
    case 0x94: /* CONTROL_NS */
        /* We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS];
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                      " register %d\n", reg);
        return 0;
    }
}
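/* Note on the SYSm encodings used above and in v7m_msr below (derived from
 * the case labels; added as an illustrative aid): values with bit 7 set
 * (0x88 MSP_NS, 0x89 PSP_NS, 0x90 PRIMASK_NS, ...) are the Non-secure
 * aliases of the corresponding banked registers (8 MSP, 9 PSP,
 * 16 PRIMASK, ...), accessible only from Secure state.
 */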
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /* We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    if (arm_current_el(env) == 0 && reg > 7) {
        /* only xPSR sub-fields may be written by unprivileged */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            }
            return;
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        /* only APSR is actually writable */
        if (!(reg & 4)) {
            uint32_t apsrmask = 0;

            if (mask & 8) {
                apsrmask |= XPSR_NZCV | XPSR_Q;
            }
            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
                apsrmask |= XPSR_GE;
            }
            xpsr_write(env, val, apsrmask);
        }
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /* Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         * For v7M, we must just ignore explicit writes to SPSEL in handler
         * mode; for v8M the write is permitted but will have no effect.
         */
        if (arm_feature(env, ARM_FEATURE_V8) ||
            !arm_v7m_is_handler_mode(env)) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                      " register %d\n", reg);
    }
}
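
/* Worked example of the maskreg packing described above (illustrative
 * decode only): "MSR APSR_nzcvq, r0" has SYSm == 0 and mask<1> set, so
 * bits [11..0] are 0x800, giving mask == 8 here and an apsrmask of
 * XPSR_NZCV | XPSR_Q; "MSR APSR_g, r0" (bits == 0x400) gives mask == 4
 * and updates only the GE bits, and then only when the DSP extension
 * is present.
 */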
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;
    bool is_subpage;

    /* Work out what the security state and privilege level we're
     * interested in is...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /* We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /* MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &is_subpage,
                          &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}
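
/* Illustrative consumption of the response word assembled above (sketch
 * only; field positions follow the tt_resp construction):
 *
 *   uint32_t resp = helper_v7m_tt(env, addr, op);
 *   uint32_t mregion = extract32(resp, 0, 8);   // hit MPU region number
 *   bool mrvalid     = extract32(resp, 16, 1);  // MREGION field is valid
 *   bool r           = extract32(resp, 18, 1);  // readable
 *   bool rw          = extract32(resp, 19, 1);  // read/write
 *   bool s           = extract32(resp, 22, 1);  // address is Secure
 */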
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /* Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[maxidx];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /* If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /* OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /* Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}
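
/* Worked example of the block arithmetic above: for cpu->dcz_blocksize == 4
 * (a common DCZID setting), blocklen == 4 << 4 == 64 bytes, so a DC ZVA
 * with vaddr_in == 0x1234 zeroes the 64 bytes starting at
 * vaddr == 0x1234 & ~63 == 0x1200.  With a 1K TARGET_PAGE_SIZE, maxidx is
 * DIV_ROUND_UP(64, 1024) == 1, i.e. a single TLB lookup in the fast path.
 */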
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
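
/* Worked example of the overflow test in add16_sat(): for a == 0x7000 and
 * b == 0x2000 the 16-bit sum is 0x9000; (res ^ a) has the sign bit set
 * while (a ^ b) does not, so the operands agreed in sign but the result
 * flipped, and the value saturates to 0x7fff.  Two negative operands that
 * underflow saturate to 0x8000 the same way.
 */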
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
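
/* Worked example of the GE computation above: for UADD16 lane 0 with
 * a == 0x8000 and b == 0x8000 the 32-bit sum is 0x10000, so
 * (sum >> 16) == 1 (carry out) and ge |= 3 << 0, marking both bytes of
 * lane 0; USUB16 sets GE on no-borrow, i.e. when (sum >> 16) == 0.
 * These GE bits are what the SEL helper further down selects on.
 */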
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}
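
/* Worked examples: usad8(0x01020304, 0x04030201) sums the lane-wise byte
 * differences |4-1| + |3-2| + |2-3| + |1-4| == 8.  For sel_flags(),
 * flags is GE[3:0]; flags == 0x5 (0b0101) builds mask == 0x00ff00ff, so
 * bytes 0 and 2 of the result come from a and bytes 1 and 3 from b.
 */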
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}

uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);

    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    /* FZ16 does not generate an input denormal exception.  */
    i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
          & ~float_flag_input_denormal);

    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
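
/* Illustrative read-out: after a float divide-by-zero in fp_status,
 * get_float_exception_flags() reports float_flag_divbyzero, which
 * vfp_exceptbits_from_host() maps to bit 1 (DZC), so the value returned
 * above is the architectural FPSCR control/NZCV bits from
 * xregs[ARM_VFP_FPSCR] plus the accumulated cumulative exception bits.
 */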
/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}

void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    /* When ARMv8.2-FP16 is not supported, FZ16 is RES0.  */
    if (!arm_feature(env, ARM_FEATURE_V8_FP16)) {
        val &= ~FPCR_FZ16;
    }

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
        set_float_rounding_mode(i, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ16) {
        bool ftz_enabled = val & FPCR_FZ16;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ) {
        bool ftz_enabled = val & FPCR_FZ;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status);
    }
    if (changed & FPCR_DN) {
        bool dnan_enabled = val & FPCR_DN;
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status);
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
    }

    /* The exception flags are ORed together when we read fpscr so we
     * only need to preserve the current state in one of our
     * float_status values.
     */
    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.fp_status_f16);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
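
/* Illustrative write: vfp_set_fpscr(env, 1 << 24) sets FZ, so the FPCR_FZ
 * branch above enables flush-to-zero on fp_status; writing RMode == 0b01
 * in bits [23:22] selects FPROUNDING_POSINF and hence float_round_up for
 * both fp_status and fp_status_f16.
 */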
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP

float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}

/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, ftype, fsz, sign)                           \
ftype HELPER(name)(uint32_t x, void *fpstp)                         \
{                                                                   \
    float_status *fpst = fpstp;                                     \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst);     \
}

#define CONV_FTOI(name, ftype, fsz, sign, round)                \
sign##int32_t HELPER(name)(ftype x, void *fpstp)                \
{                                                               \
    float_status *fpst = fpstp;                                 \
    if (float##fsz##_is_any_nan(x)) {                           \
        float_raise(float_flag_invalid, fpst);                  \
        return 0;                                               \
    }                                                           \
    return float##fsz##_to_##sign##int32##round(x, fpst);       \
}

#define FLOAT_CONVS(name, p, ftype, fsz, sign)            \
CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign)            \
CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, )            \
CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, h, uint32_t, 16, )
FLOAT_CONVS(si, s, float32, 32, )
FLOAT_CONVS(si, d, float64, 64, )
FLOAT_CONVS(ui, h, uint32_t, 16, u)
FLOAT_CONVS(ui, s, float32, 32, u)
FLOAT_CONVS(ui, d, float64, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
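
/* For reference, FLOAT_CONVS(si, s, float32, 32, ) above expands to the
 * helpers vfp_sitos (int32 -> float32), vfp_tosis (float32 -> int32 in
 * the current rounding mode) and vfp_tosizs (float32 -> int32,
 * round-to-zero); the "u" variants produce vfp_uitos and friends.
 */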
/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    return float32_to_float64(x, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    return float64_to_float32(x, &env->vfp.fp_status);
}

/* VFP3 fixed point conversion.  */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                   \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t  x, uint32_t shift, \
                                     void *fpstp)                      \
{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); }

#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ROUND, suff) \
uint##isz##_t HELPER(vfp_to##name##p##suff)(float##fsz x,               \
                                            uint32_t shift,             \
                                            void *fpst)                 \
{                                                                       \
    if (unlikely(float##fsz##_is_any_nan(x))) {                         \
        float_raise(float_flag_invalid, fpst);                          \
        return 0;                                                       \
    }                                                                   \
    return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst);     \
}

#define VFP_CONV_FIX(name, p, fsz, isz, itype)                   \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,               \
                         float_round_to_zero, _round_to_zero)    \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,               \
                         get_float_rounding_mode(fpst), )

#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype)               \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,               \
                         get_float_rounding_mode(fpst), )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)

#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
#undef VFP_CONV_FIX_A64
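
/* For reference, VFP_CONV_FIX(sl, d, 64, 64, int32) above expands to
 * vfp_sltod (fixed-point int32 -> float64, scaled by 2^-shift) plus
 * vfp_tosld_round_to_zero and vfp_tosld (float64 -> fixed-point int32);
 * the scalbn-based implementation folds the 2^shift scaling into the
 * conversion itself rather than doing a separate multiply.
 */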
uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return int32_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return uint32_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return int64_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return uint64_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int16_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint16_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}

uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int32_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint32_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}

uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int64_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint64_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}
/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
{
    float_status *fp_status = fpstp;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
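
/* Typical use from generated code (illustrative sketch only): save the
 * old mode, run the operation in the temporary mode, then restore:
 *
 *   uint32_t old = helper_set_rmode(float_round_up, fpst);
 *   ... perform the FP operation that needs round-to-plus-infinity ...
 *   helper_set_rmode(old, fpst);
 */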
/* Half precision conversions.  */
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing input denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_inputs_to_zero(fpst);
    set_flush_inputs_to_zero(false, fpst);
    float32 r = float16_to_float32(a, !ahp_mode, fpst);
    set_flush_inputs_to_zero(save, fpst);
    return r;
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing output denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_to_zero(fpst);
    set_flush_to_zero(false, fpst);
    float16 r = float32_to_float16(a, !ahp_mode, fpst);
    set_flush_to_zero(save, fpst);
    return r;
}

float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing input denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_inputs_to_zero(fpst);
    set_flush_inputs_to_zero(false, fpst);
    float64 r = float16_to_float64(a, !ahp_mode, fpst);
    set_flush_inputs_to_zero(save, fpst);
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing output denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_to_zero(fpst);
    set_flush_to_zero(false, fpst);
    float16 r = float64_to_float16(a, !ahp_mode, fpst);
    set_flush_to_zero(save, fpst);
    return r;
}
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
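
/* These two helpers are the Newton-Raphson step primitives: VRECPS
 * computes 2 - a*b so that x' = x * (2 - d*x) refines 1/d, and VRSQRTS
 * computes (3 - a*b) / 2 so that x' = x * (3 - d*x*x) / 2 refines
 * 1/sqrt(d).  For example with d == 4 and x == 0.4, one step gives
 * 0.4 * (3 - 4*0.16) / 2 == 0.472, converging toward 0.5.
 */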
/* NEON helpers.  */

/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)
#define float16_maxnorm make_float16(0x7bff)
#define float32_maxnorm make_float32(0x7f7fffff)
#define float64_maxnorm make_float64(0x7fefffffffffffffLL)

/* Reciprocal functions
 *
 * The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate
 */

/* See RecipEstimate()
 *
 * input is a 9 bit fixed point number
 * input range 256 .. 511 for a number from 0.5 <= x < 1.0.
 * result range 256 .. 511 for a number from 1.0 to 511/256.
 */
static int recip_estimate(int input)
{
    int a, b, r;
    assert(256 <= input && input < 512);
    a = (input * 2) + 1;
    b = (1 << 19) / a;
    r = (b + 1) >> 1;
    assert(256 <= r && r < 512);
    return r;
}
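
/* Worked example: recip_estimate(256) (i.e. x == 0.5) computes a == 513,
 * b == (1 << 19) / 513 == 1022 and r == (1022 + 1) >> 1 == 511, i.e. an
 * estimate of 511/256 =~ 1.996 for 1/0.5, matching the ARM ARM
 * RecipEstimate() table.
 */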
/*
 * Common wrapper to call recip_estimate
 *
 * The parameters are exponent and 64 bit fraction (without implicit
 * bit) where the binary point is nominally at bit 52. Returns a
 * float64 which can then be rounded to the appropriate size by the
 * callee.
 */
static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
{
    uint32_t scaled, estimate;
    uint64_t result_frac;
    int result_exp;

    /* Handle sub-normals */
    if (*exp == 0) {
        if (extract64(frac, 51, 1) == 0) {
            *exp = -1;
            frac <<= 2;
        } else {
            frac <<= 1;
        }
    }

    /* scaled = UInt('1':fraction<51:44>) */
    scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    estimate = recip_estimate(scaled);

    result_exp = exp_off - *exp;
    result_frac = deposit64(0, 44, 8, estimate);
    if (result_exp == 0) {
        result_frac = deposit64(result_frac >> 1, 51, 1, 1);
    } else if (result_exp == -1) {
        result_frac = deposit64(result_frac >> 2, 50, 2, 1);
        result_exp = 0;
    }

    *exp = result_exp;

    return result_frac;
}

static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even: /* Round to Nearest */
        return true;
    case float_round_up: /* Round to +Inf */
        return !sign_bit;
    case float_round_down: /* Round to -Inf */
        return sign_bit;
    case float_round_to_zero: /* Round to Zero */
        return false;
    }

    g_assert_not_reached();
}
uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f16 = float16_squash_input_denormal(input, fpst);
    uint32_t f16_val = float16_val(f16);
    uint32_t f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(f16_val, 10, 5);
    uint32_t f16_frac = extract32(f16_val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float16_silence_nan(f16, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    } else if (float16_is_infinity(f16)) {
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, fpst);
        return float16_set_sign(float16_infinity, float16_is_neg(f16));
    } else if (float16_abs(f16) < (1 << 8)) {
        /* Abs(value) < 2.0^-16 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f16_sign)) {
            return float16_set_sign(float16_infinity, f16_sign);
        } else {
            return float16_set_sign(float16_maxnorm, f16_sign);
        }
    } else if (f16_exp >= 29 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    }

    f64_frac = call_recip_estimate(&f16_exp, 29,
                                   ((uint64_t) f16_frac) << (52 - 10));

    /* result = sign : result_exp<4:0> : fraction<51:42> */
    f16_val = deposit32(0, 15, 1, f16_sign);
    f16_val = deposit32(f16_val, 10, 5, f16_exp);
    f16_val = deposit32(f16_val, 0, 10,
                        extract64(f64_frac, 52 - 10, 10));
    return make_float16(f16_val);
}

float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    bool f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_silence_nan(f32, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_abs(f32) < (1ULL << 21)) {
        /* Abs(value) < 2.0^-128 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sign)) {
            return float32_set_sign(float32_infinity, f32_sign);
        } else {
            return float32_set_sign(float32_maxnorm, f32_sign);
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }

    f64_frac = call_recip_estimate(&f32_exp, 253,
                                   ((uint64_t) f32_frac) << (52 - 23));

    /* result = sign : result_exp<7:0> : fraction<51:29> */
    f32_val = deposit32(0, 31, 1, f32_sign);
    f32_val = deposit32(f32_val, 23, 8, f32_exp);
    f32_val = deposit32(f32_val, 0, 23,
                        extract64(f64_frac, 52 - 23, 23));
    return make_float32(f32_val);
}

float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(f64_val, 52, 11);
    uint64_t f64_frac = extract64(f64_val, 0, 52);

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_silence_nan(f64, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Abs(value) < 2.0^-1024 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sign)) {
            return float64_set_sign(float64_infinity, f64_sign);
        } else {
            return float64_set_sign(float64_maxnorm, f64_sign);
        }
    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac);

    /* result = sign : result_exp<10:0> : fraction<51:0>; */
    f64_val = deposit64(0, 63, 1, f64_sign);
    f64_val = deposit64(f64_val, 52, 11, f64_exp);
    f64_val = deposit64(f64_val, 0, 52, f64_frac);
    return make_float64(f64_val);
}
/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */

static int do_recip_sqrt_estimate(int a)
{
    int b, estimate;

    assert(128 <= a && a < 512);
    if (a < 256) {
        a = a * 2 + 1;
    } else {
        a = (a >> 1) << 1;
        a = (a + 1) * 2;
    }
    b = 512;
    while (a * (b + 1) * (b + 1) < (1 << 28)) {
        b += 1;
    }
    estimate = (b + 1) / 2;
    assert(256 <= estimate && estimate < 512);

    return estimate;
}


static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac)
{
    int estimate;
    uint32_t scaled;

    if (*exp == 0) {
        while (extract64(frac, 51, 1) == 0) {
            frac = frac << 1;
            *exp -= 1;
        }
        frac = extract64(frac, 0, 51) << 1;
    }

    if (*exp & 1) {
        /* scaled = UInt('01':fraction<51:45>) */
        scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
    } else {
        /* scaled = UInt('1':fraction<51:44>) */
        scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    }
    estimate = do_recip_sqrt_estimate(scaled);

    *exp = (exp_off - *exp) / 2;
    return extract64(estimate, 0, 8) << 44;
}
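
/* Worked example: do_recip_sqrt_estimate(256) (x == 0.5, even exponent)
 * rescales a to (256 + 1) * 2 == 514; the loop stops at b == 722 because
 * 514 * 723 * 723 exceeds 1 << 28, and the estimate is
 * (722 + 1) / 2 == 361, i.e. 361/256 =~ 1.414 =~ 1/sqrt(0.5).
 */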
uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
{
    float_status *s = fpstp;
    float16 f16 = float16_squash_input_denormal(input, s);
    uint16_t val = float16_val(f16);
    bool f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(val, 10, 5);
    uint16_t f16_frac = extract32(val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, s)) {
            float_raise(float_flag_invalid, s);
            nan = float16_silence_nan(f16, s);
        }
        if (s->default_nan_mode) {
            nan = float16_default_nan(s);
        }
        return nan;
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, s);
        return float16_set_sign(float16_infinity, f16_sign);
    } else if (f16_sign) {
        float_raise(float_flag_invalid, s);
        return float16_default_nan(s);
    } else if (float16_is_infinity(f16)) {
        return float16_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f16_frac) << (52 - 10);

    f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
    val = deposit32(0, 15, 1, f16_sign);
    val = deposit32(val, 10, 5, f16_exp);
    val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float16(val);
}

float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f32_frac) << 29;

    f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(15) */
    val = deposit32(0, 31, 1, f32_sign);
    val = deposit32(val, 23, 8, f32_exp);
    val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float32(val);
}

float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(val, 52, 11);
    uint64_t f64_frac = extract64(val, 0, 52);

    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, s)) {
            float_raise(float_flag_invalid, s);
            nan = float64_silence_nan(f64, s);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan(s);
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        float_raise(float_flag_invalid, s);
        return float64_default_nan(s);
    } else if (float64_is_infinity(f64)) {
        return float64_zero;
    }

    f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(44) */
    val = deposit64(0, 61, 1, f64_sign);
    val = deposit64(val, 52, 11, f64_exp);
    val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float64(val);
}
uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
{
    /* float_status *s = fpstp; */
    int input, estimate;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    input = extract32(a, 23, 9);
    estimate = recip_estimate(input);

    return deposit32(0, (32 - 9), 9, estimate);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
{
    int estimate;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));

    return deposit32(0, 23, 9, estimate);
}
/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}

/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    new_flags = get_float_exception_flags(fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for TIEAWAY and ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
/* CRC helpers.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
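
/* Illustrative use: a guest CRC32W instruction ends up here as
 * helper_crc32(acc, val, 4), while a single-byte CRC32B of 0x61 against
 * an all-ones accumulator is helper_crc32(0xffffffff, 0x61, 1).  The
 * pre/post inversion matches zlib's crc32(), which expects and returns
 * the non-inverted form.
 */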
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
static inline int fp_exception_el(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    int fpen;
    int cur_el = arm_current_el(env);

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
    int fp_el = fp_exception_el(env);
    uint32_t flags;

    if (is_a64(env)) {
        int sve_el = sve_exception_el(env);
        uint32_t zcr_len;

        *pc = env->pc;
        flags = ARM_TBFLAG_AARCH64_STATE_MASK;
        /* Get control bits for tagged addresses */
        flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
        flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
        flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT;

        /* If SVE is disabled, but FP is enabled,
           then the effective len is 0.  */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            int current_el = arm_current_el(env);
            ARMCPU *cpu = arm_env_get_cpu(env);

            zcr_len = cpu->sve_max_vq - 1;
            if (current_el <= 1) {
                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
            }
            if (current_el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
            }
            if (current_el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
            }
        }
        flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT;
    } else {
        *pc = env->regs[15];
        flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
            | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
            | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
            | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
            | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
        if (!(access_secure_reg(env))) {
            flags |= ARM_TBFLAG_NS_MASK;
        }
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            flags |= ARM_TBFLAG_VFPEN_MASK;
        }
        flags |= (extract32(env->cp15.c15_cpar, 0, 2)
                  << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
    }

    flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);

    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        flags |= ARM_TBFLAG_BE_DATA_MASK;
    }
    flags |= fp_el << ARM_TBFLAG_FPEXC_EL_SHIFT;

    if (arm_v7m_is_handler_mode(env)) {
        flags |= ARM_TBFLAG_HANDLER_MASK;
    }