#include "qemu/osdep.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/kvm.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);

#endif /* !CONFIG_USER_ONLY */

/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRE   0x1

static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

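/*
 * Note (not in the original source): like other gdbstub register
 * accessors, the VFP and FP/SIMD get/set functions here return the
 * number of bytes transferred for the register, or 0 for an
 * unrecognised register number.
 */
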
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        stfq_le_p(buf, env->vfp.regs[reg * 2]);
        stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
        return 16;
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        env->vfp.regs[reg * 2] = ldfq_le_p(buf);
        env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
        return 16;
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

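/*
 * The raw_read()/raw_write()/raw_ptr() helpers above deliberately
 * bypass any readfn/writefn side effects: they access the backing
 * storage in CPUARMState purely via ri->fieldoffset, and are the
 * building blocks for the migration/KVM (index, value) list code
 * below.
 */
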
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * written value.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

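/*
 * Note the two-pass structure above: count_cpreg() is run over the
 * hash table once to size the arrays, then add_cpreg_to_list() fills
 * them in, with the final assert catching any disagreement between
 * the two passes about which registers qualify.
 */
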
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR_EL2.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

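/*
 * The four trap checks above (TDOSA, TDRA, TDA, TPM) all follow the
 * same two-level pattern: a configurable EL2 trap (an MDCR_EL2 bit,
 * Non-secure only) is checked first for accesses from below EL2, then
 * the corresponding MDCR_EL3 bit for accesses from below EL3.
 */
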
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

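/*
 * For the TLBIIPAS2 ops above, the low bits of the register value
 * hold the page number of the intermediate physical address; shifting
 * left by 12 and sign-extracting 40 bits rebuilds the byte address
 * that is then flushed at page granularity.
 */
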
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR(NS)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR(S)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        uint32_t mask = 0;

        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

#ifndef CONFIG_USER_ONLY

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not support checking PMCCFILTR_EL0 register */

    if (!(env->cp15.c9_pmcr & PMCRE)) {
        return false;
    }

    return true;
}

void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}

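/*
 * c15_ccnt does double duty: while the cycle counter is enabled it
 * holds (current_ticks - counter_value), so a read computes
 * current_ticks - c15_ccnt; while it is disabled it holds the counter
 * value itself. pmccntr_sync() converts between the two
 * representations, which is why callers invoke it in pairs around any
 * change (PMCR or filter writes) that affects how the counter counts.
 */
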
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

#else /* CONFIG_USER_ONLY */

void pmccntr_sync(CPUARMState *env)
{
}

#endif

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0x7E000000;
    pmccntr_sync(env);
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        pmccfiltr_write(env, ri, value);
    }
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
     * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        return env->cp15.pmccfiltr_el0;
    } else {
        return 0;
    }
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= (1 << 31);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pminten &= ~value;
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        ret |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        ret |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};

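/*
 * A recurring pattern in the table above: most PMU registers are
 * described twice, once as the 32-bit AArch32 view and once as the
 * 64-bit _EL0/_EL1 view of the same underlying field, with one of the
 * pair marked ARM_CP_ALIAS so that the state is only reset and
 * migrated once.
 */
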
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

1527 static CPAccessResult
gt_cntfrq_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1530 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
1531 * Writable only at the highest implemented exception level.
1533 int el
= arm_current_el(env
);
1537 if (!extract32(env
->cp15
.c14_cntkctl
, 0, 2)) {
1538 return CP_ACCESS_TRAP
;
1542 if (!isread
&& ri
->state
== ARM_CP_STATE_AA32
&&
1543 arm_is_secure_below_el3(env
)) {
1544 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
1545 return CP_ACCESS_TRAP_UNCATEGORIZED
;
1553 if (!isread
&& el
< arm_highest_el(env
)) {
1554 return CP_ACCESS_TRAP_UNCATEGORIZED
;
1557 return CP_ACCESS_OK
;
1560 static CPAccessResult
gt_counter_access(CPUARMState
*env
, int timeridx
,
1563 unsigned int cur_el
= arm_current_el(env
);
1564 bool secure
= arm_is_secure(env
);
1566 /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
1568 !extract32(env
->cp15
.c14_cntkctl
, timeridx
, 1)) {
1569 return CP_ACCESS_TRAP
;
1572 if (arm_feature(env
, ARM_FEATURE_EL2
) &&
1573 timeridx
== GTIMER_PHYS
&& !secure
&& cur_el
< 2 &&
1574 !extract32(env
->cp15
.cnthctl_el2
, 0, 1)) {
1575 return CP_ACCESS_TRAP_EL2
;
1577 return CP_ACCESS_OK
;
1580 static CPAccessResult
gt_timer_access(CPUARMState
*env
, int timeridx
,
1583 unsigned int cur_el
= arm_current_el(env
);
1584 bool secure
= arm_is_secure(env
);
1586 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1587 * EL0[PV]TEN is zero.
1590 !extract32(env
->cp15
.c14_cntkctl
, 9 - timeridx
, 1)) {
1591 return CP_ACCESS_TRAP
;
1594 if (arm_feature(env
, ARM_FEATURE_EL2
) &&
1595 timeridx
== GTIMER_PHYS
&& !secure
&& cur_el
< 2 &&
1596 !extract32(env
->cp15
.cnthctl_el2
, 1, 1)) {
1597 return CP_ACCESS_TRAP_EL2
;
1599 return CP_ACCESS_OK
;
1602 static CPAccessResult
gt_pct_access(CPUARMState
*env
,
1603 const ARMCPRegInfo
*ri
,
1606 return gt_counter_access(env
, GTIMER_PHYS
, isread
);
1609 static CPAccessResult
gt_vct_access(CPUARMState
*env
,
1610 const ARMCPRegInfo
*ri
,
1613 return gt_counter_access(env
, GTIMER_VIRT
, isread
);
1616 static CPAccessResult
gt_ptimer_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1619 return gt_timer_access(env
, GTIMER_PHYS
, isread
);
1622 static CPAccessResult
gt_vtimer_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1625 return gt_timer_access(env
, GTIMER_VIRT
, isread
);
1628 static CPAccessResult
gt_stimer_access(CPUARMState
*env
,
1629 const ARMCPRegInfo
*ri
,
1632 /* The AArch64 register view of the secure physical timer is
1633 * always accessible from EL3, and configurably accessible from
1636 switch (arm_current_el(env
)) {
1638 if (!arm_is_secure(env
)) {
1639 return CP_ACCESS_TRAP
;
1641 if (!(env
->cp15
.scr_el3
& SCR_ST
)) {
1642 return CP_ACCESS_TRAP_EL3
;
1644 return CP_ACCESS_OK
;
1647 return CP_ACCESS_TRAP
;
1649 return CP_ACCESS_OK
;
1651 g_assert_not_reached();
1655 static uint64_t gt_get_countervalue(CPUARMState
*env
)
1657 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) / GTIMER_SCALE
;
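/*
 * Dividing the nanosecond QEMU_CLOCK_VIRTUAL by GTIMER_SCALE gives
 * the fixed frequency the generic timers are modelled at; the CNTFRQ
 * reset value of (1000 * 1000 * 1000) / GTIMER_SCALE below advertises
 * the same frequency to the guest.
 */
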
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}

static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

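/*
 * TVAL is a signed 32-bit downcounter view of the same state as CVAL:
 * reading it returns (CVAL - current count) truncated to 32 bits, and
 * the write function below sets CVAL = current count + the
 * sign-extended value, so the two register views stay consistent.
 */
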
1743 static void gt_tval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1747 uint64_t offset
= timeridx
== GTIMER_VIRT
? env
->cp15
.cntvoff_el2
: 0;
1749 trace_arm_gt_tval_write(timeridx
, value
);
1750 env
->cp15
.c14_timer
[timeridx
].cval
= gt_get_countervalue(env
) - offset
+
1751 sextract64(value
, 0, 32);
1752 gt_recalc_timer(arm_env_get_cpu(env
), timeridx
);
1755 static void gt_ctl_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1759 ARMCPU
*cpu
= arm_env_get_cpu(env
);
1760 uint32_t oldval
= env
->cp15
.c14_timer
[timeridx
].ctl
;
1762 trace_arm_gt_ctl_write(timeridx
, value
);
1763 env
->cp15
.c14_timer
[timeridx
].ctl
= deposit64(oldval
, 0, 2, value
);
1764 if ((oldval
^ value
) & 1) {
1765 /* Enable toggled */
1766 gt_recalc_timer(cpu
, timeridx
);
1767 } else if ((oldval
^ value
) & 2) {
1768 /* IMASK toggled: don't need to recalculate,
1769 * just set the interrupt line based on ISTATUS
1771 int irqstate
= (oldval
& 4) && !(value
& 2);
1773 trace_arm_gt_imask_toggle(timeridx
, irqstate
);
1774 qemu_set_irq(cpu
->gt_timer_outputs
[timeridx
], irqstate
);
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
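
/* The four callbacks above are the QEMU_CLOCK_VIRTUAL timer expiry handlers
 * for the physical, virtual, hypervisor and secure timers; each one simply
 * recalculates the timer state, which raises or lowers the corresponding
 * interrupt line as needed.
 */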
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL(S)",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL(S)",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

#else

/* In user-mode none of the generic timer registers are accessible,
 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
 * so instead just don't register any of them.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    REGINFO_SENTINEL
};

#endif
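
/* (Assuming GTIMER_SCALE keeps its usual definition elsewhere in the tree
 * of 16 nanoseconds per tick, the CNTFRQ_EL0 reset value in the table above
 * works out to 62.5 MHz.)
 */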
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}

#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */
static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * ATS1Hx always uses the 64bit format (not supported yet).
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & HCR_VM;
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
#endif
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
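
/* Worked example (a sketch, not from the architecture manual): with the
 * extended-format value 0x13 (region 0 AP = 3 in bits [1:0], region 1
 * AP = 1 in bits [5:4]), simple_mpu_ap_bits() returns 0x7: region 0's
 * field lands in bits [1:0] and region 1's in bits [3:2].
 */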
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}
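
/* DRBAR/DRSR/DRACR below are windows onto per-region state: pmsav7_read()
 * and pmsav7_write() index the underlying arrays with the region number
 * currently selected by RGNR (env->pmsav7.rnr), so guests program one
 * region at a time.
 */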
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
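
/* Example of the short-descriptor mask computation above: TTBCR.N = 2
 * gives mask = 0xc0000000 and base_mask = 0xfffff000, i.e. TTBR0 covers
 * the bottom 1GB of the address space while addresses with either of the
 * top two bits set are translated via TTBR1.
 */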
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}

static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* 64 bit accesses to the TTBRs can change the ASID and so we
     * must flush the TLB.
     */
    if (cpreg_field_is_64bit(ri)) {
        ARMCPU *cpu = arm_env_get_cpu(env);

        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0 |
                            ARMMMUIdxBit_S2NS);
        raw_write(env, ri, value);
    }
}
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}

static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
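
/* When EL2 is implemented and we are at NS EL1, the MIDR and MPIDR reads
 * above are redirected to VPIDR_EL2/VMPIDR_EL2, which lets a hypervisor
 * present a different CPU identity to its guest.
 */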
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0 |
                                ARMMMUIdxBit_S2NS);
        } else {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0);
        }
    }
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);
    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else if (has_el2) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0 |
                                            ARMMMUIdxBit_S2NS);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S1SE1 |
                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S12NSE1 |
                                 ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    bool sec = arm_is_secure_below_el3(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (sec) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S1SE1 |
                                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S12NSE1 |
                                                 ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
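
/* Note the pattern in the helpers above: the Inner Shareable ("IS")
 * variants use the *_all_cpus_synced flush functions so the invalidation
 * is broadcast to every vCPU, while the plain variants only flush the
 * local TLB.
 */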
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* We don't implement EL2, so the only control on DC ZVA is the
     * bit in the SCTLR which can prohibit access for EL0.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
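
/* DCZID_EL0 as returned above: bits [3:0] are the DC ZVA block size
 * (cpu->dcz_blocksize, which we take to hold log2 of the size in words)
 * and bit 4 is DZP, set when DC ZVA is prohibited.
 */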
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));
}

static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
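
/* SDCR is the AArch32 view of MDCR_EL3; the write above masks with
 * SDCR_VALID_MASK so only the architecturally defined SDCR fields stick
 * in the shared underlying state.
 */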
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};
/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     */
    if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
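
/* Registers defined only when EL2 (the hypervisor exception level) is
 * implemented; contrast el3_no_el2_cp_reginfo above, which provides the
 * constant/RAZ-WI view of the same registers when EL2 is absent.
 */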
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
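
/* Registers defined only when EL3 (the Secure monitor exception level) is
 * implemented, including the AArch32 Secure banked views such as SCR and
 * MVBAR, which trap via access_trap_aa32s_el1.
 */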
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
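
/* Bring QEMU's view of watchpoint n into line with the architectural
 * DBGWVRn/DBGWCRn state: remove any stale QEMU watchpoint and, if the
 * E bit is set, insert one covering the region selected by MASK or BAS.
 */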
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}
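
/* Write handlers for DBGWVRn/DBGWCRn: store the raw value, then resync
 * the corresponding QEMU watchpoint (ri->crm holds the watchpoint index).
 */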
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
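
/* Bring QEMU's view of breakpoint n into line with the architectural
 * DBGBVRn/DBGBCRn state; only address-match breakpoint types are
 * actually implemented here.
 */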
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}
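
/* Write handlers for DBGBVRn/DBGBCRn, mirroring the watchpoint handlers
 * above: store the (constrained) raw value, then resync breakpoint n.
 */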
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
     * of the debug registers such as number of breakpoints;
     * check that if they both exist then they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr1 = cpu->id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr0 = cpu->id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
4607 void register_cp_regs_for_features(ARMCPU
*cpu
)
4609 /* Register all the coprocessor registers based on feature bits */
4610 CPUARMState
*env
= &cpu
->env
;
4611 if (arm_feature(env
, ARM_FEATURE_M
)) {
4612 /* M profile has no coprocessor registers */
4616 define_arm_cp_regs(cpu
, cp_reginfo
);
4617 if (!arm_feature(env
, ARM_FEATURE_V8
)) {
4618 /* Must go early as it is full of wildcards that may be
4619 * overridden by later definitions.
4621 define_arm_cp_regs(cpu
, not_v8_cp_reginfo
);
4624 if (arm_feature(env
, ARM_FEATURE_V6
)) {
4625 /* The ID registers all have impdef reset values */
4626 ARMCPRegInfo v6_idregs
[] = {
4627 { .name
= "ID_PFR0", .state
= ARM_CP_STATE_BOTH
,
4628 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 0,
4629 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4630 .resetvalue
= cpu
->id_pfr0
},
4631 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
4632 * the value of the GIC field until after we define these regs.
4634 { .name
= "ID_PFR1", .state
= ARM_CP_STATE_BOTH
,
4635 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 1,
4636 .access
= PL1_R
, .type
= ARM_CP_NO_RAW
,
4637 .readfn
= id_pfr1_read
,
4638 .writefn
= arm_cp_write_ignore
},
4639 { .name
= "ID_DFR0", .state
= ARM_CP_STATE_BOTH
,
4640 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 2,
4641 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4642 .resetvalue
= cpu
->id_dfr0
},
4643 { .name
= "ID_AFR0", .state
= ARM_CP_STATE_BOTH
,
4644 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 3,
4645 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4646 .resetvalue
= cpu
->id_afr0
},
4647 { .name
= "ID_MMFR0", .state
= ARM_CP_STATE_BOTH
,
4648 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 4,
4649 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4650 .resetvalue
= cpu
->id_mmfr0
},
4651 { .name
= "ID_MMFR1", .state
= ARM_CP_STATE_BOTH
,
4652 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 5,
4653 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4654 .resetvalue
= cpu
->id_mmfr1
},
4655 { .name
= "ID_MMFR2", .state
= ARM_CP_STATE_BOTH
,
4656 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 6,
4657 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4658 .resetvalue
= cpu
->id_mmfr2
},
4659 { .name
= "ID_MMFR3", .state
= ARM_CP_STATE_BOTH
,
4660 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 7,
4661 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4662 .resetvalue
= cpu
->id_mmfr3
},
4663 { .name
= "ID_ISAR0", .state
= ARM_CP_STATE_BOTH
,
4664 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 0,
4665 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4666 .resetvalue
= cpu
->id_isar0
},
4667 { .name
= "ID_ISAR1", .state
= ARM_CP_STATE_BOTH
,
4668 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 1,
4669 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4670 .resetvalue
= cpu
->id_isar1
},
4671 { .name
= "ID_ISAR2", .state
= ARM_CP_STATE_BOTH
,
4672 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 2,
4673 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4674 .resetvalue
= cpu
->id_isar2
},
4675 { .name
= "ID_ISAR3", .state
= ARM_CP_STATE_BOTH
,
4676 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 3,
4677 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4678 .resetvalue
= cpu
->id_isar3
},
4679 { .name
= "ID_ISAR4", .state
= ARM_CP_STATE_BOTH
,
4680 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 4,
4681 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4682 .resetvalue
= cpu
->id_isar4
},
4683 { .name
= "ID_ISAR5", .state
= ARM_CP_STATE_BOTH
,
4684 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 5,
4685 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4686 .resetvalue
= cpu
->id_isar5
},
4687 { .name
= "ID_MMFR4", .state
= ARM_CP_STATE_BOTH
,
4688 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 6,
4689 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4690 .resetvalue
= cpu
->id_mmfr4
},
4691 /* 7 is as yet unallocated and must RAZ */
4692 { .name
= "ID_ISAR7_RESERVED", .state
= ARM_CP_STATE_BOTH
,
4693 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 7,
4694 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4698 define_arm_cp_regs(cpu
, v6_idregs
);
4699 define_arm_cp_regs(cpu
, v6_cp_reginfo
);
4701 define_arm_cp_regs(cpu
, not_v6_cp_reginfo
);
4703 if (arm_feature(env
, ARM_FEATURE_V6K
)) {
4704 define_arm_cp_regs(cpu
, v6k_cp_reginfo
);
4706 if (arm_feature(env
, ARM_FEATURE_V7MP
) &&
4707 !arm_feature(env
, ARM_FEATURE_PMSA
)) {
4708 define_arm_cp_regs(cpu
, v7mp_cp_reginfo
);
4710 if (arm_feature(env
, ARM_FEATURE_V7
)) {
4711 /* v7 performance monitor control register: same implementor
4712 * field as main ID register, and we implement only the cycle
4715 #ifndef CONFIG_USER_ONLY
4716 ARMCPRegInfo pmcr
= {
4717 .name
= "PMCR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 0,
4719 .type
= ARM_CP_IO
| ARM_CP_ALIAS
,
4720 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmcr
),
4721 .accessfn
= pmreg_access
, .writefn
= pmcr_write
,
4722 .raw_writefn
= raw_write
,
4724 ARMCPRegInfo pmcr64
= {
4725 .name
= "PMCR_EL0", .state
= ARM_CP_STATE_AA64
,
4726 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 0,
4727 .access
= PL0_RW
, .accessfn
= pmreg_access
,
4729 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmcr
),
4730 .resetvalue
= cpu
->midr
& 0xff000000,
4731 .writefn
= pmcr_write
, .raw_writefn
= raw_write
,
4733 define_one_arm_cp_reg(cpu
, &pmcr
);
4734 define_one_arm_cp_reg(cpu
, &pmcr64
);
4736 ARMCPRegInfo clidr
= {
4737 .name
= "CLIDR", .state
= ARM_CP_STATE_BOTH
,
4738 .opc0
= 3, .crn
= 0, .crm
= 0, .opc1
= 1, .opc2
= 1,
4739 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= cpu
->clidr
4741 define_one_arm_cp_reg(cpu
, &clidr
);
4742 define_arm_cp_regs(cpu
, v7_cp_reginfo
);
4743 define_debug_regs(cpu
);
4745 define_arm_cp_regs(cpu
, not_v7_cp_reginfo
);
4747 if (arm_feature(env
, ARM_FEATURE_V8
)) {
4748 /* AArch64 ID registers, which all have impdef reset values.
4749 * Note that within the ID register ranges the unused slots
4750 * must all RAZ, not UNDEF; future architecture versions may
4751 * define new registers here.
4753 ARMCPRegInfo v8_idregs
[] = {
4754 /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
4755 * know the right value for the GIC field until after we
4756 * define these regs.
4758 { .name
= "ID_AA64PFR0_EL1", .state
= ARM_CP_STATE_AA64
,
4759 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 0,
4760 .access
= PL1_R
, .type
= ARM_CP_NO_RAW
,
4761 .readfn
= id_aa64pfr0_read
,
4762 .writefn
= arm_cp_write_ignore
},
4763 { .name
= "ID_AA64PFR1_EL1", .state
= ARM_CP_STATE_AA64
,
4764 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 1,
4765 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4766 .resetvalue
= cpu
->id_aa64pfr1
},
4767 { .name
= "ID_AA64PFR2_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4768 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 2,
4769 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4771 { .name
= "ID_AA64PFR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4772 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 3,
4773 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4775 { .name
= "ID_AA64PFR4_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4776 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 4,
4777 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4779 { .name
= "ID_AA64PFR5_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4780 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 5,
4781 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4783 { .name
= "ID_AA64PFR6_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4784 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 6,
4785 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4787 { .name
= "ID_AA64PFR7_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4788 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 7,
4789 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4791 { .name
= "ID_AA64DFR0_EL1", .state
= ARM_CP_STATE_AA64
,
4792 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 0,
4793 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4794 .resetvalue
= cpu
->id_aa64dfr0
},
4795 { .name
= "ID_AA64DFR1_EL1", .state
= ARM_CP_STATE_AA64
,
4796 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 1,
4797 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4798 .resetvalue
= cpu
->id_aa64dfr1
},
4799 { .name
= "ID_AA64DFR2_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4800 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 2,
4801 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4803 { .name
= "ID_AA64DFR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4804 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 3,
4805 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4807 { .name
= "ID_AA64AFR0_EL1", .state
= ARM_CP_STATE_AA64
,
4808 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 4,
4809 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4810 .resetvalue
= cpu
->id_aa64afr0
},
4811 { .name
= "ID_AA64AFR1_EL1", .state
= ARM_CP_STATE_AA64
,
4812 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 5,
4813 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4814 .resetvalue
= cpu
->id_aa64afr1
},
4815 { .name
= "ID_AA64AFR2_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4816 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 6,
4817 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4819 { .name
= "ID_AA64AFR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4820 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 7,
4821 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4823 { .name
= "ID_AA64ISAR0_EL1", .state
= ARM_CP_STATE_AA64
,
4824 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 0,
4825 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4826 .resetvalue
= cpu
->id_aa64isar0
},
4827 { .name
= "ID_AA64ISAR1_EL1", .state
= ARM_CP_STATE_AA64
,
4828 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 1,
4829 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4830 .resetvalue
= cpu
->id_aa64isar1
},
4831 { .name
= "ID_AA64ISAR2_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4832 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 2,
4833 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4835 { .name
= "ID_AA64ISAR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4836 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
            .name = "TLBTR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
            .name = "MPUIR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow
             * write access, so that they ignore writes rather than causing
             * them to UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_tlbtr_reginfo.access = PL1_RW;
            id_mpuir_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }

    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .type = ARM_CP_CONST,
              .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
}
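/* The register counts passed to gdb_register_coprocessor() above follow
 * from the get/set helpers earlier in this file: 32 V regs plus FPSR and
 * FPCR give 34 for AArch64; 32 D regs plus 16 Q aliases plus
 * FPSID/FPSCR/FPEXC give 51 for Neon; 32 or 16 D regs plus those three
 * xregs give 35 for VFPv3 and 19 for baseline VFP respectively.
 */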
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}
static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n", name);
    g_free(name);
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
#ifdef CONFIG_KVM
    /* The 'host' CPU type is dynamically registered only if KVM is
     * enabled, so we have to special-case it here:
     */
    (*cpu_fprintf)(f, "  host (only available in KVM mode)\n");
#endif
}
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    /* Reset the secure state to the specific incoming state. This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank. This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
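/* Illustrative example (hypothetical register, not a real definition): a
 * reginfo wildcarded as
 *   { .name = "FOO", .cp = 15, .crn = 9, .crm = CP_ANY,
 *     .opc1 = 0, .opc2 = 0, ... }
 * reaches this function sixteen times, once per concrete crm value. Only
 * the crm == 0 instance stays migratable; the CP_ANY check above marks the
 * other fifteen ARM_CP_ALIAS, so the state is transferred exactly once.
 */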
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2);
                            break;
                        default:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure
                         * instance of cp15 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2);
                    }
                }
            }
        }
    }
}
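/* Usage sketch (hypothetical register, for illustration only): most callers
 * go through the define_one_arm_cp_reg()/define_arm_cp_regs() wrappers, e.g.
 *
 *   static const ARMCPRegInfo demo_reginfo = {
 *       .name = "DEMO", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
 *       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
 *   };
 *   define_one_arm_cp_reg(cpu, &demo_reginfo);
 *
 * which expands any CP_ANY wildcards and inserts one hash table entry per
 * concrete encoding, as implemented above.
 */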
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->cp15.hcr_el2 & HCR_TGE) &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            !arm_is_secure_below_el3(env)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
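/* Reminder on the cached flag representation used above: env->ZF holds a
 * value that is zero exactly when the Z flag is set, env->NF and env->VF
 * keep their flag in bit 31, and env->CF holds C in bit 0, which is why
 * cpsr_read() shifts and masks each field individually when reassembling
 * the architectural CPSR value.
 */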
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    if (num == INT_MIN && den == -1)
        return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
#if defined(CONFIG_USER_ONLY)

/* These should probably raise undefined insn exceptions. */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}

uint32_t QEMU_NORETURN HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria. Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken". The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *       routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *       routed to EL2.
 * In these two cases, the below table contain a target of EL1. This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *    BIT  IRQ  IMO      Non-secure          Secure
 *    EL3  FIQ  RW  FMO  EL0 EL1 EL2 EL3     EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
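/* Worked example of a lookup: a physical IRQ taken from non-secure EL0 with
 * SCR.IRQ = 0, HCR.IMO = 0 and a 64-bit EL3 indexes
 * target_el_table[1][0][rw][0][0][0], i.e. one of the "1 0 x 0" rows above,
 * which yields EL1 -- the IRQ is handled by the normal EL1 vector.
 */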
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    int rw;
    int scr;
    int hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    int is64 = arm_feature(env, ARM_FEATURE_AARCH64);

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
        break;
    };

    /* If HCR.TGE is set then HCR is treated as being 1 */
    hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}
static void v7m_push(CPUARMState *env, uint32_t val)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    env->regs[13] -= 4;
    stl_phys(cs->as, env->regs[13], val);
}

/* Return true if we're using the process stack pointer (not the MSP) */
static bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/* Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}
void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /* Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}
/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /* All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
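/* The swap above is effectively a rotation of the four stack pointer slots:
 * regs[13] always holds the SP selected by the *current* security state and
 * SPSEL, v7m.other_sp holds that state's other stack, and
 * other_ss_msp/other_ss_psp hold both stacks of the opposite security state.
 */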
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /* This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
}
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /* target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data(env, sp, nextinst);
    cpu_stl_data(env, sp + 4, saved_psr);

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /* Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
}
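/* The 0xfeffffff written to LR above is the FNC_RETURN magic value: when the
 * non-secure function eventually branches back to it, the jump to this magic
 * address is trapped and the stack frame pushed here is unwound on the
 * secure function return path.
 */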
static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /* Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
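/* Summary of the selection above:
 *   requested state == current state, wanted SP in use  -> &env->regs[13]
 *   requested state == current state, other selection   -> &v7m.other_sp
 *   other security state, PSP wanted                    -> &v7m.other_ss_psp
 *   other security state, MSP wanted                    -> &v7m.other_ss_msp
 */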
static uint32_t arm_v7m_load_vector(ARMCPU *cpu, bool targets_secure)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    hwaddr vec = env->v7m.vecbase[targets_secure] + env->v7m.exception * 4;
    uint32_t addr;

    addr = address_space_ldl(cs->as, vec,
                             MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        /* Architecturally this should cause a HardFault setting HSFR.VECTTBL,
         * which would then be immediately followed by our failing to load
         * the entry vector for that HardFault, which is a Lockup case.
         * Since we don't model Lockup, we just report this guest error
         * via cpu_abort().
         */
        cpu_abort(cs, "Failed to read from %s exception vector table "
                  "entry %08x\n", targets_secure ? "secure" : "nonsecure",
                  (unsigned)vec);
    }
    return addr;
}
*cpu
, uint32_t lr
, bool dotailchain
)
6415 /* For v8M, push the callee-saves register part of the stack frame.
6416 * Compare the v8M pseudocode PushCalleeStack().
6417 * In the tailchaining case this may not be the current stack.
6419 CPUARMState
*env
= &cpu
->env
;
6420 CPUState
*cs
= CPU(cpu
);
6421 uint32_t *frame_sp_p
;
6425 frame_sp_p
= get_v7m_sp_ptr(env
, true,
6426 lr
& R_V7M_EXCRET_MODE_MASK
,
6427 lr
& R_V7M_EXCRET_SPSEL_MASK
);
6429 frame_sp_p
= &env
->regs
[13];
6432 frameptr
= *frame_sp_p
- 0x28;
6434 stl_phys(cs
->as
, frameptr
, 0xfefa125b);
6435 stl_phys(cs
->as
, frameptr
+ 0x8, env
->regs
[4]);
6436 stl_phys(cs
->as
, frameptr
+ 0xc, env
->regs
[5]);
6437 stl_phys(cs
->as
, frameptr
+ 0x10, env
->regs
[6]);
6438 stl_phys(cs
->as
, frameptr
+ 0x14, env
->regs
[7]);
6439 stl_phys(cs
->as
, frameptr
+ 0x18, env
->regs
[8]);
6440 stl_phys(cs
->as
, frameptr
+ 0x1c, env
->regs
[9]);
6441 stl_phys(cs
->as
, frameptr
+ 0x20, env
->regs
[10]);
6442 stl_phys(cs
->as
, frameptr
+ 0x24, env
->regs
[11]);
6444 *frame_sp_p
= frameptr
;
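/* Callee-saves frame layout written above, as offsets from the new SP:
 * 0x00 integrity signature 0xfefa125b, 0x04 reserved, 0x08..0x24 r4-r11.
 * The same 0x28-byte layout is signature-checked and popped again in
 * do_v7m_exception_exit() below.
 */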
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain)
{
    /* Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;

    targets_secure = armv7m_nvic_acknowledge_irq(env->nvic);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /* The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /* We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /* We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) {
                    v7m_push_callee_stack(cpu, lr, dotailchain);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /* Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                /* Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;

                for (i = 0; i < 13; i++) {
                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    addr = arm_v7m_load_vector(cpu, targets_secure);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
static void v7m_push_stack(ARMCPU *cpu)
{
    /* Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     */
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);

    /* Align stack pointer if the guest wants that */
    if ((env->regs[13] & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        env->regs[13] -= 4;
        xpsr |= XPSR_SPREALIGN;
    }
    /* Switch to the handler mode. */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
}
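/* The basic exception frame pushed above is, from lowest address upwards:
 * r0, r1, r2, r3, r12, lr, the return PC and xPSR -- eight words, matching
 * the pops in do_v7m_exception_exit() below.
 */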
6562 static void do_v7m_exception_exit(ARMCPU
*cpu
)
6564 CPUARMState
*env
= &cpu
->env
;
6565 CPUState
*cs
= CPU(cpu
);
6568 bool ufault
= false;
6569 bool sfault
= false;
6570 bool return_to_sp_process
;
6571 bool return_to_handler
;
6572 bool rettobase
= false;
6573 bool exc_secure
= false;
6574 bool return_to_secure
;
6576 /* If we're not in Handler mode then jumps to magic exception-exit
6577 * addresses don't have magic behaviour. However for the v8M
6578 * security extensions the magic secure-function-return has to
6579 * work in thread mode too, so to avoid doing an extra check in
6580 * the generated code we allow exception-exit magic to also cause the
6581 * internal exception and bring us here in thread mode. Correct code
6582 * will never try to do this (the following insn fetch will always
6583 * fault) so we the overhead of having taken an unnecessary exception
6586 if (!arm_v7m_is_handler_mode(env
)) {
6590 /* In the spec pseudocode ExceptionReturn() is called directly
6591 * from BXWritePC() and gets the full target PC value including
6592 * bit zero. In QEMU's implementation we treat it as a normal
6593 * jump-to-register (which is then caught later on), and so split
6594 * the target value up between env->regs[15] and env->thumb in
6595 * gen_bx(). Reconstitute it.
6597 excret
= env
->regs
[15];
6602 qemu_log_mask(CPU_LOG_INT
, "Exception return: magic PC %" PRIx32
6603 " previous exception %d\n",
6604 excret
, env
->v7m
.exception
);
6606 if ((excret
& R_V7M_EXCRET_RES1_MASK
) != R_V7M_EXCRET_RES1_MASK
) {
6607 qemu_log_mask(LOG_GUEST_ERROR
, "M profile: zero high bits in exception "
6608 "exit PC value 0x%" PRIx32
" are UNPREDICTABLE\n",
6612 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
6613 /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
6614 * we pick which FAULTMASK to clear.
6616 if (!env
->v7m
.secure
&&
6617 ((excret
& R_V7M_EXCRET_ES_MASK
) ||
6618 !(excret
& R_V7M_EXCRET_DCRS_MASK
))) {
6620 /* For all other purposes, treat ES as 0 (R_HXSR) */
6621 excret
&= ~R_V7M_EXCRET_ES_MASK
;
6625 if (env
->v7m
.exception
!= ARMV7M_EXCP_NMI
) {
6626 /* Auto-clear FAULTMASK on return from other than NMI.
6627 * If the security extension is implemented then this only
6628 * happens if the raw execution priority is >= 0; the
6629 * value of the ES bit in the exception return value indicates
6630 * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
6632 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
6633 exc_secure
= excret
& R_V7M_EXCRET_ES_MASK
;
6634 if (armv7m_nvic_raw_execution_priority(env
->nvic
) >= 0) {
6635 env
->v7m
.faultmask
[exc_secure
] = 0;
6638 env
->v7m
.faultmask
[M_REG_NS
] = 0;
6642 switch (armv7m_nvic_complete_irq(env
->nvic
, env
->v7m
.exception
,
6645 /* attempt to exit an exception that isn't active */
6649 /* still an irq active now */
6652 /* we returned to base exception level, no nesting.
6653 * (In the pseudocode this is written using "NestedActivation != 1"
6654 * where we have 'rettobase == false'.)
6659 g_assert_not_reached();
6662 return_to_handler
= !(excret
& R_V7M_EXCRET_MODE_MASK
);
6663 return_to_sp_process
= excret
& R_V7M_EXCRET_SPSEL_MASK
;
6664 return_to_secure
= arm_feature(env
, ARM_FEATURE_M_SECURITY
) &&
6665 (excret
& R_V7M_EXCRET_S_MASK
);
6667 if (arm_feature(env
, ARM_FEATURE_V8
)) {
6668 if (!arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
6669 /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
6670 * we choose to take the UsageFault.
6672 if ((excret
& R_V7M_EXCRET_S_MASK
) ||
6673 (excret
& R_V7M_EXCRET_ES_MASK
) ||
6674 !(excret
& R_V7M_EXCRET_DCRS_MASK
)) {
6678 if (excret
& R_V7M_EXCRET_RES0_MASK
) {
6682 /* For v7M we only recognize certain combinations of the low bits */
6683 switch (excret
& 0xf) {
6684 case 1: /* Return to Handler */
6686 case 13: /* Return to Thread using Process stack */
6687 case 9: /* Return to Thread using Main stack */
6688 /* We only need to check NONBASETHRDENA for v7M, because in
6689 * v8M this bit does not exist (it is RES1).
6692 !(env
->v7m
.ccr
[env
->v7m
.secure
] &
6693 R_V7M_CCR_NONBASETHRDENA_MASK
)) {
6703 env
->v7m
.sfsr
|= R_V7M_SFSR_INVER_MASK
;
6704 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_SECURE
, false);
6705 v7m_exception_taken(cpu
, excret
, true);
6706 qemu_log_mask(CPU_LOG_INT
, "...taking SecureFault on existing "
6707 "stackframe: failed EXC_RETURN.ES validity check\n");
    if (ufault) {
        /* Bad exception return: instead of popping the exception
         * stack, directly take a usage fault on the current stack.
         */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        v7m_exception_taken(cpu, excret, true);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                      "stackframe: failed exception return integrity check\n");
        return;
    }

    /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
     * Handler mode (and will be until we write the new XPSR.Interrupt
     * field) this does not switch around the current stack pointer.
     */
    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);

    switch_v7m_security_state(env, return_to_secure);
    {
        /* The stack pointer we should be reading the exception frame from
         * depends on bits in the magic exception return type value (and
         * for v8M isn't necessarily the stack pointer we will eventually
         * end up resuming execution with). Get a pointer to the location
         * in the CPU state struct where the SP we need is currently being
         * stored; we will use and modify it in place.
         * We use this limited C variable scope so we don't accidentally
         * use 'frame_sp_p' after we do something that makes it invalid.
         */
        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
                                              return_to_secure,
                                              !return_to_handler,
                                              return_to_sp_process);
        uint32_t frameptr = *frame_sp_p;
        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "M profile exception return with non-8-aligned SP "
                          "for destination state is UNPREDICTABLE\n");
        }

        /* Do we need to pop callee-saved registers? */
        if (return_to_secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
            uint32_t expected_sig = 0xfefa125b;
            uint32_t actual_sig = ldl_phys(cs->as, frameptr);

            if (expected_sig != actual_sig) {
                /* Take a SecureFault on the current stack */
                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                v7m_exception_taken(cpu, excret, true);
                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                              "stackframe: failed exception return integrity "
                              "signature check\n");
                return;
            }

            env->regs[4] = ldl_phys(cs->as, frameptr + 0x8);
            env->regs[5] = ldl_phys(cs->as, frameptr + 0xc);
            env->regs[6] = ldl_phys(cs->as, frameptr + 0x10);
            env->regs[7] = ldl_phys(cs->as, frameptr + 0x14);
            env->regs[8] = ldl_phys(cs->as, frameptr + 0x18);
            env->regs[9] = ldl_phys(cs->as, frameptr + 0x1c);
            env->regs[10] = ldl_phys(cs->as, frameptr + 0x20);
            env->regs[11] = ldl_phys(cs->as, frameptr + 0x24);

            frameptr += 0x28;
        }
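        /* Sketch of the frame layout assumed by the pops above and below
         * (offsets relative to frameptr before the += 0x28 adjustment):
         * the optional v8M callee-save extension holds the integrity
         * signature (0xfefa125b) at +0x0, a reserved word at +0x4 and
         * r4-r11 at +0x8..+0x24; the basic eight-word frame (r0-r3, r12,
         * lr, pc, xPSR) then follows.
         */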
        /* Pop registers. TODO: make these accesses use the correct
         * attributes and address space (S/NS, priv/unpriv) and handle
         * memory transaction failures.
         */
        env->regs[0] = ldl_phys(cs->as, frameptr);
        env->regs[1] = ldl_phys(cs->as, frameptr + 0x4);
        env->regs[2] = ldl_phys(cs->as, frameptr + 0x8);
        env->regs[3] = ldl_phys(cs->as, frameptr + 0xc);
        env->regs[12] = ldl_phys(cs->as, frameptr + 0x10);
        env->regs[14] = ldl_phys(cs->as, frameptr + 0x14);
        env->regs[15] = ldl_phys(cs->as, frameptr + 0x18);
        /* Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
         * assume the r15 in the stack frame should be a Thumb-style "lsbit
         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
         * complain about the badly behaved guest.
         */
        if (env->regs[15] & 1) {
            env->regs[15] &= ~1U;
            if (!arm_feature(env, ARM_FEATURE_V8)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "M profile return from interrupt with misaligned "
                              "PC is UNPREDICTABLE on v7M\n");
            }
        }

        xpsr = ldl_phys(cs->as, frameptr + 0x1c);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* For v8M we have to check whether the xPSR exception field
             * matches the EXCRET value for return to handler/thread
             * before we commit to changing the SP and xPSR.
             */
            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
            if (return_to_handler != will_be_handler) {
                /* Take an INVPC UsageFault on the current stack.
                 * By this point we will have switched to the security state
                 * for the background state, so this UsageFault will target
                 * that state.
                 */
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                        env->v7m.secure);
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
                v7m_exception_taken(cpu, excret, true);
                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                              "stackframe: failed exception return integrity "
                              "check\n");
                return;
            }
        }

        /* Commit to consuming the stack frame */
        frameptr += 0x20;
        /* Undo stack alignment (the SPREALIGN bit indicates that the original
         * pre-exception SP was not 8-aligned and we added a padding word to
         * align it, so we undo this by ORing in the bit that increases it
         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
         * would work too but a logical OR is how the pseudocode specifies it.)
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;
    }
    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
    xpsr_write(env, xpsr, ~XPSR_SPREALIGN);
    /* The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /* Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        v7m_push_stack(cpu);
        v7m_exception_taken(cpu, excret, false);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
static bool do_v7m_function_return(ARMCPU *cpu)
{
    /* v8M security extensions magic function return.
     * We may either:
     *  (1) throw an exception (longjump)
     *  (2) return true if we successfully handled the function return
     *  (3) return false if we failed a consistency check and have
     *      pended a UsageFault that needs to be taken now
     *
     * At this point the magic return value is split between env->regs[15]
     * and env->thumb. We don't bother to reconstitute it because we don't
     * need it (all values are handled the same way).
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        TCGMemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /* These loads may throw an exception (for MPU faults). We want to
         * do them as secure, so work out what MMU index that is.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on new IPSR */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}
static void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                               uint32_t addr, uint16_t *insn)
{
    /* Load a 16-bit portion of a v7M instruction, returning true on success,
     * or false on failure (in which case we will have pended the appropriate
     * exception).
     * We need to do the instruction fetch's MPU and SAU checks
     * like this because there is no MMU index that would allow
     * doing the load with a single function call. Instead we must
     * first check that the security attributes permit the load
     * and that they don't mismatch on the two halves of the instruction,
     * and then we do the load as a secure load (ie using the security
     * attributes of the address, not the CPU, as architecturally required).
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;

    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /* This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
        /* the MPU lookup failed */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
                                  attrs, &txres);
    if (txres != MEMTX_OK) {
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /* Check whether this attempt to execute code in a Secure & NS-Callable
     * memory region is for an SG instruction; if so, then emulate the
     * effect of the SG instruction and return true. Otherwise pend
     * the correct kind of exception and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /* We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
        return false;
    }

    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
    }

    /* OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);
    env->regs[14] &= ~1;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4; /* Skip the SG insn */
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;

    arm_log_exception(cs->exception_index);

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
        break;
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction. */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /* Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /* Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
             * the SG instruction have the same security attributes.)
             * Everything else must generate an INVEP SecureFault, so we
             * emulate the SG instruction here.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /* Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /* All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_BKPT:
        if (semihosting_enabled()) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                qemu_log_mask(CPU_LOG_INT,
                              "...handling as semihosting call 0x%x\n",
                              env->regs[0]);
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK;
        /* The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false);
    qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
}
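/* Worked example (a sketch, assuming the R_V7M_EXCRET_* field layout):
 * an exception taken from Secure Thread mode on a v8M core starts with
 * lr == RES1 | DCRS | FTYPE | S | MODE == 0xfffffff8; v7m_exception_taken()
 * then fills in ES and SPSEL for the target state, yielding familiar
 * EXC_RETURN values such as 0xfffffff9.
 */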
/* Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set. This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /* Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
     * mode, then we can copy from r8-r14. Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
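/* Summary of the mapping implemented above (aarch64_sync_64_to_32() below
 * is its inverse):
 *   x0-x7   <-> r0-r7          x8-x12  <-> r8-r12 (usr bank in FIQ mode)
 *   x13/x14 <-> SP/LR usr,sys  x15     <-> SP hyp
 *   x16/x17 <-> LR/SP irq      x18/x19 <-> LR/SP svc
 *   x20/x21 <-> LR/SP abt      x22/x23 <-> LR/SP und
 *   x24-x30 <-> r8-r14 fiq     pc      <-> r15
 */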
/* Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set. This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /* Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14. Otherwise, we copy the x register to the banked r13 and r14
     * registers.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /* HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
     * mode, then we can copy to r8-r14. Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    /* TODO: Vectored interrupt controller. */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction. */
        offset = 0;
        break;
    case EXCP_BKPT:
        env->exception.fsr = 2;
        /* Fall through to prefetch abort. */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    switch_mode(env, new_mode);
    /* For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits. */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set. */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    env->daif |= mask;
    /* This is a lie, as there was no c1_sys on V4T/V5, but who cares;
     * we should just guard the thumb mode on V4.
     */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
}
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);

    if (arm_current_el(env) < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
static inline bool check_for_semihosting(CPUState *cs)
{
    /* Check whether this exception is a semihosting call; if so
     * then handle it and return true; otherwise return false.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            /* This is always the 64-bit semihosting exception.
             * The "is this usermode" and "is semihosting enabled"
             * checks have been done at translate time.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...handling as semihosting call 0x%" PRIx64 "\n",
                          env->xregs[0]);
            env->xregs[0] = do_arm_semihosting(env);
            return true;
        }
        return false;
    } else {
        uint32_t imm;

        /* Only intercept calls from privileged modes, to provide some
         * semblance of security.
         */
        if (cs->exception_index != EXCP_SEMIHOST &&
            (!semihosting_enabled() ||
             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
            return false;
        }

        switch (cs->exception_index) {
        case EXCP_SEMIHOST:
            /* This is always a semihosting call; the "is this usermode"
             * and "is semihosting enabled" checks have been done at
             * translate time.
             */
            break;
        case EXCP_SWI:
            /* Check for semihosting interrupt. */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    break;
                }
            } else {
                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
                    & 0xffffff;
                if (imm == 0x123456) {
                    break;
                }
            }
            return false;
        case EXCP_BKPT:
            /* See if this is a semihosting syscall. */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    env->regs[15] += 2;
                    break;
                }
            }
            return false;
        default:
            return false;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return true;
    }
}
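/* For reference, the semihosting immediates recognized above are the
 * standard ARM ones: SVC 0x123456 in ARM state, SVC 0xab in Thumb state,
 * and BKPT 0xab.
 */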
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      env->exception.syndrome >> ARM_EL_EC_SHIFT,
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /* Semihosting semantics depend on the register width of the
     * code that caused the exception, not the target exception level,
     * so must be handled here.
     */
    if (check_for_semihosting(cs)) {
        return;
    }

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_S1E2:
        return 2;
    case ARMMMUIdx_S1E3:
        return 3;
    case ARMMMUIdx_S1SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_S2NS) {
        return (env->cp15.hcr_el2 & HCR_VM) == 0;
    }
    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
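/* Example: with MPU_CTRL.ENABLE == 1 and HFNMIENA == 0, an access made at
 * negative execution priority (HardFault/NMI, i.e. a NegPri mmu_idx)
 * reports the MPU as disabled and falls back to the default memory map.
 */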
static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
    }
    return mmu_idx;
}
/* Returns TBI0 value for current regime el */
uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    TCR *tcr;
    uint32_t el;

    /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
     * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
     */
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    tcr = regime_tcr(env, mmu_idx);
    el = regime_el(env, mmu_idx);

    if (el > 1) {
        return extract64(tcr->raw_tcr, 20, 1);
    } else {
        return extract64(tcr->raw_tcr, 37, 1);
    }
}
/* Returns TBI1 value for current regime el */
uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    TCR *tcr;
    uint32_t el;

    /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
     * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
     */
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    tcr = regime_tcr(env, mmu_idx);
    el = regime_el(env, mmu_idx);

    if (el > 1) {
        return 0;
    } else {
        return extract64(tcr->raw_tcr, 38, 1);
    }
}
/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}
/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved. */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
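/* Summary of the short-descriptor AP[2:0] encodings handled above:
 *   0: no access (v7) or SCTLR.S/R dependent    1: priv RW
 *   2: priv RW, user RO                         3: RW
 *   4: reserved                                 5: priv RO
 *   6: RO                                       7: RO (v6K and later)
 */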
/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
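/* The simple AP[2:1] model decoded above: 0 = priv RW, 1 = RW,
 * 2 = priv RO, 3 = RO.
 */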
static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}

/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
/* Translate a S1 pagetable walk through S2 if needed. */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, NULL);
        if (ret) {
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
/* All loads done in the course of a page table walk go through here.
 * TODO: rather than ignoring errors from physical memory reads (which
 * are external aborts in ARM terminology) we should propagate this
 * error out so that we can turn it into a Data Abort if this walk
 * was being done for a CPU load/store or an address translation instruction
 * (but not if it was for a debug access).
 */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    AddressSpace *as;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        return address_space_ldl_be(as, addr, attrs, NULL);
    } else {
        return address_space_ldl_le(as, addr, attrs, NULL);
    }
}
static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    AddressSpace *as;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        return address_space_ldq_be(as, addr, attrs, NULL);
    } else {
        return address_space_ldq_le(as, addr, attrs, NULL);
    }
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault. */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section. */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry. */
        if (type == 1) {
            /* Coarse pagetable. */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault. */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
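/* Worked example for the v5 walk above (a sketch, not from the ARM ARM):
 * translating VA 0x12345678 reads the L1 descriptor at
 * (TTBR & 0xffffc000) + 0x48c; if it is a coarse page table descriptor,
 * the L2 entry is at (desc & 0xfffffc00) + ((VA >> 10) & 0x3fc) = +0x114,
 * and a 4k page descriptor there yields PA (desc & 0xfffff000) | 0x678.
 */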
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section. */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection. */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit. */
            if ((ap & 1) == 0) {
                /* Access flag fault. */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault. */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed. */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages. */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages. */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages. */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks. */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that. */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
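/* Worked example: for 4KB pages (stride == 9, grainsize == 12) with a
 * 40-bit inputsize and a suggested starting level of 1, startsizecheck is
 * 40 - ((3 - 1) * 9 + 12) = 10, which is within the permitted 1..13 range.
 */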
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
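/* Example: s2attrs == 0xf (Normal, write-back inner and outer) converts to
 * 0xff, i.e. write-back with RW-allocate hints in both nibbles, matching
 * the stage 1 MAIR encoding for Normal write-back cacheable memory.
 */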
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    uint32_t epd = 0;
    int32_t t0sz, t1sz;
    uint32_t tg;
    uint64_t ttbr;
    int ttbr_select;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride = 9;
    int32_t addrsize;
    int inputsize;
    int32_t tbi = 0;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid = true;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        level = 0;
        addrsize = 64;
        if (el > 1) {
            if (mmu_idx != ARMMMUIdx_S2NS) {
                tbi = extract64(tcr->raw_tcr, 20, 1);
            }
        } else {
            if (extract64(address, 55, 1)) {
                tbi = extract64(tcr->raw_tcr, 38, 1);
            } else {
                tbi = extract64(tcr->raw_tcr, 37, 1);
            }
        }
        tbi *= 8;

        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
         * invalid.
         */
        if (el > 1) {
            ttbr1_valid = false;
        }
    } else {
        level = 1;
        addrsize = 32;
        /* There is no TTBR1 for EL2 */
        if (el == 2) {
            ttbr1_valid = false;
        }
    }

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    if (aarch64) {
        /* AArch64 translation. */
        t0sz = extract32(tcr->raw_tcr, 0, 6);
        t0sz = MIN(t0sz, 39);
        t0sz = MAX(t0sz, 16);
    } else if (mmu_idx != ARMMMUIdx_S2NS) {
        /* AArch32 stage 1 translation. */
        t0sz = extract32(tcr->raw_tcr, 0, 3);
    } else {
        /* AArch32 stage 2 translation. */
        bool sext = extract32(tcr->raw_tcr, 4, 1);
        bool sign = extract32(tcr->raw_tcr, 3, 1);
        /* Address size is 40-bit for a stage 2 translation,
         * and t0sz can be negative (from -8 to 7),
         * so we need to adjust it to use the TTBR selecting logic below.
         */
        addrsize = 40;
        t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8;

        /* If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error. */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
    }
    t1sz = extract32(tcr->raw_tcr, 16, 6);
    if (aarch64) {
        t1sz = MIN(t1sz, 39);
        t1sz = MAX(t1sz, 16);
    }
    if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (ttbr1_valid && t1sz &&
               !extract64(~address, addrsize - t1sz, t1sz - tbi)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz && ttbr1_valid) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = ARMFault_Translation;
        goto do_fault;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = regime_ttbr(env, mmu_idx, 0);
        if (el < 2) {
            epd = extract32(tcr->raw_tcr, 7, 1);
        }
        inputsize = addrsize - t0sz;

        tg = extract32(tcr->raw_tcr, 14, 2);
        if (tg == 1) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 2) { /* 16KB pages */
            stride = 11;
        }
    } else {
        /* We should only be here if TTBR1 is valid */
        assert(ttbr1_valid);

        ttbr = regime_ttbr(env, mmu_idx, 1);
        epd = extract32(tcr->raw_tcr, 23, 1);
        inputsize = addrsize - t1sz;

        tg = extract32(tcr->raw_tcr, 30, 2);
        if (tg == 3) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 1) { /* 16KB pages */
            stride = 11;
        }
    }

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
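        /* For example, with 4KB granules (stride == 9) and a 39-bit input
         * address this gives level = 4 - (39 - 4) / 9 = 1: one stride of
         * nine bits at level 1 (address bits [38:30]), then two more at
         * levels 2 and 3, down to the 12-bit page offset.
         */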
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);
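        /* In this packed form, descriptor bit (n + 2) becomes attrs bit n
         * for n < 10 and descriptor bit (n + 42) becomes attrs bit n for
         * n >= 10. So for a stage 1 descriptor: AttrIndx is attrs[2:0],
         * NS is attrs[3], AP[2:1] is attrs[5:4], SH is attrs[7:6], AF is
         * attrs[8], PXN is attrs[11] and UXN/XN is attrs[12].
         */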
        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        attrs |= nstable << 3; /* NS */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_S2NS) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                continue;
            }
            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (rsize < TARGET_PAGE_BITS) {
                qemu_log_mask(LOG_UNIMP,
                              "DRSR[%d]: No support for MPU (sub)region "
                              "alignment of %" PRIu32 " bits. Minimum is %d\n",
                              n, rsize, TARGET_PAGE_BITS);
                continue;
            }
            if (srdis) {
                continue;
            }
            break;
        }
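        /* As an example of the subregion coalescing above: for a 1KB region
         * (rsize == 10) each of the eight subregions covers 128 bytes, so
         * rsize drops to 7. If the hit subregion and its neighbour in the
         * aligned pair share the same disable bit, rsize grows back to 8;
         * if the whole aligned group of four matches, to 9; and so on, so
         * that a run of identically configured subregions can be treated
         * as one region of QEMU-page size or larger.
         */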
        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
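/* These literal ranges should correspond to the ITM/DWT/FPB block, the SCS,
 * the SCS non-secure alias, the TPIU/ETM block and the ROM table, i.e. the
 * debug and system-control parts of the PPB that the v8M architecture keeps
 * reachable regardless of the SAU/IDAU configuration.
 */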
static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    int r;

    /* TODO: implement IDAU */

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                }
            }
        }
        break;
    }

    /* TODO when we support the IDAU then it may override the result here */
}
static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
                              int *prot, ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;

    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
        hit = true;
    } else {
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                continue;
            }

            if (hit) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
            if (base & ~TARGET_PAGE_MASK) {
                qemu_log_mask(LOG_UNIMP,
                              "MPU_RBAR[%d]: No support for MPU region base"
                              "address of 0x%" PRIx32 ". Minimum alignment is "
                              "%d\n",
                              n, base, TARGET_PAGE_BITS);
                continue;
            }
            if ((limit + 1) & ~TARGET_PAGE_MASK) {
                qemu_log_mask(LOG_UNIMP,
                              "MPU_RBAR[%d]: No support for MPU region limit"
                              "address of 0x%" PRIx32 ". Minimum alignment is "
                              "%d\n",
                              n, limit, TARGET_PAGE_BITS);
                continue;
            }
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    return pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                             txattrs, prot, fi, NULL);
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry with respect to the pseudocode.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
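/* For example, a stage 1 nibble of 0xf (write-back, RW-allocate) combined
 * with a stage 2 nibble of 0xa (write-through, read-allocate) takes the
 * third branch and yields 0xb: write-through from stage 2 with the
 * RW-allocate hints carried over from stage 1.
 */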
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
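/* For example, stage 1 Normal write-back (attrs 0xff) combined with a
 * stage 2 Device-nGnRE mapping (attrs 0x04) resolves to Device-nGnRE,
 * outer shareable: the Device type wins and forces the shareability.
 */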
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early. */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation. */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms. */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }
    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade an non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
/* Walk the page table and (if the mapping exists) add the page
 * to the TLB. Return false on success, or true on failure. Populate
 * fsr with ARM DFSR/IFSR fault register format value on failure.
 */
bool arm_tlb_fill(CPUState *cs, vaddr address,
                  MMUAccessType access_type, int mmu_idx,
                  ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, address, access_type,
                        core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
                        &attrs, &prot, &page_size, fi, NULL);
    if (!ret) {
        /* Map a single [sub]page. */
        phys_addr &= TARGET_PAGE_MASK;
        address &= TARGET_PAGE_MASK;
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return 0;
    }

    return ret;
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    uint32_t mask;
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        mask = 0;
        if ((reg & 1) && el) {
            mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
        }
        if (!(reg & 4)) {
            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        }
        /* EPSR reads as zero */
        return xpsr_read(env) & mask;
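        /* (SYSm values 0-7 select combinations of APSR, IPSR and EPSR:
         * bit 0 requests IPSR, bit 2 drops APSR, and EPSR always reads
         * as zero here, so e.g. 0 is APSR, 5 is IPSR and 3 is the full
         * xPSR.)
         */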
    case 20: /* CONTROL */
        return env->v7m.control[env->v7m.secure];
    case 0x94: /* CONTROL_NS */
        /* We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS];
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                      " register %d\n", reg);
        return 0;
    }
}
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /* We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    if (arm_current_el(env) == 0 && reg > 7) {
        /* only xPSR sub-fields may be written by unprivileged */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        /* only APSR is actually writable */
        if (!(reg & 4)) {
            uint32_t apsrmask = 0;

            if (mask & 8) {
                apsrmask |= XPSR_NZCV | XPSR_Q;
            }
            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
                apsrmask |= XPSR_GE;
            }
            xpsr_write(env, val, apsrmask);
        }
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /* Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         * For v7M, we must just ignore explicit writes to SPSEL in handler
         * mode; for v8M the write is permitted but will have no effect.
         */
        if (arm_feature(env, ARM_FEATURE_V8) ||
            !arm_v7m_is_handler_mode(env)) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
        env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                      " register %d\n", reg);
        return;
    }
}
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;

    /* Work out what the security state and privilege level we're
     * interested in is...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /* We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /* MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /* Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[maxidx];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /* If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /* OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /* Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
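/* op_addsub.h is a template: each inclusion expands a full set of packed
 * parallel add/subtract helpers named with the PFX prefix defined just
 * above it (q for signed saturating, uq for unsigned saturating, and so
 * on), built from the ADD16/SUB16/ADD8/SUB8 macros, which it #undefs
 * again afterwards. Defining ARITH_GE additionally selects generation of
 * the CPSR GE flag updates used by the non-saturating parallel forms.
 */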
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a)
        res = 0xffff;
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a)
        res = 0xff;
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"

/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}

uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
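/* The target bits above are the FPSCR cumulative exception flags:
 * bit 0 IOC (invalid operation), bit 1 DZC (divide by zero), bit 2 OFC
 * (overflow), bit 3 UFC (underflow), bit 4 IXC (inexact) and bit 7 IDC
 * (input denormal).
 */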
/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}

void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) {
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    }
    if (changed & (1 << 25))
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP

float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}

/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, fsz, sign) \
    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

#define CONV_FTOI(name, fsz, sign, round) \
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float64_maybe_silence_nan(r, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float32_maybe_silence_nan(r, &env->vfp.fp_status);
}
/* VFP3 fixed point conversion.  */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = itype##_to_##float##fsz(x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
}

/* Notice that we want only input-denormal exception flags from the
 * scalbn operation: the other possible flags (overflow+inexact if
 * we overflow to infinity, output-denormal) aren't correct for the
 * complete scale-and-convert operation.
 */
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
                                             uint32_t shift, \
                                             void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    int old_exc_flags = get_float_exception_flags(fpst); \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    old_exc_flags |= get_float_exception_flags(fpst) \
        & float_flag_input_denormal; \
    set_float_exception_flags(old_exc_flags, fpst); \
    return float##fsz##_to_##itype##round(tmp, fpst); \
}

#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
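/* The fixed-point formats are handled entirely by scaling: converting
 * float to, say, signed 16.16 fixed point is scalbn(x, 16) followed by
 * an ordinary float-to-int conversion, so 1.5f with shift == 16 becomes
 * 1.5 * 2^16 = 98304 (0x18000). The int-to-float direction applies the
 * opposite scalbn after the conversion.
 */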
/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
/* Half precision conversions.  */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float32 r = float16_to_float32(make_float16(a), ieee, s);
    if (ieee) {
        return float32_maybe_silence_nan(r, s);
    }
    return r;
}

static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float32_to_float16(a, ieee, s);
    if (ieee) {
        r = float16_maybe_silence_nan(r, s);
    }
    return float16_val(r);
}

float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}

float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
    if (ieee) {
        return float64_maybe_silence_nan(r, &env->vfp.fp_status);
    }
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
    if (ieee) {
        r = float16_maybe_silence_nan(r, &env->vfp.fp_status);
    }
    return float16_val(r);
}
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
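
/* recps computes the Newton-Raphson step factor (2 - a*b) for a reciprocal,
 * and rsqrts computes (3 - a*b)/2 for a reciprocal square root; guest code
 * refines the coarse VRECPE/VRSQRTE estimates by iterating x' = x * step,
 * roughly doubling the number of good bits each time. A sketch of the
 * reciprocal refinement (illustrative only, hypothetical EXAMPLE_SKETCHES
 * guard, not part of the build):
 */
#ifdef EXAMPLE_SKETCHES
static float32 example_refined_reciprocal(float32 d, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 x = helper_recpe_f32(d, s);                  /* ~8 good bits */
    x = float32_mul(x, helper_recps_f32(d, x, env), s);  /* ~16 good bits */
    x = float32_mul(x, helper_recps_f32(d, x, env), s);  /* ~full f32 precision */
    return x;
}
#endif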
/* NEON helpers.  */

/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.
 */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)
#define float32_maxnorm make_float32(0x7f7fffff)
#define float64_maxnorm make_float64(0x7fefffffffffffffLL)

/* Reciprocal functions
 *
 * The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM, see FPRecipEstimate()
 */
static float64 recip_estimate(float64 a, float_status *real_fp_status)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = *real_fp_status;
    float_status *s = &dummy_status;
    /* q = (int)(a * 512.0) */
    float64 q = float64_mul(float64_512, a, s);
    int64_t q_int = float64_to_int64_round_to_zero(q, s);

    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
    q = int64_to_float64(q_int, s);
    q = float64_add(q, float64_half, s);
    q = float64_div(q, float64_512, s);
    q = float64_div(float64_one, q, s);

    /* s = (int)(256.0 * r + 0.5) */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0 */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
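
/* Worked example: for a = 0.5, q = (int)(0.5 * 512) = 256, so
 * r = 1.0 / (256.5 / 512) ~= 1.996101; then s = (int)(256 * r + 0.5) = 511
 * and the returned estimate is 511 / 256 = 1.99609375, i.e. the reciprocal
 * rounded to a fixed 8 bits of fraction.
 */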
/* Common wrapper to call recip_estimate */
static float64 call_recip_estimate(float64 num, int off, float_status *fpst)
{
    uint64_t val64 = float64_val(num);
    uint64_t frac = extract64(val64, 0, 52);
    int64_t exp = extract64(val64, 52, 11);
    uint64_t sbit;
    float64 scaled, estimate;

    /* Generate the scaled number for the estimate function */
    if (exp == 0) {
        if (extract64(frac, 51, 1) == 0) {
            exp = -1;
            frac = extract64(frac, 0, 50) << 2;
        } else {
            frac = extract64(frac, 0, 51) << 1;
        }
    }

    /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */
    scaled = make_float64((0x3feULL << 52)
                          | extract64(frac, 44, 8) << 44);

    estimate = recip_estimate(scaled, fpst);

    /* Build new result */
    val64 = float64_val(estimate);
    sbit = 0x8000000000000000ULL & val64;
    exp = off - exp;
    frac = extract64(val64, 0, 52);

    if (exp == 0) {
        frac = 1ULL << 51 | extract64(frac, 1, 51);
    } else if (exp == -1) {
        frac = 1ULL << 50 | extract64(frac, 2, 50);
        exp = 0;
    }

    return make_float64(sbit | (exp << 52) | frac);
}
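
/* 'off' is 2*bias - 1 for the destination format (253 for single precision,
 * 2045 for double): the reciprocal of 2^(exp-bias) * 1.frac has an exponent
 * of roughly bias - (exp - bias) - 1, i.e. a biased exponent of off - exp,
 * the extra -1 accounting for the significand lying in [1, 2).
 */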
static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even: /* Round to Nearest */
        return true;
    case float_round_up: /* Round to +Inf */
        return !sign_bit;
    case float_round_down: /* Round to -Inf */
        return sign_bit;
    case float_round_to_zero: /* Round to Zero */
        return false;
    }

    g_assert_not_reached();
}
float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    uint32_t f32_sbit = 0x80000000ULL & f32_val;
    int32_t f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    float64 f64, r64;
    uint64_t r64_val;
    int64_t r64_exp;
    uint64_t r64_frac;

    /* Deal with any special cases */
    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_maybe_silence_nan(f32, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) {
        /* Abs(value) < 2.0^-128 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sbit)) {
            return float32_set_sign(float32_infinity, float32_is_neg(f32));
        } else {
            return float32_set_sign(float32_maxnorm, float32_is_neg(f32));
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }

    f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29);
    r64 = call_recip_estimate(f64, 253, fpst);
    r64_val = float64_val(r64);
    r64_exp = extract64(r64_val, 52, 11);
    r64_frac = extract64(r64_val, 0, 52);

    /* result = sign : result_exp<7:0> : fraction<51:29>; */
    return make_float32(f32_sbit |
                        (r64_exp & 0xff) << 23 |
                        extract64(r64_frac, 29, 24));
}
float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    uint64_t f64_sbit = 0x8000000000000000ULL & f64_val;
    int64_t f64_exp = extract64(f64_val, 52, 11);
    float64 r64;
    uint64_t r64_val;
    int64_t r64_exp;
    uint64_t r64_frac;

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_maybe_silence_nan(f64, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Abs(value) < 2.0^-1024 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sbit)) {
            return float64_set_sign(float64_infinity, float64_is_neg(f64));
        } else {
            return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
        }
    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    r64 = call_recip_estimate(f64, 2045, fpst);
    r64_val = float64_val(r64);
    r64_exp = extract64(r64_val, 52, 11);
    r64_frac = extract64(r64_val, 0, 52);

    /* result = sign : result_exp<10:0> : fraction<51:0> */
    return make_float64(f64_sbit |
                        ((r64_exp & 0x7ff) << 52) |
                        r64_frac);
}
/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = *real_fp_status;
    float_status *s = &dummy_status;
    float64 q;
    int64_t q_int;

    if (float64_lt(a, float64_half, s)) {
        /* range 0.25 <= a < 0.5 */

        /* a in units of 1/512 rounded down */
        /* q0 = (int)(a * 512.0);  */
        q = float64_mul(float64_512, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_512, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    } else {
        /* range 0.5 <= a < 1.0 */

        /* a in units of 1/256 rounded down */
        /* q1 = (int)(a * 256.0); */
        q = float64_mul(float64_256, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q1 + 0.5) / 256); */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_256, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    }
    /* r in units of 1/256 rounded to nearest */
    /* s = (int)(256.0 * r + 0.5); */

    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0 */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
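
/* Worked example: for a = 0.25 (exact result 2.0), the first branch gives
 * q0 = (int)(0.25 * 512) = 128, r = 1.0 / sqrt(128.5 / 512) ~= 1.996105,
 * then s = (int)(256 * r + 0.5) = 511, so the estimate returned is
 * 511 / 256 = 1.99609375.
 */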
float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sbit = 0x80000000 & val;
    int32_t f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;
    uint64_t val64;
    int result_exp;
    float64 f64;

    /* Deal with any special cases */
    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_maybe_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f32_frac) << 29;
    if (f32_exp == 0) {
        while (extract64(f64_frac, 51, 1) == 0) {
            f64_frac = f64_frac << 1;
            f32_exp = f32_exp - 1;
        }
        f64_frac = extract64(f64_frac, 0, 51) << 1;
    }

    if (extract64(f32_exp, 0, 1) == 0) {
        f64 = make_float64(((uint64_t) f32_sbit) << 32
                           | (0x3feULL << 52)
                           | f64_frac);
    } else {
        f64 = make_float64(((uint64_t) f32_sbit) << 32
                           | (0x3fdULL << 52)
                           | f64_frac);
    }

    result_exp = (380 - f32_exp) / 2;

    f64 = recip_sqrt_estimate(f64, s);

    val64 = float64_val(f64);

    val = ((result_exp & 0xff) << 23)
        | ((val64 >> 29) & 0x7fffff);
    return make_float32(val);
}
float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    uint64_t f64_sbit = 0x8000000000000000ULL & val;
    int64_t f64_exp = extract64(val, 52, 11);
    uint64_t f64_frac = extract64(val, 0, 52);
    int64_t result_exp;
    uint64_t result_frac;

    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, s)) {
            float_raise(float_flag_invalid, s);
            nan = float64_maybe_silence_nan(f64, s);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan(s);
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        float_raise(float_flag_invalid, s);
        return float64_default_nan(s);
    } else if (float64_is_infinity(f64)) {
        return float64_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    if (f64_exp == 0) {
        while (extract64(f64_frac, 51, 1) == 0) {
            f64_frac = f64_frac << 1;
            f64_exp = f64_exp - 1;
        }
        f64_frac = extract64(f64_frac, 0, 51) << 1;
    }

    if (extract64(f64_exp, 0, 1) == 0) {
        f64 = make_float64(f64_sbit
                           | (0x3feULL << 52)
                           | f64_frac);
    } else {
        f64 = make_float64(f64_sbit
                           | (0x3fdULL << 52)
                           | f64_frac);
    }

    result_exp = (3068 - f64_exp) / 2;

    f64 = recip_sqrt_estimate(f64, s);

    result_frac = extract64(float64_val(f64), 0, 52);

    return make_float64(f64_sbit |
                        ((result_exp & 0x7ff) << 52) |
                        result_frac);
}
uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(a & 0x7fffffff) << 21));

    f64 = recip_estimate(f64, s);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    if (a & 0x80000000) {
        f64 = make_float64((0x3feULL << 52)
                           | ((uint64_t)(a & 0x7fffffff) << 21));
    } else { /* bits 31-30 == '01' */
        f64 = make_float64((0x3fdULL << 52)
                           | ((uint64_t)(a & 0x3fffffff) << 22));
    }

    f64 = recip_sqrt_estimate(f64, fpst);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
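
/* The u32 variants treat the input as an unsigned fixed-point fraction with
 * the binary point above bit 31, so inputs below 0.5 (recpe) or 0.25
 * (rsqrte) saturate to 0xffffffff. E.g. helper_recpe_u32(0x80000000, fpst)
 * scales 0.5 into the estimate above and should yield 0xff800000, i.e.
 * 1.99609375 with the binary point at bit 31.
 */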
/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}
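
/* float32_muladd/float64_muladd round once at the end, which is what makes
 * VFMA/VFMS fused; the older VMLA/VMLS sequences round after the multiply
 * and again after the add. Illustrative contrast (hypothetical
 * EXAMPLE_SKETCHES guard, not part of the build):
 */
#ifdef EXAMPLE_SKETCHES
static float32 example_unfused_mla(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    /* Two roundings: one after the multiply, one after the add */
    return float32_add(float32_mul(a, b, fpst), c, fpst);
}
#endif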
/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
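
/* The _exact variants back FRINTX, which must signal Inexact whenever the
 * result differs from the input; the plain variants serve the other FRINT*
 * encodings, so they strip any Inexact flag the conversion raised unless it
 * was already pending beforehand.
 */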
/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
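
/* Illustrative sketch only (hypothetical EXAMPLE_SKETCHES guard, not part of
 * the build): a directed-rounding instruction such as FCVTPS would select
 * its softfloat mode this way before converting, then restore the old mode.
 */
#ifdef EXAMPLE_SKETCHES
static uint32_t example_enter_posinf_rounding(CPUARMState *env)
{
    /* Returns the previous mode so the caller can restore it afterwards */
    return helper_set_rmode(arm_rmode_to_sf(FPROUNDING_POSINF), env);
}
#endif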
/* The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
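
/* Illustrative sketch only (hypothetical EXAMPLE_SKETCHES guard, not part of
 * the build): the CRC32/CRC32C instructions feed 1, 2 or 4 bytes per step
 * through these helpers (the 8-byte forms are handled elsewhere); a
 * word-sized step is simply:
 */
#ifdef EXAMPLE_SKETCHES
static uint32_t example_crc32_word(uint32_t acc, uint32_t word)
{
    /* CRC32W: all four bytes of 'word' are significant */
    return helper_crc32(acc, word, 4);
}
#endif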