/*
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "exec/cpu_ldst.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
#endif

static void switch_mode(CPUARMState *env, int mode);
static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    /* VFP data registers are always little-endian.  */
    if (reg < nregs) {
        return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            return gdb_get_reg128(buf, q[0], q[1]);
        }
    }
    switch (reg - nregs) {
    case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); break;
    case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env)); break;
    case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); break;
    }
    return 0;
}
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register - quads are in LE order */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        return gdb_get_reg128(buf, q[1], q[0]);
    }
    case 32:
        /* FPSR */
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        /* FPCR */
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    default:
        return 0;
    }
}
static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
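
/*
 * Illustrative note (not in the original source): because ARM_CP_CONST
 * registers are write-ignored above, callers that need to know whether a
 * raw write "took" use a write-then-readback pattern:
 *
 *     write_raw_cp_reg(&cpu->env, ri, v);
 *     if (read_raw_cp_reg(&cpu->env, ri) != v) {
 *         // the value was not accepted by the register
 *     }
 *
 * write_list_to_cpustate() below relies on exactly this check.
 */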
/**
 * arm_get/set_gdb_*: get/set a gdb register
 * @env: the CPU state
 * @buf: a buffer to copy to/from
 * @reg: register number (offset from start of group)
 *
 * We return the number of bytes copied
 */

static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
#ifdef TARGET_AARCH64
static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            len += gdb_get_reg128(buf,
                                  env->vfp.zregs[reg].d[vq * 2 + 1],
                                  env->vfp.zregs[reg].d[vq * 2]);
        }
        return len;
    }
    case 32:
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    /* then 16 predicates and the ffr */
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
        }
        return len;
    }
    case 51:
    {
        /*
         * We report in Vector Granules (VG) which is 64bit in a Z reg
         * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
         */
        int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
        return gdb_get_reg32(buf, vq * 2);
    }
    default:
        /* gdbstub asked for something out of our range */
        qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
        break;
    }

    return 0;
}

static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
            env->vfp.zregs[reg].d[vq * 2] = *p++;
            len += 16;
        }
        return len;
    }
    case 32:
        vfp_set_fpsr(env, *(uint32_t *)buf);
        return 4;
    case 33:
        vfp_set_fpcr(env, *(uint32_t *)buf);
        return 4;
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            env->vfp.pregs[preg].p[vq / 4] = *p++;
            len += 8;
        }
        return len;
    }
    case 51:
        /* cannot set vg via gdbstub */
        return 0;
    default:
        /* gdbstub asked for something out of our range */
        break;
    }

    return 0;
}
#endif /* TARGET_AARCH64 */
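
/*
 * Illustrative note (not in the original source): the "vg" pseudo-register
 * above is reported in 64-bit Vector Granules, which is why it is derived
 * as vq * 2; e.g. a current vector length of four quads (512 bits) reads
 * back to the gdbstub as VG == 8.
 */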
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM.  */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW.  */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR.  */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}
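
/*
 * Illustrative note (not in the original source): when tlb_force_broadcast()
 * returns true, the non-IS operations above behave like their IS variants,
 * i.e. a TLBIALL or TLBIMVA takes the tlb_flush_all_cpus_synced() /
 * tlb_flush_page_all_cpus_synced() path instead of the CPU-local flush.
 */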
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0 |
                        ARMMMUIdxBit_Stage2);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0 |
                                        ARMMMUIdxBit_Stage2);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_Stage2);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint64_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
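
/*
 * Worked values (illustrative, not in the original source): PMXEVTYPER_MASK
 * evaluates to 0xfe00ffff (the seven filter bits [31:25] plus the 16-bit
 * evtCount field), and PMCCFILTR_EL0 evaluates to 0xfc000000 (the
 * AArch32-visible PMCCFILTR bits plus the AArch64-only M bit).
 */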
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
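
/*
 * Worked example (illustrative, not in the original source): with PMCR.N == 4,
 * pmu_num_counters() returns 4 and pmu_counter_mask() returns
 * (1 << 31) | 0xf == 0x8000000f, i.e. the PMCCNTR bit plus one bit per
 * implemented event counter.
 */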
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif

static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}
[] = {
1300 { .number
= 0x000, /* SW_INCR */
1301 .supported
= event_always_supported
,
1302 .get_count
= swinc_get_count
,
1303 .ns_per_count
= swinc_ns_per
,
1305 #ifndef CONFIG_USER_ONLY
1306 { .number
= 0x008, /* INST_RETIRED, Instruction architecturally executed */
1307 .supported
= instructions_supported
,
1308 .get_count
= instructions_get_count
,
1309 .ns_per_count
= instructions_ns_per
,
1311 { .number
= 0x011, /* CPU_CYCLES, Cycle */
1312 .supported
= event_always_supported
,
1313 .get_count
= cycles_get_count
,
1314 .ns_per_count
= cycles_ns_per
,
1317 { .number
= 0x023, /* STALL_FRONTEND */
1318 .supported
= pmu_8_1_events_supported
,
1319 .get_count
= zero_event_get_count
,
1320 .ns_per_count
= zero_event_ns_per
,
1322 { .number
= 0x024, /* STALL_BACKEND */
1323 .supported
= pmu_8_1_events_supported
,
1324 .get_count
= zero_event_get_count
,
1325 .ns_per_count
= zero_event_ns_per
,
1327 { .number
= 0x03c, /* STALL */
1328 .supported
= pmu_8_4_events_supported
,
1329 .get_count
= zero_event_get_count
,
1330 .ns_per_count
= zero_event_ns_per
,
1335 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
1336 * events (i.e. the statistical profiling extension), this implementation
1337 * should first be updated to something sparse instead of the current
1338 * supported_event_map[] array.
1340 #define MAX_EVENT_ID 0x3c
1341 #define UNSUPPORTED_EVENT UINT16_MAX
1342 static uint16_t supported_event_map
[MAX_EVENT_ID
+ 1];
/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}
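
/*
 * Illustrative example (not in the original source): event_supported(0x000)
 * is true on every configuration, because SW_INCR is registered in
 * pm_events[] with event_always_supported, while any number above
 * MAX_EVENT_ID (0x3c) is rejected before the map is consulted.
 */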
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            (env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
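
/*
 * Illustrative note (not in the original source): between pmccntr_op_start()
 * and pmccntr_op_finish(), c15_ccnt holds the guest-visible count and
 * c15_ccnt_delta the raw cycle count; outside that window c15_ccnt_delta
 * instead holds (raw cycles - guest count), so the next op_start can recover
 * the guest value as eff_cycles - c15_ccnt_delta.
 */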
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint64_t count = 0;
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;

    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;

    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;

    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;

    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;

    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}
static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}

static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}
2049 static void vbar_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2052 /* Note that even though the AArch64 view of this register has bits
2053 * [10:0] all RES0 we can only mask the bottom 5, to comply with the
2054 * architectural requirements for bits which are RES0 only in some
2055 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
2056 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
2058 raw_write(env
, ri
, value
& ~0x1FULL
);
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state.  */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
        valid_mask &= ~SCR_NET;
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= SCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= SCR_API | SCR_APK;
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}
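/*
 * The effect of the masking above is that bits which are not valid in the
 * current configuration simply read back as zero. For instance, on a CPU
 * without EL2 the HCE bit is dropped from valid_mask, so an attempt to set
 * SCR.HCE is silently discarded by the "value &= valid_mask" step.
 */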
static CPAccessResult access_aa64_tid2(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}
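/*
 * CSSELR keeps only bits [3:0] (Level[3:1]:InD[0]); that value is then used
 * directly by ccsidr_read() as the index into cpu->ccsidr[]. For example,
 * selecting the L1 instruction cache means writing CSSELR = 1 (Level = 0,
 * InD = 1) and then reading back cpu->ccsidr[1].
 */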
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint64_t ret = 0;
    bool allow_virt = (arm_current_el(env) == 1 &&
                       (!arm_is_secure_below_el3(env) ||
                        (env->cp15.scr_el3 & SCR_EEL2)));

    if (allow_virt && (hcr_el2 & HCR_IMO)) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (allow_virt && (hcr_el2 & HCR_FMO)) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}
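/*
 * These accessors implement the HCR_EL2 "trap ID group" controls: when a
 * hypervisor sets HCR_EL2.TID2, EL1 accesses to the cache ID registers
 * (CCSIDR/CSSELR and friends) are re-routed to EL2, and TID1 does the same
 * for the AIDR-style implementation ID registers handled here.
 */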
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0x0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW,
      .accessfn = access_aa64_tid2,
      .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST,
      .accessfn = access_aa64_tid1,
      .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
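/*
 * Many of the 32-bit entries above are marked ARM_CP_ALIAS: they are just an
 * alternative view of state that already has a canonical AArch64 (or banked)
 * definition, so only the canonical entry takes part in migration and reset
 * and the alias never duplicates the underlying field.
 */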
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    raw_write(env, ri, value);
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);
    uint64_t hcr;
    uint32_t cntkctl;

    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            cntkctl = env->cp15.cnthctl_el2;
        } else {
            cntkctl = env->cp15.c14_cntkctl;
        }
        if (!extract32(cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
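/*
 * The extract32(cntkctl, 0, 2) test above reads CNTKCTL_EL1 (or CNTHCTL_EL2
 * in the E2H+TGE case) bits [1:0], i.e. EL0PCTEN and EL0VCTEN; if both are
 * zero, EL0 may not see CNTFRQ and the access traps.
 */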
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }

        /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
        if (hcr & HCR_E2H) {
            if (timeridx == GTIMER_PHYS &&
                !extract32(env->cp15.cnthctl_el2, 10, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        } else {
            /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
            if (arm_feature(env, ARM_FEATURE_EL2) &&
                timeridx == GTIMER_PHYS && !secure &&
                !extract32(env->cp15.cnthctl_el2, 1, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;

    case 1:
        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
        if (arm_feature(env, ARM_FEATURE_EL2) &&
            timeridx == GTIMER_PHYS && !secure &&
            (hcr & HCR_E2H
             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /*
         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
         * EL0 if EL0[PV]TEN is zero.
         */
        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }

        /* fall through */
    case 1:
        if (arm_feature(env, ARM_FEATURE_EL2) &&
            timeridx == GTIMER_PHYS && !secure) {
            if (hcr & HCR_E2H) {
                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}
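/*
 * Bit-numbering note for the two access functions above: with GTIMER_PHYS == 0
 * and GTIMER_VIRT == 1, CNTKCTL_EL1.EL0PCTEN/EL0VCTEN sit at bits [1:0] (so
 * timeridx indexes them directly), while EL0PTEN/EL0VTEN sit at bits 9 and 8
 * respectively, which is why the timer-register check uses "9 - timeridx".
 */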
2661 static CPAccessResult
gt_pct_access(CPUARMState
*env
,
2662 const ARMCPRegInfo
*ri
,
2665 return gt_counter_access(env
, GTIMER_PHYS
, isread
);
2668 static CPAccessResult
gt_vct_access(CPUARMState
*env
,
2669 const ARMCPRegInfo
*ri
,
2672 return gt_counter_access(env
, GTIMER_VIRT
, isread
);
2675 static CPAccessResult
gt_ptimer_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2678 return gt_timer_access(env
, GTIMER_PHYS
, isread
);
2681 static CPAccessResult
gt_vtimer_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2684 return gt_timer_access(env
, GTIMER_VIRT
, isread
);
2687 static CPAccessResult
gt_stimer_access(CPUARMState
*env
,
2688 const ARMCPRegInfo
*ri
,
2691 /* The AArch64 register view of the secure physical timer is
2692 * always accessible from EL3, and configurably accessible from
2695 switch (arm_current_el(env
)) {
2697 if (!arm_is_secure(env
)) {
2698 return CP_ACCESS_TRAP
;
2700 if (!(env
->cp15
.scr_el3
& SCR_ST
)) {
2701 return CP_ACCESS_TRAP_EL3
;
2703 return CP_ACCESS_OK
;
2706 return CP_ACCESS_TRAP
;
2708 return CP_ACCESS_OK
;
2710 g_assert_not_reached();
2714 static uint64_t gt_get_countervalue(CPUARMState
*env
)
2716 ARMCPU
*cpu
= env_archcpu(env
);
2718 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) / gt_cntfrq_period_ns(cpu
);
2721 static void gt_recalc_timer(ARMCPU
*cpu
, int timeridx
)
2723 ARMGenericTimer
*gt
= &cpu
->env
.cp15
.c14_timer
[timeridx
];
2726 /* Timer enabled: calculate and set current ISTATUS, irq, and
2727 * reset timer to when ISTATUS next has to change
2729 uint64_t offset
= timeridx
== GTIMER_VIRT
?
2730 cpu
->env
.cp15
.cntvoff_el2
: 0;
2731 uint64_t count
= gt_get_countervalue(&cpu
->env
);
2732 /* Note that this must be unsigned 64 bit arithmetic: */
2733 int istatus
= count
- offset
>= gt
->cval
;
2737 gt
->ctl
= deposit32(gt
->ctl
, 2, 1, istatus
);
2739 irqstate
= (istatus
&& !(gt
->ctl
& 2));
2740 qemu_set_irq(cpu
->gt_timer_outputs
[timeridx
], irqstate
);
2743 /* Next transition is when count rolls back over to zero */
2744 nexttick
= UINT64_MAX
;
2746 /* Next transition is when we hit cval */
2747 nexttick
= gt
->cval
+ offset
;
2749 /* Note that the desired next expiry time might be beyond the
2750 * signed-64-bit range of a QEMUTimer -- in this case we just
2751 * set the timer for as far in the future as possible. When the
2752 * timer expires we will reset the timer for any remaining period.
2754 if (nexttick
> INT64_MAX
/ gt_cntfrq_period_ns(cpu
)) {
2755 timer_mod_ns(cpu
->gt_timer
[timeridx
], INT64_MAX
);
2757 timer_mod(cpu
->gt_timer
[timeridx
], nexttick
);
2759 trace_arm_gt_recalc(timeridx
, irqstate
, nexttick
);
2761 /* Timer disabled: ISTATUS and timer output always clear */
2763 qemu_set_irq(cpu
->gt_timer_outputs
[timeridx
], 0);
2764 timer_del(cpu
->gt_timer
[timeridx
]);
2765 trace_arm_gt_recalc_disabled(timeridx
);
2769 static void gt_timer_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2772 ARMCPU
*cpu
= env_archcpu(env
);
2774 timer_del(cpu
->gt_timer
[timeridx
]);
2777 static uint64_t gt_cnt_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2779 return gt_get_countervalue(env
);
2782 static uint64_t gt_virt_cnt_offset(CPUARMState
*env
)
2786 switch (arm_current_el(env
)) {
2788 hcr
= arm_hcr_el2_eff(env
);
2789 if (hcr
& HCR_E2H
) {
2794 hcr
= arm_hcr_el2_eff(env
);
2795 if ((hcr
& (HCR_E2H
| HCR_TGE
)) == (HCR_E2H
| HCR_TGE
)) {
2801 return env
->cp15
.cntvoff_el2
;
2804 static uint64_t gt_virt_cnt_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2806 return gt_get_countervalue(env
) - gt_virt_cnt_offset(env
);
2809 static void gt_cval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2813 trace_arm_gt_cval_write(timeridx
, value
);
2814 env
->cp15
.c14_timer
[timeridx
].cval
= value
;
2815 gt_recalc_timer(env_archcpu(env
), timeridx
);
2818 static uint64_t gt_tval_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2821 uint64_t offset
= 0;
2825 case GTIMER_HYPVIRT
:
2826 offset
= gt_virt_cnt_offset(env
);
2830 return (uint32_t)(env
->cp15
.c14_timer
[timeridx
].cval
-
2831 (gt_get_countervalue(env
) - offset
));
2834 static void gt_tval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2838 uint64_t offset
= 0;
2842 case GTIMER_HYPVIRT
:
2843 offset
= gt_virt_cnt_offset(env
);
2847 trace_arm_gt_tval_write(timeridx
, value
);
2848 env
->cp15
.c14_timer
[timeridx
].cval
= gt_get_countervalue(env
) - offset
+
2849 sextract64(value
, 0, 32);
2850 gt_recalc_timer(env_archcpu(env
), timeridx
);
2853 static void gt_ctl_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2857 ARMCPU
*cpu
= env_archcpu(env
);
2858 uint32_t oldval
= env
->cp15
.c14_timer
[timeridx
].ctl
;
2860 trace_arm_gt_ctl_write(timeridx
, value
);
2861 env
->cp15
.c14_timer
[timeridx
].ctl
= deposit64(oldval
, 0, 2, value
);
2862 if ((oldval
^ value
) & 1) {
2863 /* Enable toggled */
2864 gt_recalc_timer(cpu
, timeridx
);
2865 } else if ((oldval
^ value
) & 2) {
2866 /* IMASK toggled: don't need to recalculate,
2867 * just set the interrupt line based on ISTATUS
2869 int irqstate
= (oldval
& 4) && !(value
& 2);
2871 trace_arm_gt_imask_toggle(timeridx
, irqstate
);
2872 qemu_set_irq(cpu
->gt_timer_outputs
[timeridx
], irqstate
);
2876 static void gt_phys_timer_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2878 gt_timer_reset(env
, ri
, GTIMER_PHYS
);
2881 static void gt_phys_cval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2884 gt_cval_write(env
, ri
, GTIMER_PHYS
, value
);
2887 static uint64_t gt_phys_tval_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2889 return gt_tval_read(env
, ri
, GTIMER_PHYS
);
2892 static void gt_phys_tval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2895 gt_tval_write(env
, ri
, GTIMER_PHYS
, value
);
2898 static void gt_phys_ctl_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2901 gt_ctl_write(env
, ri
, GTIMER_PHYS
, value
);
2904 static int gt_phys_redir_timeridx(CPUARMState
*env
)
2906 switch (arm_mmu_idx(env
)) {
2907 case ARMMMUIdx_E20_0
:
2908 case ARMMMUIdx_E20_2
:
2909 case ARMMMUIdx_E20_2_PAN
:
2916 static int gt_virt_redir_timeridx(CPUARMState
*env
)
2918 switch (arm_mmu_idx(env
)) {
2919 case ARMMMUIdx_E20_0
:
2920 case ARMMMUIdx_E20_2
:
2921 case ARMMMUIdx_E20_2_PAN
:
2922 return GTIMER_HYPVIRT
;
2928 static uint64_t gt_phys_redir_cval_read(CPUARMState
*env
,
2929 const ARMCPRegInfo
*ri
)
2931 int timeridx
= gt_phys_redir_timeridx(env
);
2932 return env
->cp15
.c14_timer
[timeridx
].cval
;
2935 static void gt_phys_redir_cval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2938 int timeridx
= gt_phys_redir_timeridx(env
);
2939 gt_cval_write(env
, ri
, timeridx
, value
);
2942 static uint64_t gt_phys_redir_tval_read(CPUARMState
*env
,
2943 const ARMCPRegInfo
*ri
)
2945 int timeridx
= gt_phys_redir_timeridx(env
);
2946 return gt_tval_read(env
, ri
, timeridx
);
2949 static void gt_phys_redir_tval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2952 int timeridx
= gt_phys_redir_timeridx(env
);
2953 gt_tval_write(env
, ri
, timeridx
, value
);
2956 static uint64_t gt_phys_redir_ctl_read(CPUARMState
*env
,
2957 const ARMCPRegInfo
*ri
)
2959 int timeridx
= gt_phys_redir_timeridx(env
);
2960 return env
->cp15
.c14_timer
[timeridx
].ctl
;
2963 static void gt_phys_redir_ctl_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2966 int timeridx
= gt_phys_redir_timeridx(env
);
2967 gt_ctl_write(env
, ri
, timeridx
, value
);
2970 static void gt_virt_timer_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2972 gt_timer_reset(env
, ri
, GTIMER_VIRT
);
2975 static void gt_virt_cval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2978 gt_cval_write(env
, ri
, GTIMER_VIRT
, value
);
2981 static uint64_t gt_virt_tval_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2983 return gt_tval_read(env
, ri
, GTIMER_VIRT
);
2986 static void gt_virt_tval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2989 gt_tval_write(env
, ri
, GTIMER_VIRT
, value
);
2992 static void gt_virt_ctl_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2995 gt_ctl_write(env
, ri
, GTIMER_VIRT
, value
);
2998 static void gt_cntvoff_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3001 ARMCPU
*cpu
= env_archcpu(env
);
3003 trace_arm_gt_cntvoff_write(value
);
3004 raw_write(env
, ri
, value
);
3005 gt_recalc_timer(cpu
, GTIMER_VIRT
);
3008 static uint64_t gt_virt_redir_cval_read(CPUARMState
*env
,
3009 const ARMCPRegInfo
*ri
)
3011 int timeridx
= gt_virt_redir_timeridx(env
);
3012 return env
->cp15
.c14_timer
[timeridx
].cval
;
3015 static void gt_virt_redir_cval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3018 int timeridx
= gt_virt_redir_timeridx(env
);
3019 gt_cval_write(env
, ri
, timeridx
, value
);
3022 static uint64_t gt_virt_redir_tval_read(CPUARMState
*env
,
3023 const ARMCPRegInfo
*ri
)
3025 int timeridx
= gt_virt_redir_timeridx(env
);
3026 return gt_tval_read(env
, ri
, timeridx
);
3029 static void gt_virt_redir_tval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3032 int timeridx
= gt_virt_redir_timeridx(env
);
3033 gt_tval_write(env
, ri
, timeridx
, value
);
3036 static uint64_t gt_virt_redir_ctl_read(CPUARMState
*env
,
3037 const ARMCPRegInfo
*ri
)
3039 int timeridx
= gt_virt_redir_timeridx(env
);
3040 return env
->cp15
.c14_timer
[timeridx
].ctl
;
3043 static void gt_virt_redir_ctl_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3046 int timeridx
= gt_virt_redir_timeridx(env
);
3047 gt_ctl_write(env
, ri
, timeridx
, value
);
3050 static void gt_hyp_timer_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3052 gt_timer_reset(env
, ri
, GTIMER_HYP
);
3055 static void gt_hyp_cval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3058 gt_cval_write(env
, ri
, GTIMER_HYP
, value
);
3061 static uint64_t gt_hyp_tval_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3063 return gt_tval_read(env
, ri
, GTIMER_HYP
);
3066 static void gt_hyp_tval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3069 gt_tval_write(env
, ri
, GTIMER_HYP
, value
);
3072 static void gt_hyp_ctl_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3075 gt_ctl_write(env
, ri
, GTIMER_HYP
, value
);
3078 static void gt_sec_timer_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3080 gt_timer_reset(env
, ri
, GTIMER_SEC
);
3083 static void gt_sec_cval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3086 gt_cval_write(env
, ri
, GTIMER_SEC
, value
);
3089 static uint64_t gt_sec_tval_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3091 return gt_tval_read(env
, ri
, GTIMER_SEC
);
3094 static void gt_sec_tval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3097 gt_tval_write(env
, ri
, GTIMER_SEC
, value
);
3100 static void gt_sec_ctl_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3103 gt_ctl_write(env
, ri
, GTIMER_SEC
, value
);
3106 static void gt_hv_timer_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3108 gt_timer_reset(env
, ri
, GTIMER_HYPVIRT
);
3111 static void gt_hv_cval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3114 gt_cval_write(env
, ri
, GTIMER_HYPVIRT
, value
);
3117 static uint64_t gt_hv_tval_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3119 return gt_tval_read(env
, ri
, GTIMER_HYPVIRT
);
3122 static void gt_hv_tval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3125 gt_tval_write(env
, ri
, GTIMER_HYPVIRT
, value
);
3128 static void gt_hv_ctl_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3131 gt_ctl_write(env
, ri
, GTIMER_HYPVIRT
, value
);
3134 void arm_gt_ptimer_cb(void *opaque
)
3136 ARMCPU
*cpu
= opaque
;
3138 gt_recalc_timer(cpu
, GTIMER_PHYS
);
3141 void arm_gt_vtimer_cb(void *opaque
)
3143 ARMCPU
*cpu
= opaque
;
3145 gt_recalc_timer(cpu
, GTIMER_VIRT
);
3148 void arm_gt_htimer_cb(void *opaque
)
3150 ARMCPU
*cpu
= opaque
;
3152 gt_recalc_timer(cpu
, GTIMER_HYP
);
3155 void arm_gt_stimer_cb(void *opaque
)
3157 ARMCPU
*cpu
= opaque
;
3159 gt_recalc_timer(cpu
, GTIMER_SEC
);
3162 void arm_gt_hvtimer_cb(void *opaque
)
3164 ARMCPU
*cpu
= opaque
;
3166 gt_recalc_timer(cpu
, GTIMER_HYPVIRT
);
3169 static void arm_gt_cntfrq_reset(CPUARMState
*env
, const ARMCPRegInfo
*opaque
)
3171 ARMCPU
*cpu
= env_archcpu(env
);
3173 cpu
->env
.cp15
.c14_cntfrq
= cpu
->gt_cntfrq_hz
;
3176 static const ARMCPRegInfo generic_timer_cp_reginfo
[] = {
3177 /* Note that CNTFRQ is purely reads-as-written for the benefit
3178 * of software; writing it doesn't actually change the timer frequency.
3179 * Our reset value matches the fixed frequency we implement the timer at.
3181 { .name
= "CNTFRQ", .cp
= 15, .crn
= 14, .crm
= 0, .opc1
= 0, .opc2
= 0,
3182 .type
= ARM_CP_ALIAS
,
3183 .access
= PL1_RW
| PL0_R
, .accessfn
= gt_cntfrq_access
,
3184 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c14_cntfrq
),
3186 { .name
= "CNTFRQ_EL0", .state
= ARM_CP_STATE_AA64
,
3187 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 0, .opc2
= 0,
3188 .access
= PL1_RW
| PL0_R
, .accessfn
= gt_cntfrq_access
,
3189 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_cntfrq
),
3190 .resetfn
= arm_gt_cntfrq_reset
,
3192 /* overall control: mostly access permissions */
3193 { .name
= "CNTKCTL", .state
= ARM_CP_STATE_BOTH
,
3194 .opc0
= 3, .opc1
= 0, .crn
= 14, .crm
= 1, .opc2
= 0,
3196 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_cntkctl
),
3199 /* per-timer control */
3200 { .name
= "CNTP_CTL", .cp
= 15, .crn
= 14, .crm
= 2, .opc1
= 0, .opc2
= 1,
3201 .secure
= ARM_CP_SECSTATE_NS
,
3202 .type
= ARM_CP_IO
| ARM_CP_ALIAS
, .access
= PL0_RW
,
3203 .accessfn
= gt_ptimer_access
,
3204 .fieldoffset
= offsetoflow32(CPUARMState
,
3205 cp15
.c14_timer
[GTIMER_PHYS
].ctl
),
3206 .readfn
= gt_phys_redir_ctl_read
, .raw_readfn
= raw_read
,
3207 .writefn
= gt_phys_redir_ctl_write
, .raw_writefn
= raw_write
,
3209 { .name
= "CNTP_CTL_S",
3210 .cp
= 15, .crn
= 14, .crm
= 2, .opc1
= 0, .opc2
= 1,
3211 .secure
= ARM_CP_SECSTATE_S
,
3212 .type
= ARM_CP_IO
| ARM_CP_ALIAS
, .access
= PL0_RW
,
3213 .accessfn
= gt_ptimer_access
,
3214 .fieldoffset
= offsetoflow32(CPUARMState
,
3215 cp15
.c14_timer
[GTIMER_SEC
].ctl
),
3216 .writefn
= gt_sec_ctl_write
, .raw_writefn
= raw_write
,
3218 { .name
= "CNTP_CTL_EL0", .state
= ARM_CP_STATE_AA64
,
3219 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 2, .opc2
= 1,
3220 .type
= ARM_CP_IO
, .access
= PL0_RW
,
3221 .accessfn
= gt_ptimer_access
,
3222 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_PHYS
].ctl
),
3224 .readfn
= gt_phys_redir_ctl_read
, .raw_readfn
= raw_read
,
3225 .writefn
= gt_phys_redir_ctl_write
, .raw_writefn
= raw_write
,
3227 { .name
= "CNTV_CTL", .cp
= 15, .crn
= 14, .crm
= 3, .opc1
= 0, .opc2
= 1,
3228 .type
= ARM_CP_IO
| ARM_CP_ALIAS
, .access
= PL0_RW
,
3229 .accessfn
= gt_vtimer_access
,
3230 .fieldoffset
= offsetoflow32(CPUARMState
,
3231 cp15
.c14_timer
[GTIMER_VIRT
].ctl
),
3232 .readfn
= gt_virt_redir_ctl_read
, .raw_readfn
= raw_read
,
3233 .writefn
= gt_virt_redir_ctl_write
, .raw_writefn
= raw_write
,
3235 { .name
= "CNTV_CTL_EL0", .state
= ARM_CP_STATE_AA64
,
3236 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 3, .opc2
= 1,
3237 .type
= ARM_CP_IO
, .access
= PL0_RW
,
3238 .accessfn
= gt_vtimer_access
,
3239 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_VIRT
].ctl
),
3241 .readfn
= gt_virt_redir_ctl_read
, .raw_readfn
= raw_read
,
3242 .writefn
= gt_virt_redir_ctl_write
, .raw_writefn
= raw_write
,
3244 /* TimerValue views: a 32 bit downcounting view of the underlying state */
3245 { .name
= "CNTP_TVAL", .cp
= 15, .crn
= 14, .crm
= 2, .opc1
= 0, .opc2
= 0,
3246 .secure
= ARM_CP_SECSTATE_NS
,
3247 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL0_RW
,
3248 .accessfn
= gt_ptimer_access
,
3249 .readfn
= gt_phys_redir_tval_read
, .writefn
= gt_phys_redir_tval_write
,
3251 { .name
= "CNTP_TVAL_S",
3252 .cp
= 15, .crn
= 14, .crm
= 2, .opc1
= 0, .opc2
= 0,
3253 .secure
= ARM_CP_SECSTATE_S
,
3254 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL0_RW
,
3255 .accessfn
= gt_ptimer_access
,
3256 .readfn
= gt_sec_tval_read
, .writefn
= gt_sec_tval_write
,
3258 { .name
= "CNTP_TVAL_EL0", .state
= ARM_CP_STATE_AA64
,
3259 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 2, .opc2
= 0,
3260 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL0_RW
,
3261 .accessfn
= gt_ptimer_access
, .resetfn
= gt_phys_timer_reset
,
3262 .readfn
= gt_phys_redir_tval_read
, .writefn
= gt_phys_redir_tval_write
,
3264 { .name
= "CNTV_TVAL", .cp
= 15, .crn
= 14, .crm
= 3, .opc1
= 0, .opc2
= 0,
3265 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL0_RW
,
3266 .accessfn
= gt_vtimer_access
,
3267 .readfn
= gt_virt_redir_tval_read
, .writefn
= gt_virt_redir_tval_write
,
3269 { .name
= "CNTV_TVAL_EL0", .state
= ARM_CP_STATE_AA64
,
3270 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 3, .opc2
= 0,
3271 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL0_RW
,
3272 .accessfn
= gt_vtimer_access
, .resetfn
= gt_virt_timer_reset
,
3273 .readfn
= gt_virt_redir_tval_read
, .writefn
= gt_virt_redir_tval_write
,
3275 /* The counter itself */
3276 { .name
= "CNTPCT", .cp
= 15, .crm
= 14, .opc1
= 0,
3277 .access
= PL0_R
, .type
= ARM_CP_64BIT
| ARM_CP_NO_RAW
| ARM_CP_IO
,
3278 .accessfn
= gt_pct_access
,
3279 .readfn
= gt_cnt_read
, .resetfn
= arm_cp_reset_ignore
,
3281 { .name
= "CNTPCT_EL0", .state
= ARM_CP_STATE_AA64
,
3282 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 0, .opc2
= 1,
3283 .access
= PL0_R
, .type
= ARM_CP_NO_RAW
| ARM_CP_IO
,
3284 .accessfn
= gt_pct_access
, .readfn
= gt_cnt_read
,
3286 { .name
= "CNTVCT", .cp
= 15, .crm
= 14, .opc1
= 1,
3287 .access
= PL0_R
, .type
= ARM_CP_64BIT
| ARM_CP_NO_RAW
| ARM_CP_IO
,
3288 .accessfn
= gt_vct_access
,
3289 .readfn
= gt_virt_cnt_read
, .resetfn
= arm_cp_reset_ignore
,
3291 { .name
= "CNTVCT_EL0", .state
= ARM_CP_STATE_AA64
,
3292 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 0, .opc2
= 2,
3293 .access
= PL0_R
, .type
= ARM_CP_NO_RAW
| ARM_CP_IO
,
3294 .accessfn
= gt_vct_access
, .readfn
= gt_virt_cnt_read
,
3296 /* Comparison value, indicating when the timer goes off */
3297 { .name
= "CNTP_CVAL", .cp
= 15, .crm
= 14, .opc1
= 2,
3298 .secure
= ARM_CP_SECSTATE_NS
,
3300 .type
= ARM_CP_64BIT
| ARM_CP_IO
| ARM_CP_ALIAS
,
3301 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_PHYS
].cval
),
3302 .accessfn
= gt_ptimer_access
,
3303 .readfn
= gt_phys_redir_cval_read
, .raw_readfn
= raw_read
,
3304 .writefn
= gt_phys_redir_cval_write
, .raw_writefn
= raw_write
,
3306 { .name
= "CNTP_CVAL_S", .cp
= 15, .crm
= 14, .opc1
= 2,
3307 .secure
= ARM_CP_SECSTATE_S
,
3309 .type
= ARM_CP_64BIT
| ARM_CP_IO
| ARM_CP_ALIAS
,
3310 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_SEC
].cval
),
3311 .accessfn
= gt_ptimer_access
,
3312 .writefn
= gt_sec_cval_write
, .raw_writefn
= raw_write
,
3314 { .name
= "CNTP_CVAL_EL0", .state
= ARM_CP_STATE_AA64
,
3315 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 2, .opc2
= 2,
3318 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_PHYS
].cval
),
3319 .resetvalue
= 0, .accessfn
= gt_ptimer_access
,
3320 .readfn
= gt_phys_redir_cval_read
, .raw_readfn
= raw_read
,
3321 .writefn
= gt_phys_redir_cval_write
, .raw_writefn
= raw_write
,
3323 { .name
= "CNTV_CVAL", .cp
= 15, .crm
= 14, .opc1
= 3,
3325 .type
= ARM_CP_64BIT
| ARM_CP_IO
| ARM_CP_ALIAS
,
3326 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_VIRT
].cval
),
3327 .accessfn
= gt_vtimer_access
,
3328 .readfn
= gt_virt_redir_cval_read
, .raw_readfn
= raw_read
,
3329 .writefn
= gt_virt_redir_cval_write
, .raw_writefn
= raw_write
,
3331 { .name
= "CNTV_CVAL_EL0", .state
= ARM_CP_STATE_AA64
,
3332 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 3, .opc2
= 2,
3335 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_VIRT
].cval
),
3336 .resetvalue
= 0, .accessfn
= gt_vtimer_access
,
3337 .readfn
= gt_virt_redir_cval_read
, .raw_readfn
= raw_read
,
3338 .writefn
= gt_virt_redir_cval_write
, .raw_writefn
= raw_write
,
3340 /* Secure timer -- this is actually restricted to only EL3
3341 * and configurably Secure-EL1 via the accessfn.
3343 { .name
= "CNTPS_TVAL_EL1", .state
= ARM_CP_STATE_AA64
,
3344 .opc0
= 3, .opc1
= 7, .crn
= 14, .crm
= 2, .opc2
= 0,
3345 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL1_RW
,
3346 .accessfn
= gt_stimer_access
,
3347 .readfn
= gt_sec_tval_read
,
3348 .writefn
= gt_sec_tval_write
,
3349 .resetfn
= gt_sec_timer_reset
,
3351 { .name
= "CNTPS_CTL_EL1", .state
= ARM_CP_STATE_AA64
,
3352 .opc0
= 3, .opc1
= 7, .crn
= 14, .crm
= 2, .opc2
= 1,
3353 .type
= ARM_CP_IO
, .access
= PL1_RW
,
3354 .accessfn
= gt_stimer_access
,
3355 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_SEC
].ctl
),
3357 .writefn
= gt_sec_ctl_write
, .raw_writefn
= raw_write
,
3359 { .name
= "CNTPS_CVAL_EL1", .state
= ARM_CP_STATE_AA64
,
3360 .opc0
= 3, .opc1
= 7, .crn
= 14, .crm
= 2, .opc2
= 2,
3361 .type
= ARM_CP_IO
, .access
= PL1_RW
,
3362 .accessfn
= gt_stimer_access
,
3363 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_SEC
].cval
),
3364 .writefn
= gt_sec_cval_write
, .raw_writefn
= raw_write
,
3369 static CPAccessResult
e2h_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3372 if (!(arm_hcr_el2_eff(env
) & HCR_E2H
)) {
3373 return CP_ACCESS_TRAP
;
3375 return CP_ACCESS_OK
;
3380 /* In user-mode most of the generic timer registers are inaccessible
3381 * however modern kernels (4.12+) allow access to cntvct_el0
3384 static uint64_t gt_virt_cnt_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3386 ARMCPU
*cpu
= env_archcpu(env
);
3388 /* Currently we have no support for QEMUTimer in linux-user so we
3389 * can't call gt_get_countervalue(env), instead we directly
3390 * call the lower level functions.
3392 return cpu_get_clock() / gt_cntfrq_period_ns(cpu
);
3395 static const ARMCPRegInfo generic_timer_cp_reginfo
[] = {
3396 { .name
= "CNTFRQ_EL0", .state
= ARM_CP_STATE_AA64
,
3397 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 0, .opc2
= 0,
3398 .type
= ARM_CP_CONST
, .access
= PL0_R
/* no PL1_RW in linux-user */,
3399 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_cntfrq
),
3400 .resetvalue
= NANOSECONDS_PER_SECOND
/ GTIMER_SCALE
,
3402 { .name
= "CNTVCT_EL0", .state
= ARM_CP_STATE_AA64
,
3403 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 0, .opc2
= 2,
3404 .access
= PL0_R
, .type
= ARM_CP_NO_RAW
| ARM_CP_IO
,
3405 .readfn
= gt_virt_cnt_read
,
3412 static void par_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
3414 if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
3415 raw_write(env
, ri
, value
);
3416 } else if (arm_feature(env
, ARM_FEATURE_V7
)) {
3417 raw_write(env
, ri
, value
& 0xfffff6ff);
3419 raw_write(env
, ri
, value
& 0xfffff1ff);
3423 #ifndef CONFIG_USER_ONLY
3424 /* get_phys_addr() isn't present for user-mode-only targets */
3426 static CPAccessResult
ats_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3430 /* The ATS12NSO* operations must trap to EL3 if executed in
3431 * Secure EL1 (which can only happen if EL3 is AArch64).
3432 * They are simply UNDEF if executed from NS EL1.
3433 * They function normally from EL2 or EL3.
3435 if (arm_current_el(env
) == 1) {
3436 if (arm_is_secure_below_el3(env
)) {
3437 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3
;
3439 return CP_ACCESS_TRAP_UNCATEGORIZED
;
3442 return CP_ACCESS_OK
;
3445 static uint64_t do_ats_write(CPUARMState
*env
, uint64_t value
,
3446 MMUAccessType access_type
, ARMMMUIdx mmu_idx
)
3449 target_ulong page_size
;
3453 bool format64
= false;
3454 MemTxAttrs attrs
= {};
3455 ARMMMUFaultInfo fi
= {};
3456 ARMCacheAttrs cacheattrs
= {};
3458 ret
= get_phys_addr(env
, value
, access_type
, mmu_idx
, &phys_addr
, &attrs
,
3459 &prot
, &page_size
, &fi
, &cacheattrs
);
3463 * Some kinds of translation fault must cause exceptions rather
3464 * than being reported in the PAR.
3466 int current_el
= arm_current_el(env
);
3468 uint32_t syn
, fsr
, fsc
;
3469 bool take_exc
= false;
3471 if (fi
.s1ptw
&& current_el
== 1 && !arm_is_secure(env
)
3472 && arm_mmu_idx_is_stage1_of_2(mmu_idx
)) {
3474 * Synchronous stage 2 fault on an access made as part of the
3475 * translation table walk for AT S1E0* or AT S1E1* insn
3476 * executed from NS EL1. If this is a synchronous external abort
3477 * and SCR_EL3.EA == 1, then we take a synchronous external abort
3478 * to EL3. Otherwise the fault is taken as an exception to EL2,
3479 * and HPFAR_EL2 holds the faulting IPA.
3481 if (fi
.type
== ARMFault_SyncExternalOnWalk
&&
3482 (env
->cp15
.scr_el3
& SCR_EA
)) {
3485 env
->cp15
.hpfar_el2
= extract64(fi
.s2addr
, 12, 47) << 4;
3489 } else if (fi
.type
== ARMFault_SyncExternalOnWalk
) {
3491 * Synchronous external aborts during a translation table walk
3492 * are taken as Data Abort exceptions.
3495 if (current_el
== 3) {
3501 target_el
= exception_target_el(env
);
3507 /* Construct FSR and FSC using same logic as arm_deliver_fault() */
3508 if (target_el
== 2 || arm_el_is_aa64(env
, target_el
) ||
3509 arm_s1_regime_using_lpae_format(env
, mmu_idx
)) {
3510 fsr
= arm_fi_to_lfsc(&fi
);
3511 fsc
= extract32(fsr
, 0, 6);
3513 fsr
= arm_fi_to_sfsc(&fi
);
3517 * Report exception with ESR indicating a fault due to a
3518 * translation table walk for a cache maintenance instruction.
3520 syn
= syn_data_abort_no_iss(current_el
== target_el
,
3521 fi
.ea
, 1, fi
.s1ptw
, 1, fsc
);
3522 env
->exception
.vaddress
= value
;
3523 env
->exception
.fsr
= fsr
;
3524 raise_exception(env
, EXCP_DATA_ABORT
, syn
, target_el
);
3530 } else if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
3533 * * TTBCR.EAE determines whether the result is returned using the
3534 * 32-bit or the 64-bit PAR format
3535 * * Instructions executed in Hyp mode always use the 64bit format
3537 * ATS1S2NSOxx uses the 64bit format if any of the following is true:
3538 * * The Non-secure TTBCR.EAE bit is set to 1
3539 * * The implementation includes EL2, and the value of HCR.VM is 1
3541 * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
3543 * ATS1Hx always uses the 64bit format.
3545 format64
= arm_s1_regime_using_lpae_format(env
, mmu_idx
);
3547 if (arm_feature(env
, ARM_FEATURE_EL2
)) {
3548 if (mmu_idx
== ARMMMUIdx_E10_0
||
3549 mmu_idx
== ARMMMUIdx_E10_1
||
3550 mmu_idx
== ARMMMUIdx_E10_1_PAN
) {
3551 format64
|= env
->cp15
.hcr_el2
& (HCR_VM
| HCR_DC
);
3553 format64
|= arm_current_el(env
) == 2;
3559 /* Create a 64-bit PAR */
3560 par64
= (1 << 11); /* LPAE bit always set */
3562 par64
|= phys_addr
& ~0xfffULL
;
3563 if (!attrs
.secure
) {
3564 par64
|= (1 << 9); /* NS */
3566 par64
|= (uint64_t)cacheattrs
.attrs
<< 56; /* ATTR */
3567 par64
|= cacheattrs
.shareability
<< 7; /* SH */
3569 uint32_t fsr
= arm_fi_to_lfsc(&fi
);
3572 par64
|= (fsr
& 0x3f) << 1; /* FS */
3574 par64
|= (1 << 9); /* S */
3577 par64
|= (1 << 8); /* PTW */
3581 /* fsr is a DFSR/IFSR value for the short descriptor
3582 * translation table format (with WnR always clear).
3583 * Convert it to a 32-bit PAR.
3586 /* We do not set any attribute bits in the PAR */
3587 if (page_size
== (1 << 24)
3588 && arm_feature(env
, ARM_FEATURE_V7
)) {
3589 par64
= (phys_addr
& 0xff000000) | (1 << 1);
3591 par64
= phys_addr
& 0xfffff000;
3593 if (!attrs
.secure
) {
3594 par64
|= (1 << 9); /* NS */
3597 uint32_t fsr
= arm_fi_to_sfsc(&fi
);
3599 par64
= ((fsr
& (1 << 10)) >> 5) | ((fsr
& (1 << 12)) >> 6) |
3600 ((fsr
& 0xf) << 1) | 1;
3606 static void ats_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
3608 MMUAccessType access_type
= ri
->opc2
& 1 ? MMU_DATA_STORE
: MMU_DATA_LOAD
;
3611 int el
= arm_current_el(env
);
3612 bool secure
= arm_is_secure_below_el3(env
);
3614 switch (ri
->opc2
& 6) {
3616 /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
3619 mmu_idx
= ARMMMUIdx_SE3
;
3622 g_assert(!secure
); /* TODO: ARMv8.4-SecEL2 */
3625 if (ri
->crm
== 9 && (env
->uncached_cpsr
& CPSR_PAN
)) {
3626 mmu_idx
= (secure
? ARMMMUIdx_SE10_1_PAN
3627 : ARMMMUIdx_Stage1_E1_PAN
);
3629 mmu_idx
= secure
? ARMMMUIdx_SE10_1
: ARMMMUIdx_Stage1_E1
;
3633 g_assert_not_reached();
3637 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3640 mmu_idx
= ARMMMUIdx_SE10_0
;
3643 mmu_idx
= ARMMMUIdx_Stage1_E0
;
3646 mmu_idx
= secure
? ARMMMUIdx_SE10_0
: ARMMMUIdx_Stage1_E0
;
3649 g_assert_not_reached();
3653 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
3654 mmu_idx
= ARMMMUIdx_E10_1
;
3657 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
3658 mmu_idx
= ARMMMUIdx_E10_0
;
3661 g_assert_not_reached();
3664 par64
= do_ats_write(env
, value
, access_type
, mmu_idx
);
3666 A32_BANKED_CURRENT_REG_SET(env
, par
, par64
);
3669 static void ats1h_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3672 MMUAccessType access_type
= ri
->opc2
& 1 ? MMU_DATA_STORE
: MMU_DATA_LOAD
;
3675 par64
= do_ats_write(env
, value
, access_type
, ARMMMUIdx_E2
);
3677 A32_BANKED_CURRENT_REG_SET(env
, par
, par64
);
3680 static CPAccessResult
at_s1e2_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3683 if (arm_current_el(env
) == 3 && !(env
->cp15
.scr_el3
& SCR_NS
)) {
3684 return CP_ACCESS_TRAP
;
3686 return CP_ACCESS_OK
;
3689 static void ats_write64(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3692 MMUAccessType access_type
= ri
->opc2
& 1 ? MMU_DATA_STORE
: MMU_DATA_LOAD
;
3694 int secure
= arm_is_secure_below_el3(env
);
3696 switch (ri
->opc2
& 6) {
3699 case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
3700 if (ri
->crm
== 9 && (env
->pstate
& PSTATE_PAN
)) {
3701 mmu_idx
= (secure
? ARMMMUIdx_SE10_1_PAN
3702 : ARMMMUIdx_Stage1_E1_PAN
);
3704 mmu_idx
= secure
? ARMMMUIdx_SE10_1
: ARMMMUIdx_Stage1_E1
;
3707 case 4: /* AT S1E2R, AT S1E2W */
3708 mmu_idx
= ARMMMUIdx_E2
;
3710 case 6: /* AT S1E3R, AT S1E3W */
3711 mmu_idx
= ARMMMUIdx_SE3
;
3714 g_assert_not_reached();
3717 case 2: /* AT S1E0R, AT S1E0W */
3718 mmu_idx
= secure
? ARMMMUIdx_SE10_0
: ARMMMUIdx_Stage1_E0
;
3720 case 4: /* AT S12E1R, AT S12E1W */
3721 mmu_idx
= secure
? ARMMMUIdx_SE10_1
: ARMMMUIdx_E10_1
;
3723 case 6: /* AT S12E0R, AT S12E0W */
3724 mmu_idx
= secure
? ARMMMUIdx_SE10_0
: ARMMMUIdx_E10_0
;
3727 g_assert_not_reached();
3730 env
->cp15
.par_el
[1] = do_ats_write(env
, value
, access_type
, mmu_idx
);
3734 static const ARMCPRegInfo vapa_cp_reginfo
[] = {
3735 { .name
= "PAR", .cp
= 15, .crn
= 7, .crm
= 4, .opc1
= 0, .opc2
= 0,
3736 .access
= PL1_RW
, .resetvalue
= 0,
3737 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.par_s
),
3738 offsetoflow32(CPUARMState
, cp15
.par_ns
) },
3739 .writefn
= par_write
},
3740 #ifndef CONFIG_USER_ONLY
3741 /* This underdecoding is safe because the reginfo is NO_RAW. */
3742 { .name
= "ATS", .cp
= 15, .crn
= 7, .crm
= 8, .opc1
= 0, .opc2
= CP_ANY
,
3743 .access
= PL1_W
, .accessfn
= ats_access
,
3744 .writefn
= ats_write
, .type
= ARM_CP_NO_RAW
| ARM_CP_RAISES_EXC
},
3749 /* Return basic MPU access permission bits. */
3750 static uint32_t simple_mpu_ap_bits(uint32_t val
)
3757 for (i
= 0; i
< 16; i
+= 2) {
3758 ret
|= (val
>> i
) & mask
;
3764 /* Pad basic MPU access permission bits to extended format. */
3765 static uint32_t extended_mpu_ap_bits(uint32_t val
)
3772 for (i
= 0; i
< 16; i
+= 2) {
3773 ret
|= (val
& mask
) << i
;
3779 static void pmsav5_data_ap_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3782 env
->cp15
.pmsav5_data_ap
= extended_mpu_ap_bits(value
);
3785 static uint64_t pmsav5_data_ap_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3787 return simple_mpu_ap_bits(env
->cp15
.pmsav5_data_ap
);
3790 static void pmsav5_insn_ap_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3793 env
->cp15
.pmsav5_insn_ap
= extended_mpu_ap_bits(value
);
3796 static uint64_t pmsav5_insn_ap_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3798 return simple_mpu_ap_bits(env
->cp15
.pmsav5_insn_ap
);
3801 static uint64_t pmsav7_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3803 uint32_t *u32p
= *(uint32_t **)raw_ptr(env
, ri
);
3809 u32p
+= env
->pmsav7
.rnr
[M_REG_NS
];
3813 static void pmsav7_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3816 ARMCPU
*cpu
= env_archcpu(env
);
3817 uint32_t *u32p
= *(uint32_t **)raw_ptr(env
, ri
);
3823 u32p
+= env
->pmsav7
.rnr
[M_REG_NS
];
3824 tlb_flush(CPU(cpu
)); /* Mappings may have changed - purge! */
3828 static void pmsav7_rgnr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3831 ARMCPU
*cpu
= env_archcpu(env
);
3832 uint32_t nrgs
= cpu
->pmsav7_dregion
;
3834 if (value
>= nrgs
) {
3835 qemu_log_mask(LOG_GUEST_ERROR
,
3836 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
3837 " > %" PRIu32
"\n", (uint32_t)value
, nrgs
);
3841 raw_write(env
, ri
, value
);
3844 static const ARMCPRegInfo pmsav7_cp_reginfo
[] = {
3845 /* Reset for all these registers is handled in arm_cpu_reset(),
3846 * because the PMSAv7 is also used by M-profile CPUs, which do
3847 * not register cpregs but still need the state to be reset.
3849 { .name
= "DRBAR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 1, .opc2
= 0,
3850 .access
= PL1_RW
, .type
= ARM_CP_NO_RAW
,
3851 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.drbar
),
3852 .readfn
= pmsav7_read
, .writefn
= pmsav7_write
,
3853 .resetfn
= arm_cp_reset_ignore
},
3854 { .name
= "DRSR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 1, .opc2
= 2,
3855 .access
= PL1_RW
, .type
= ARM_CP_NO_RAW
,
3856 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.drsr
),
3857 .readfn
= pmsav7_read
, .writefn
= pmsav7_write
,
3858 .resetfn
= arm_cp_reset_ignore
},
3859 { .name
= "DRACR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 1, .opc2
= 4,
3860 .access
= PL1_RW
, .type
= ARM_CP_NO_RAW
,
3861 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.dracr
),
3862 .readfn
= pmsav7_read
, .writefn
= pmsav7_write
,
3863 .resetfn
= arm_cp_reset_ignore
},
3864 { .name
= "RGNR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 2, .opc2
= 0,
3866 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.rnr
[M_REG_NS
]),
3867 .writefn
= pmsav7_rgnr_write
,
3868 .resetfn
= arm_cp_reset_ignore
},

static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};

static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}

static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2. */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}

static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = env_archcpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * If we are running with E2&0 regime, then an ASID is active.
     * Flush if that might be changing. Note we're not checking
     * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
     * holds the active ASID, only checking the field that might.
     */
    if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
        (arm_hcr_el2_eff(env) & HCR_E2H)) {
        tlb_flush_by_mmuidx(env_cpu(env),
                            ARMMMUIdxBit_E20_2 |
                            ARMMMUIdxBit_E20_2_PAN |
                            ARMMMUIdxBit_E20_0);
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    /*
     * A change in VMID to the stage2 page table (Stage2) invalidates
     * the combined stage 1&2 tlbs (EL10_1 and EL10_0).
     */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_E10_1 |
                            ARMMMUIdxBit_E10_1_PAN |
                            ARMMMUIdxBit_E10_0 |
                            ARMMMUIdxBit_Stage2);
        raw_write(env, ri, value);
    }
}

static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_tcr_el12_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};

/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .accessfn = access_tvm_trvm,
    .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
                           offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
};

static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}

static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};

static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};

static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_PAN;
}

static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
}

static const ARMCPRegInfo pan_reginfo = {
    .name = "PAN", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_pan_read, .writefn = aa64_pan_write
};

static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_UAO;
}

static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
}

static const ARMCPRegInfo uao_reginfo = {
    .name = "UAO", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_uao_read, .writefn = aa64_uao_write
};

static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
                                              const ARMCPRegInfo *ri,
                                              bool isread)
{
    /* Cache invalidate/clean to Point of Coherency or Persistence... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */
        if (arm_hcr_el2_eff(env) & HCR_TPCP) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
                                              const ARMCPRegInfo *ri,
                                              bool isread)
{
    /* Cache invalidate/clean to Point of Unification... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */
        if (arm_hcr_el2_eff(env) & HCR_TPU) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static int vae1_tlbmask(CPUARMState *env)
{
    /* Since we exclude secure first, we may read HCR_EL2 directly. */
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE10_1 |
               ARMMMUIdxBit_SE10_1_PAN |
               ARMMMUIdxBit_SE10_0;
    } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE))
               == (HCR_E2H | HCR_TGE)) {
        return ARMMMUIdxBit_E20_2 |
               ARMMMUIdxBit_E20_2_PAN |
               ARMMMUIdxBit_E20_0;
    } else {
        return ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0;
    }
}

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
    } else {
        tlb_flush_by_mmuidx(cs, mask);
    }
}

static int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE10_1 |
               ARMMMUIdxBit_SE10_1_PAN |
               ARMMMUIdxBit_SE10_0;
    } else if (arm_feature(env, ARM_FEATURE_EL2)) {
        return ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0 |
               ARMMMUIdxBit_Stage2;
    } else {
        return ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0;
    }
}

static int e2_tlbmask(CPUARMState *env)
{
    /* TODO: ARMv8.4-SecEL2 */
    return ARMMMUIdxBit_E20_0 |
           ARMMMUIdxBit_E20_2 |
           ARMMMUIdxBit_E20_2_PAN |
           ARMMMUIdxBit_E2;
}

static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
}
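
/*
 * Note: for the by-VA TLBI ops below, the Xt operand carries VA[55:12] in
 * bits [43:0]; shifting left by 12 and sign-extracting 56 bits rebuilds the
 * page-aligned, sign-extended address that the QEMU softmmu TLB is keyed on.
 */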

static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
}

static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (tlb_force_broadcast(env)) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
    }
}

static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_Stage2);
}

static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TDZ) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TDZ) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}

static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));

    if (ri->type & ARM_CP_SUPPRESS_TB_END) {
        /*
         * Normally we would always end the TB on an SCTLR write; see the
         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
         * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
         * of hflags from the translator, so do it here.
         */
        arm_rebuild_hflags(env);
    }
}

static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}

static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
      .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};

/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_NO_RAW,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_CONST,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
    } else {
        valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        if (cpu_isar_feature(aa64_vh, cpu)) {
            valid_mask |= HCR_E2H;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= HCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= HCR_API | HCR_APK;
        }
    }

    /* Clear RES0 bits. */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC Disables stage1 and enables stage2 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}

static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    do_hcr_write(env, value, 0);
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
}

/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (arm_is_secure_below_el3(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state". This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2". With lots of enumeration
         * on a per-field basis. In current QEMU, this condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        return 0;
    }

    /*
     * For a cpu that supports both aarch64 and aarch32, we can set bits
     * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
     * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
     */
    if (!arm_el_is_aa64(env, 2)) {
        uint64_t aa32_valid;

        /*
         * These bits are up-to-date as of ARMv8.6.
         * For HCR, it's easiest to list just the 2 bits that are invalid.
         * For HCR2, list those that are valid.
         */
        aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
        aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
        ret &= aa32_valid;
    }

    if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.6. */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
                     HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
                     HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}
static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0x3 << 10);
        value |= env->cp15.cptr_el[2] & (0x3 << 10);
    }
    env->cp15.cptr_el[2] = value;
}

static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= 0x3 << 10;
    }
    return value;
}

static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
      /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};

static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY
/* Test if system register redirection is to occur in the current state. */
static bool redirect_for_e2h(CPUARMState *env)
{
    return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
}

static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPReadFn *readfn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register. */
        ri = ri->opaque;
        readfn = ri->readfn;
    } else {
        readfn = ri->orig_readfn;
    }
    if (readfn == NULL) {
        readfn = raw_read;
    }
    return readfn(env, ri);
}

static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    CPWriteFn *writefn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register. */
        ri = ri->opaque;
        writefn = ri->writefn;
    } else {
        writefn = ri->orig_writefn;
    }
    if (writefn == NULL) {
        writefn = raw_write;
    }
    writefn(env, ri, value);
}

static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
{
    struct E2HAlias {
        uint32_t src_key, dst_key, new_key;
        const char *src_name, *dst_name, *new_name;
        bool (*feature)(const ARMISARegisters *id);
    };

#define K(op0, op1, crn, crm, op2) \
    ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)

    static const struct E2HAlias aliases[] = {
        { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0),
          "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
        { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2),
          "CPACR", "CPTR_EL2", "CPACR_EL12" },
        { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0),
          "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
        { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1),
          "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
        { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2),
          "TCR_EL1", "TCR_EL2", "TCR_EL12" },
        { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0),
          "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
        { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1),
          "ELR_EL1", "ELR_EL2", "ELR_EL12" },
        { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0),
          "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
        { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1),
          "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
        { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0),
          "ESR_EL1", "ESR_EL2", "ESR_EL12" },
        { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0),
          "FAR_EL1", "FAR_EL2", "FAR_EL12" },
        { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
          "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
        { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
          "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
        { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
          "VBAR", "VBAR_EL2", "VBAR_EL12" },
        { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
          "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
        { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
          "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },

        /*
         * Note that redirection of ZCR is mentioned in the description
         * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
         * not in the summary table.
         */
        { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
          "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },

        /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
        /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
    };
#undef K

    size_t i;

    for (i = 0; i < ARRAY_SIZE(aliases); i++) {
        const struct E2HAlias *a = &aliases[i];
        ARMCPRegInfo *src_reg, *dst_reg;

        if (a->feature && !a->feature(&cpu->isar)) {
            continue;
        }

        src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key);
        dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key);
        g_assert(src_reg != NULL);
        g_assert(dst_reg != NULL);

        /* Cross-compare names to detect typos in the keys. */
        g_assert(strcmp(src_reg->name, a->src_name) == 0);
        g_assert(strcmp(dst_reg->name, a->dst_name) == 0);

        /* None of the core system registers use opaque; we will. */
        g_assert(src_reg->opaque == NULL);

        /* Create alias before redirection so we dup the right data. */
        if (a->new_key) {
            ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
            uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t));
            bool ok;

            new_reg->name = a->new_name;
            new_reg->type |= ARM_CP_ALIAS;
            /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
            new_reg->access &= PL2_RW | PL3_RW;

            ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
            g_assert(ok);
        }

        src_reg->opaque = dst_reg;
        src_reg->orig_readfn = src_reg->readfn ?: raw_read;
        src_reg->orig_writefn = src_reg->writefn ?: raw_write;
        if (!src_reg->raw_readfn) {
            src_reg->raw_readfn = raw_read;
        }
        if (!src_reg->raw_writefn) {
            src_reg->raw_writefn = raw_write;
        }
        src_reg->readfn = el2_e2h_read;
        src_reg->writefn = el2_e2h_write;
    }
}
#endif

static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TID2) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TID2) {
            return CP_ACCESS_TRAP_EL2;
        }
    }

    if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}

static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);

    if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        bool disabled = false;

        /* The CPACR.ZEN controls traps to EL1:
         * 0, 2 : trap EL0 and EL1 accesses
         * 1    : trap only EL0 accesses
         * 3    : trap no accesses
         */
        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            /* route_to_el2 */
            return hcr_el2 & HCR_TGE ? 2 : 1;
        }

        /* Check CPACR.FPEN. */
        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            return 0;
        }
    }

    /* CPTR_EL2. Since TZ and TFP are positive,
     * they will be zero when EL2 is not present.
     */
    if (el <= 2 && !arm_is_secure_below_el3(env)) {
        if (env->cp15.cptr_el[2] & CPTR_TZ) {
            return 2;
        }
        if (env->cp15.cptr_el[2] & CPTR_TFP) {
            return 0;
        }
    }

    /* CPTR_EL3. Since EZ is negative we must check for EL3. */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}

static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
{
    uint32_t end_len;

    end_len = start_len &= 0xf;
    if (!test_bit(start_len, cpu->sve_vq_map)) {
        end_len = find_last_bit(cpu->sve_vq_map, start_len);
        assert(end_len < start_len);
    }
    return end_len;
}

/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }

    return sve_zcr_get_valid_len(cpu, zcr_len);
}

static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI. */
    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}

static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}

void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}

static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;

        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}

void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}

static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = arm_num_brps(cpu);
    wrps = arm_num_wrps(cpu);
    ctx_cmps = arm_num_ctx_cmps(cpu);

    assert(ctx_cmps <= brps);

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}

static void define_pmu_regs(ARMCPU *cpu)
{
    /*
     * v7 performance monitor control register: same implementor
     * field as main ID register, and we implement four counters in
     * addition to the cycle count register.
     */
    unsigned int i, pmcrn = 4;
    ARMCPRegInfo pmcr = {
        .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
        .access = PL0_RW,
        .type = ARM_CP_IO | ARM_CP_ALIAS,
        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
        .accessfn = pmreg_access, .writefn = pmcr_write,
        .raw_writefn = raw_write,
    };
    ARMCPRegInfo pmcr64 = {
        .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
        .access = PL0_RW, .accessfn = pmreg_access,
        .type = ARM_CP_IO,
        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
        .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) |
                      PMCRLC,
        .writefn = pmcr_write, .raw_writefn = raw_write,
    };
    define_one_arm_cp_reg(cpu, &pmcr);
    define_one_arm_cp_reg(cpu, &pmcr64);
    for (i = 0; i < pmcrn; i++) {
        char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
        char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
        char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
        char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
        ARMCPRegInfo pmev_regs[] = {
            { .name = pmevcntr_name, .cp = 15, .crn = 14,
              .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
              .accessfn = pmreg_access },
            { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
              .type = ARM_CP_IO,
              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
              .raw_readfn = pmevcntr_rawread,
              .raw_writefn = pmevcntr_rawwrite },
            { .name = pmevtyper_name, .cp = 15, .crn = 14,
              .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
              .accessfn = pmreg_access },
            { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
              .type = ARM_CP_IO,
              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
              .raw_writefn = pmevtyper_rawwrite },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, pmev_regs);
        g_free(pmevcntr_name);
        g_free(pmevcntr_el0_name);
        g_free(pmevtyper_name);
        g_free(pmevtyper_el0_name);
    }
    if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (cpu_isar_feature(any_pmu_8_4, cpu)) {
        static const ARMCPRegInfo v84_pmmir = {
            .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
            .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
            .resetvalue = 0
        };
        define_one_arm_cp_reg(cpu, &v84_pmmir);
    }
}

/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr1 = cpu->id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

#ifndef CONFIG_USER_ONLY
static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
#endif

/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access ok in secure mode. */
        return CP_ACCESS_OK;
    }
    return access_lor_ns(env);
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode. */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env);
}

/*
 * A trivial implementation of ARMv8.1-LOR leaves all of these
 * registers fixed at 0, which indicates that there are zero
 * supported Limited Ordering regions.
 */
static const ARMCPRegInfo lor_reginfo[] = {
    { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
      .access = PL1_R, .accessfn = access_lorid,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_feature(env, ARM_FEATURE_EL2) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
    REGINFO_SENTINEL
};

static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000. */
    env->NF = env->CF = env->VF = 0, env->ZF = 1;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest. There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      ri->name, error_get_pretty(err));
        error_free(err);

        env->ZF = 0; /* NZCF = 0100 */
        return 0;
    }
    return ret;
}

/* We do not support re-seeding, so the two registers operate the same. */
static const ARMCPRegInfo rndr_reginfo[] = {
    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
      .access = PL0_R, .readfn = rndr_readfn },
    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
      .access = PL0_R, .readfn = rndr_readfn },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY
static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
                           uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /* CTR_EL0 System register -> DminLine, bits [19:16] */
    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
    uint64_t vaddr_in = (uint64_t) value;
    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
    void *haddr;
    int mem_idx = cpu_mmu_index(env, false);

    /* This won't be crossing page boundaries */
    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
    if (haddr) {

        ram_addr_t offset;
        MemoryRegion *mr;

        /* RCU lock is already being held */
        mr = memory_region_from_host(haddr, &offset);

        if (mr) {
            memory_region_do_writeback(mr, offset, dline_size);
        }
    }
}

static const ARMCPRegInfo dcpop_reg[] = {
    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dcpodp_reg[] = {
    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
    REGINFO_SENTINEL
};
#endif /*CONFIG_USER_ONLY*/

#endif /* TARGET_AARCH64 */

static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo predinv_reginfo[] = {
    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /*
     * Note the AArch32 opcodes have a different OPC1.
     */
    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    REGINFO_SENTINEL
};

static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read the high 32 bits of the current CCSIDR */
    return extract64(ccsidr_read(env, ri), 32, 32);
}

static const ARMCPRegInfo ccsidr2_reginfo[] = {
    { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};

static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid3(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo jazelle_regs[] = {
    { .name = "JIDR",
      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_R, .accessfn = access_jazelle,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JOSCR",
      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JMCR",
      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo vhe_reginfo[] = {
    { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
    { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
#ifndef CONFIG_USER_ONLY
    { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
      .fieldoffset =
        offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hv_timer_reset,
      .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
    { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO, .access = PL2_RW,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
      .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
    { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
    { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
    { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
#endif
    REGINFO_SENTINEL
};
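
/*
 * Note (illustrative): the *_EL02 entries above are the extra encodings
 * added by ARMv8.1-VHE.  With HCR_EL2.E2H == 1, e.g. an access to
 * CNTP_CTL_EL02 at EL2 reaches the same state as CNTP_CTL_EL0 does at
 * EL0/EL1, which is why these reginfos alias the GTIMER_PHYS/GTIMER_VIRT
 * fields and are gated by e2h_access().
 */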
#ifndef CONFIG_USER_ONLY
static const ARMCPRegInfo ats1e1_reginfo[] = {
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo ats1cp_reginfo[] = {
    { .name = "ATS1CPRP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
    { .name = "ATS1CPWP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
    REGINFO_SENTINEL
};
#endif
/*
 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
 * is non-zero, which is never for ARMv7, optionally in ARMv8
 * and mandatorily for ARMv8.2 and up.
 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
 * implementation is RAZ/WI we can ignore this detail, as we
 * do for ACTLR.
 */
static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
    { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_tacr,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
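
/*
 * These two registers are only defined later, gated on
 * cpu_isar_feature(aa32_ac2, cpu), i.e. when the CPU's ID_MMFR4.AC2
 * field reports them as present (see the AUXCR handling further down
 * in register_cp_regs_for_features()).
 */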
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
7162 { .name
= "ID_PFR0", .state
= ARM_CP_STATE_BOTH
,
7163 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 0,
7164 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7165 .accessfn
= access_aa32_tid3
,
7166 .resetvalue
= cpu
->id_pfr0
},
7167 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
7168 * the value of the GIC field until after we define these regs.
7170 { .name
= "ID_PFR1", .state
= ARM_CP_STATE_BOTH
,
7171 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 1,
7172 .access
= PL1_R
, .type
= ARM_CP_NO_RAW
,
7173 .accessfn
= access_aa32_tid3
,
7174 .readfn
= id_pfr1_read
,
7175 .writefn
= arm_cp_write_ignore
},
7176 { .name
= "ID_DFR0", .state
= ARM_CP_STATE_BOTH
,
7177 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 2,
7178 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7179 .accessfn
= access_aa32_tid3
,
7180 .resetvalue
= cpu
->isar
.id_dfr0
},
7181 { .name
= "ID_AFR0", .state
= ARM_CP_STATE_BOTH
,
7182 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 3,
7183 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7184 .accessfn
= access_aa32_tid3
,
7185 .resetvalue
= cpu
->id_afr0
},
7186 { .name
= "ID_MMFR0", .state
= ARM_CP_STATE_BOTH
,
7187 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 4,
7188 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7189 .accessfn
= access_aa32_tid3
,
7190 .resetvalue
= cpu
->isar
.id_mmfr0
},
7191 { .name
= "ID_MMFR1", .state
= ARM_CP_STATE_BOTH
,
7192 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 5,
7193 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7194 .accessfn
= access_aa32_tid3
,
7195 .resetvalue
= cpu
->isar
.id_mmfr1
},
7196 { .name
= "ID_MMFR2", .state
= ARM_CP_STATE_BOTH
,
7197 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 6,
7198 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7199 .accessfn
= access_aa32_tid3
,
7200 .resetvalue
= cpu
->isar
.id_mmfr2
},
7201 { .name
= "ID_MMFR3", .state
= ARM_CP_STATE_BOTH
,
7202 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 7,
7203 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7204 .accessfn
= access_aa32_tid3
,
7205 .resetvalue
= cpu
->isar
.id_mmfr3
},
7206 { .name
= "ID_ISAR0", .state
= ARM_CP_STATE_BOTH
,
7207 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 0,
7208 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7209 .accessfn
= access_aa32_tid3
,
7210 .resetvalue
= cpu
->isar
.id_isar0
},
7211 { .name
= "ID_ISAR1", .state
= ARM_CP_STATE_BOTH
,
7212 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 1,
7213 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7214 .accessfn
= access_aa32_tid3
,
7215 .resetvalue
= cpu
->isar
.id_isar1
},
7216 { .name
= "ID_ISAR2", .state
= ARM_CP_STATE_BOTH
,
7217 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 2,
7218 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7219 .accessfn
= access_aa32_tid3
,
7220 .resetvalue
= cpu
->isar
.id_isar2
},
7221 { .name
= "ID_ISAR3", .state
= ARM_CP_STATE_BOTH
,
7222 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 3,
7223 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7224 .accessfn
= access_aa32_tid3
,
7225 .resetvalue
= cpu
->isar
.id_isar3
},
7226 { .name
= "ID_ISAR4", .state
= ARM_CP_STATE_BOTH
,
7227 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 4,
7228 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7229 .accessfn
= access_aa32_tid3
,
7230 .resetvalue
= cpu
->isar
.id_isar4
},
7231 { .name
= "ID_ISAR5", .state
= ARM_CP_STATE_BOTH
,
7232 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 5,
7233 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7234 .accessfn
= access_aa32_tid3
,
7235 .resetvalue
= cpu
->isar
.id_isar5
},
7236 { .name
= "ID_MMFR4", .state
= ARM_CP_STATE_BOTH
,
7237 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 6,
7238 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7239 .accessfn
= access_aa32_tid3
,
7240 .resetvalue
= cpu
->isar
.id_mmfr4
},
7241 { .name
= "ID_ISAR6", .state
= ARM_CP_STATE_BOTH
,
7242 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 7,
7243 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7244 .accessfn
= access_aa32_tid3
,
7245 .resetvalue
= cpu
->isar
.id_isar6
},
7248 define_arm_cp_regs(cpu
, v6_idregs
);
7249 define_arm_cp_regs(cpu
, v6_cp_reginfo
);
7251 define_arm_cp_regs(cpu
, not_v6_cp_reginfo
);
7253 if (arm_feature(env
, ARM_FEATURE_V6K
)) {
7254 define_arm_cp_regs(cpu
, v6k_cp_reginfo
);
7256 if (arm_feature(env
, ARM_FEATURE_V7MP
) &&
7257 !arm_feature(env
, ARM_FEATURE_PMSA
)) {
7258 define_arm_cp_regs(cpu
, v7mp_cp_reginfo
);
7260 if (arm_feature(env
, ARM_FEATURE_V7VE
)) {
7261 define_arm_cp_regs(cpu
, pmovsset_cp_reginfo
);
7263 if (arm_feature(env
, ARM_FEATURE_V7
)) {
7264 ARMCPRegInfo clidr
= {
7265 .name
= "CLIDR", .state
= ARM_CP_STATE_BOTH
,
7266 .opc0
= 3, .crn
= 0, .crm
= 0, .opc1
= 1, .opc2
= 1,
7267 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7268 .accessfn
= access_aa64_tid2
,
7269 .resetvalue
= cpu
->clidr
7271 define_one_arm_cp_reg(cpu
, &clidr
);
7272 define_arm_cp_regs(cpu
, v7_cp_reginfo
);
7273 define_debug_regs(cpu
);
7274 define_pmu_regs(cpu
);
7276 define_arm_cp_regs(cpu
, not_v7_cp_reginfo
);
7278 if (arm_feature(env
, ARM_FEATURE_V8
)) {
7279 /* AArch64 ID registers, which all have impdef reset values.
7280 * Note that within the ID register ranges the unused slots
7281 * must all RAZ, not UNDEF; future architecture versions may
7282 * define new registers here.
7284 ARMCPRegInfo v8_idregs
[] = {
7286 * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
7287 * emulation because we don't know the right value for the
7288 * GIC field until after we define these regs.
7290 { .name
= "ID_AA64PFR0_EL1", .state
= ARM_CP_STATE_AA64
,
7291 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 0,
7293 #ifdef CONFIG_USER_ONLY
7294 .type
= ARM_CP_CONST
,
7295 .resetvalue
= cpu
->isar
.id_aa64pfr0
7297 .type
= ARM_CP_NO_RAW
,
7298 .accessfn
= access_aa64_tid3
,
7299 .readfn
= id_aa64pfr0_read
,
7300 .writefn
= arm_cp_write_ignore
7303 { .name
= "ID_AA64PFR1_EL1", .state
= ARM_CP_STATE_AA64
,
7304 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 1,
7305 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7306 .accessfn
= access_aa64_tid3
,
7307 .resetvalue
= cpu
->isar
.id_aa64pfr1
},
7308 { .name
= "ID_AA64PFR2_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7309 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 2,
7310 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7311 .accessfn
= access_aa64_tid3
,
7313 { .name
= "ID_AA64PFR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7314 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 3,
7315 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7316 .accessfn
= access_aa64_tid3
,
7318 { .name
= "ID_AA64ZFR0_EL1", .state
= ARM_CP_STATE_AA64
,
7319 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 4,
7320 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7321 .accessfn
= access_aa64_tid3
,
7322 /* At present, only SVEver == 0 is defined anyway. */
7324 { .name
= "ID_AA64PFR5_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7325 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 5,
7326 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7327 .accessfn
= access_aa64_tid3
,
7329 { .name
= "ID_AA64PFR6_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7330 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 6,
7331 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7332 .accessfn
= access_aa64_tid3
,
7334 { .name
= "ID_AA64PFR7_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7335 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 7,
7336 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7337 .accessfn
= access_aa64_tid3
,
7339 { .name
= "ID_AA64DFR0_EL1", .state
= ARM_CP_STATE_AA64
,
7340 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 0,
7341 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7342 .accessfn
= access_aa64_tid3
,
7343 .resetvalue
= cpu
->isar
.id_aa64dfr0
},
7344 { .name
= "ID_AA64DFR1_EL1", .state
= ARM_CP_STATE_AA64
,
7345 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 1,
7346 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7347 .accessfn
= access_aa64_tid3
,
7348 .resetvalue
= cpu
->isar
.id_aa64dfr1
},
7349 { .name
= "ID_AA64DFR2_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7350 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 2,
7351 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7352 .accessfn
= access_aa64_tid3
,
7354 { .name
= "ID_AA64DFR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7355 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 3,
7356 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7357 .accessfn
= access_aa64_tid3
,
7359 { .name
= "ID_AA64AFR0_EL1", .state
= ARM_CP_STATE_AA64
,
7360 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 4,
7361 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7362 .accessfn
= access_aa64_tid3
,
7363 .resetvalue
= cpu
->id_aa64afr0
},
7364 { .name
= "ID_AA64AFR1_EL1", .state
= ARM_CP_STATE_AA64
,
7365 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 5,
7366 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7367 .accessfn
= access_aa64_tid3
,
7368 .resetvalue
= cpu
->id_aa64afr1
},
7369 { .name
= "ID_AA64AFR2_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7370 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 6,
7371 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7372 .accessfn
= access_aa64_tid3
,
7374 { .name
= "ID_AA64AFR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7375 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 7,
7376 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7377 .accessfn
= access_aa64_tid3
,
7379 { .name
= "ID_AA64ISAR0_EL1", .state
= ARM_CP_STATE_AA64
,
7380 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 0,
7381 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7382 .accessfn
= access_aa64_tid3
,
7383 .resetvalue
= cpu
->isar
.id_aa64isar0
},
7384 { .name
= "ID_AA64ISAR1_EL1", .state
= ARM_CP_STATE_AA64
,
7385 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 1,
7386 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7387 .accessfn
= access_aa64_tid3
,
7388 .resetvalue
= cpu
->isar
.id_aa64isar1
},
7389 { .name
= "ID_AA64ISAR2_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7390 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 2,
7391 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7392 .accessfn
= access_aa64_tid3
,
7394 { .name
= "ID_AA64ISAR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7395 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 3,
7396 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7397 .accessfn
= access_aa64_tid3
,
7399 { .name
= "ID_AA64ISAR4_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7400 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 4,
7401 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7402 .accessfn
= access_aa64_tid3
,
7404 { .name
= "ID_AA64ISAR5_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7405 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 5,
7406 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7407 .accessfn
= access_aa64_tid3
,
7409 { .name
= "ID_AA64ISAR6_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7410 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 6,
7411 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7412 .accessfn
= access_aa64_tid3
,
7414 { .name
= "ID_AA64ISAR7_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7415 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 7,
7416 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7417 .accessfn
= access_aa64_tid3
,
7419 { .name
= "ID_AA64MMFR0_EL1", .state
= ARM_CP_STATE_AA64
,
7420 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 0,
7421 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7422 .accessfn
= access_aa64_tid3
,
7423 .resetvalue
= cpu
->isar
.id_aa64mmfr0
},
7424 { .name
= "ID_AA64MMFR1_EL1", .state
= ARM_CP_STATE_AA64
,
7425 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 1,
7426 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7427 .accessfn
= access_aa64_tid3
,
7428 .resetvalue
= cpu
->isar
.id_aa64mmfr1
},
7429 { .name
= "ID_AA64MMFR2_EL1", .state
= ARM_CP_STATE_AA64
,
7430 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 2,
7431 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7432 .accessfn
= access_aa64_tid3
,
7433 .resetvalue
= cpu
->isar
.id_aa64mmfr2
},
7434 { .name
= "ID_AA64MMFR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7435 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 3,
7436 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7437 .accessfn
= access_aa64_tid3
,
7439 { .name
= "ID_AA64MMFR4_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7440 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 4,
7441 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7442 .accessfn
= access_aa64_tid3
,
7444 { .name
= "ID_AA64MMFR5_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7445 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 5,
7446 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7447 .accessfn
= access_aa64_tid3
,
7449 { .name
= "ID_AA64MMFR6_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7450 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 6,
7451 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7452 .accessfn
= access_aa64_tid3
,
7454 { .name
= "ID_AA64MMFR7_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7455 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 7,
7456 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7457 .accessfn
= access_aa64_tid3
,
7459 { .name
= "MVFR0_EL1", .state
= ARM_CP_STATE_AA64
,
7460 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 0,
7461 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7462 .accessfn
= access_aa64_tid3
,
7463 .resetvalue
= cpu
->isar
.mvfr0
},
7464 { .name
= "MVFR1_EL1", .state
= ARM_CP_STATE_AA64
,
7465 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 1,
7466 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7467 .accessfn
= access_aa64_tid3
,
7468 .resetvalue
= cpu
->isar
.mvfr1
},
7469 { .name
= "MVFR2_EL1", .state
= ARM_CP_STATE_AA64
,
7470 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 2,
7471 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7472 .accessfn
= access_aa64_tid3
,
7473 .resetvalue
= cpu
->isar
.mvfr2
},
7474 { .name
= "MVFR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7475 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 3,
7476 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7477 .accessfn
= access_aa64_tid3
,
7479 { .name
= "MVFR4_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7480 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 4,
7481 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7482 .accessfn
= access_aa64_tid3
,
7484 { .name
= "MVFR5_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7485 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 5,
7486 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7487 .accessfn
= access_aa64_tid3
,
7489 { .name
= "MVFR6_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7490 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 6,
7491 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7492 .accessfn
= access_aa64_tid3
,
7494 { .name
= "MVFR7_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
7495 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 7,
7496 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7497 .accessfn
= access_aa64_tid3
,
7499 { .name
= "PMCEID0", .state
= ARM_CP_STATE_AA32
,
7500 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 12, .opc2
= 6,
7501 .access
= PL0_R
, .accessfn
= pmreg_access
, .type
= ARM_CP_CONST
,
7502 .resetvalue
= extract64(cpu
->pmceid0
, 0, 32) },
7503 { .name
= "PMCEID0_EL0", .state
= ARM_CP_STATE_AA64
,
7504 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 6,
7505 .access
= PL0_R
, .accessfn
= pmreg_access
, .type
= ARM_CP_CONST
,
7506 .resetvalue
= cpu
->pmceid0
},
7507 { .name
= "PMCEID1", .state
= ARM_CP_STATE_AA32
,
7508 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 12, .opc2
= 7,
7509 .access
= PL0_R
, .accessfn
= pmreg_access
, .type
= ARM_CP_CONST
,
7510 .resetvalue
= extract64(cpu
->pmceid1
, 0, 32) },
7511 { .name
= "PMCEID1_EL0", .state
= ARM_CP_STATE_AA64
,
7512 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 7,
7513 .access
= PL0_R
, .accessfn
= pmreg_access
, .type
= ARM_CP_CONST
,
7514 .resetvalue
= cpu
->pmceid1
},
7517 #ifdef CONFIG_USER_ONLY
7518 ARMCPRegUserSpaceInfo v8_user_idregs
[] = {
7519 { .name
= "ID_AA64PFR0_EL1",
7520 .exported_bits
= 0x000f000f00ff0000,
7521 .fixed_bits
= 0x0000000000000011 },
7522 { .name
= "ID_AA64PFR1_EL1",
7523 .exported_bits
= 0x00000000000000f0 },
7524 { .name
= "ID_AA64PFR*_EL1_RESERVED",
7526 { .name
= "ID_AA64ZFR0_EL1" },
7527 { .name
= "ID_AA64MMFR0_EL1",
7528 .fixed_bits
= 0x00000000ff000000 },
7529 { .name
= "ID_AA64MMFR1_EL1" },
7530 { .name
= "ID_AA64MMFR*_EL1_RESERVED",
7532 { .name
= "ID_AA64DFR0_EL1",
7533 .fixed_bits
= 0x0000000000000006 },
7534 { .name
= "ID_AA64DFR1_EL1" },
7535 { .name
= "ID_AA64DFR*_EL1_RESERVED",
7537 { .name
= "ID_AA64AFR*",
7539 { .name
= "ID_AA64ISAR0_EL1",
7540 .exported_bits
= 0x00fffffff0fffff0 },
7541 { .name
= "ID_AA64ISAR1_EL1",
7542 .exported_bits
= 0x000000f0ffffffff },
7543 { .name
= "ID_AA64ISAR*_EL1_RESERVED",
7545 REGUSERINFO_SENTINEL
7547 modify_arm_cp_regs(v8_idregs
, v8_user_idregs
);
7549 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
7550 if (!arm_feature(env
, ARM_FEATURE_EL3
) &&
7551 !arm_feature(env
, ARM_FEATURE_EL2
)) {
7552 ARMCPRegInfo rvbar
= {
7553 .name
= "RVBAR_EL1", .state
= ARM_CP_STATE_AA64
,
7554 .opc0
= 3, .opc1
= 0, .crn
= 12, .crm
= 0, .opc2
= 1,
7555 .type
= ARM_CP_CONST
, .access
= PL1_R
, .resetvalue
= cpu
->rvbar
7557 define_one_arm_cp_reg(cpu
, &rvbar
);
7559 define_arm_cp_regs(cpu
, v8_idregs
);
7560 define_arm_cp_regs(cpu
, v8_cp_reginfo
);
7562 if (arm_feature(env
, ARM_FEATURE_EL2
)) {
7563 uint64_t vmpidr_def
= mpidr_read_val(env
);
7564 ARMCPRegInfo vpidr_regs
[] = {
7565 { .name
= "VPIDR", .state
= ARM_CP_STATE_AA32
,
7566 .cp
= 15, .opc1
= 4, .crn
= 0, .crm
= 0, .opc2
= 0,
7567 .access
= PL2_RW
, .accessfn
= access_el3_aa32ns
,
7568 .resetvalue
= cpu
->midr
, .type
= ARM_CP_ALIAS
,
7569 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.vpidr_el2
) },
7570 { .name
= "VPIDR_EL2", .state
= ARM_CP_STATE_AA64
,
7571 .opc0
= 3, .opc1
= 4, .crn
= 0, .crm
= 0, .opc2
= 0,
7572 .access
= PL2_RW
, .resetvalue
= cpu
->midr
,
7573 .fieldoffset
= offsetof(CPUARMState
, cp15
.vpidr_el2
) },
7574 { .name
= "VMPIDR", .state
= ARM_CP_STATE_AA32
,
7575 .cp
= 15, .opc1
= 4, .crn
= 0, .crm
= 0, .opc2
= 5,
7576 .access
= PL2_RW
, .accessfn
= access_el3_aa32ns
,
7577 .resetvalue
= vmpidr_def
, .type
= ARM_CP_ALIAS
,
7578 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.vmpidr_el2
) },
7579 { .name
= "VMPIDR_EL2", .state
= ARM_CP_STATE_AA64
,
7580 .opc0
= 3, .opc1
= 4, .crn
= 0, .crm
= 0, .opc2
= 5,
7582 .resetvalue
= vmpidr_def
,
7583 .fieldoffset
= offsetof(CPUARMState
, cp15
.vmpidr_el2
) },
7586 define_arm_cp_regs(cpu
, vpidr_regs
);
7587 define_arm_cp_regs(cpu
, el2_cp_reginfo
);
7588 if (arm_feature(env
, ARM_FEATURE_V8
)) {
7589 define_arm_cp_regs(cpu
, el2_v8_cp_reginfo
);
7591 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
7592 if (!arm_feature(env
, ARM_FEATURE_EL3
)) {
7593 ARMCPRegInfo rvbar
= {
7594 .name
= "RVBAR_EL2", .state
= ARM_CP_STATE_AA64
,
7595 .opc0
= 3, .opc1
= 4, .crn
= 12, .crm
= 0, .opc2
= 1,
7596 .type
= ARM_CP_CONST
, .access
= PL2_R
, .resetvalue
= cpu
->rvbar
7598 define_one_arm_cp_reg(cpu
, &rvbar
);
7601 /* If EL2 is missing but higher ELs are enabled, we need to
7602 * register the no_el2 reginfos.
7604 if (arm_feature(env
, ARM_FEATURE_EL3
)) {
7605 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
7606 * of MIDR_EL1 and MPIDR_EL1.
7608 ARMCPRegInfo vpidr_regs
[] = {
7609 { .name
= "VPIDR_EL2", .state
= ARM_CP_STATE_BOTH
,
7610 .opc0
= 3, .opc1
= 4, .crn
= 0, .crm
= 0, .opc2
= 0,
7611 .access
= PL2_RW
, .accessfn
= access_el3_aa32ns_aa64any
,
7612 .type
= ARM_CP_CONST
, .resetvalue
= cpu
->midr
,
7613 .fieldoffset
= offsetof(CPUARMState
, cp15
.vpidr_el2
) },
7614 { .name
= "VMPIDR_EL2", .state
= ARM_CP_STATE_BOTH
,
7615 .opc0
= 3, .opc1
= 4, .crn
= 0, .crm
= 0, .opc2
= 5,
7616 .access
= PL2_RW
, .accessfn
= access_el3_aa32ns_aa64any
,
7617 .type
= ARM_CP_NO_RAW
,
7618 .writefn
= arm_cp_write_ignore
, .readfn
= mpidr_read
},
7621 define_arm_cp_regs(cpu
, vpidr_regs
);
7622 define_arm_cp_regs(cpu
, el3_no_el2_cp_reginfo
);
7623 if (arm_feature(env
, ARM_FEATURE_V8
)) {
7624 define_arm_cp_regs(cpu
, el3_no_el2_v8_cp_reginfo
);
7628 if (arm_feature(env
, ARM_FEATURE_EL3
)) {
7629 define_arm_cp_regs(cpu
, el3_cp_reginfo
);
7630 ARMCPRegInfo el3_regs
[] = {
7631 { .name
= "RVBAR_EL3", .state
= ARM_CP_STATE_AA64
,
7632 .opc0
= 3, .opc1
= 6, .crn
= 12, .crm
= 0, .opc2
= 1,
7633 .type
= ARM_CP_CONST
, .access
= PL3_R
, .resetvalue
= cpu
->rvbar
},
7634 { .name
= "SCTLR_EL3", .state
= ARM_CP_STATE_AA64
,
7635 .opc0
= 3, .opc1
= 6, .crn
= 1, .crm
= 0, .opc2
= 0,
7637 .raw_writefn
= raw_write
, .writefn
= sctlr_write
,
7638 .fieldoffset
= offsetof(CPUARMState
, cp15
.sctlr_el
[3]),
7639 .resetvalue
= cpu
->reset_sctlr
},
7643 define_arm_cp_regs(cpu
, el3_regs
);
7645 /* The behaviour of NSACR is sufficiently various that we don't
7646 * try to describe it in a single reginfo:
7647 * if EL3 is 64 bit, then trap to EL3 from S EL1,
7648 * reads as constant 0xc00 from NS EL1 and NS EL2
7649 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
7650 * if v7 without EL3, register doesn't exist
7651 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
7653 if (arm_feature(env
, ARM_FEATURE_EL3
)) {
7654 if (arm_feature(env
, ARM_FEATURE_AARCH64
)) {
7655 ARMCPRegInfo nsacr
= {
7656 .name
= "NSACR", .type
= ARM_CP_CONST
,
7657 .cp
= 15, .opc1
= 0, .crn
= 1, .crm
= 1, .opc2
= 2,
7658 .access
= PL1_RW
, .accessfn
= nsacr_access
,
7661 define_one_arm_cp_reg(cpu
, &nsacr
);
7663 ARMCPRegInfo nsacr
= {
7665 .cp
= 15, .opc1
= 0, .crn
= 1, .crm
= 1, .opc2
= 2,
7666 .access
= PL3_RW
| PL1_R
,
7668 .fieldoffset
= offsetof(CPUARMState
, cp15
.nsacr
)
7670 define_one_arm_cp_reg(cpu
, &nsacr
);
7673 if (arm_feature(env
, ARM_FEATURE_V8
)) {
7674 ARMCPRegInfo nsacr
= {
7675 .name
= "NSACR", .type
= ARM_CP_CONST
,
7676 .cp
= 15, .opc1
= 0, .crn
= 1, .crm
= 1, .opc2
= 2,
7680 define_one_arm_cp_reg(cpu
, &nsacr
);
7684 if (arm_feature(env
, ARM_FEATURE_PMSA
)) {
7685 if (arm_feature(env
, ARM_FEATURE_V6
)) {
7686 /* PMSAv6 not implemented */
7687 assert(arm_feature(env
, ARM_FEATURE_V7
));
7688 define_arm_cp_regs(cpu
, vmsa_pmsa_cp_reginfo
);
7689 define_arm_cp_regs(cpu
, pmsav7_cp_reginfo
);
7691 define_arm_cp_regs(cpu
, pmsav5_cp_reginfo
);
7694 define_arm_cp_regs(cpu
, vmsa_pmsa_cp_reginfo
);
7695 define_arm_cp_regs(cpu
, vmsa_cp_reginfo
);
7696 /* TTCBR2 is introduced with ARMv8.2-AA32HPD. */
7697 if (cpu_isar_feature(aa32_hpd
, cpu
)) {
7698 define_one_arm_cp_reg(cpu
, &ttbcr2_reginfo
);
7701 if (arm_feature(env
, ARM_FEATURE_THUMB2EE
)) {
7702 define_arm_cp_regs(cpu
, t2ee_cp_reginfo
);
7704 if (arm_feature(env
, ARM_FEATURE_GENERIC_TIMER
)) {
7705 define_arm_cp_regs(cpu
, generic_timer_cp_reginfo
);
7707 if (arm_feature(env
, ARM_FEATURE_VAPA
)) {
7708 define_arm_cp_regs(cpu
, vapa_cp_reginfo
);
7710 if (arm_feature(env
, ARM_FEATURE_CACHE_TEST_CLEAN
)) {
7711 define_arm_cp_regs(cpu
, cache_test_clean_cp_reginfo
);
7713 if (arm_feature(env
, ARM_FEATURE_CACHE_DIRTY_REG
)) {
7714 define_arm_cp_regs(cpu
, cache_dirty_status_cp_reginfo
);
7716 if (arm_feature(env
, ARM_FEATURE_CACHE_BLOCK_OPS
)) {
7717 define_arm_cp_regs(cpu
, cache_block_ops_cp_reginfo
);
7719 if (arm_feature(env
, ARM_FEATURE_OMAPCP
)) {
7720 define_arm_cp_regs(cpu
, omap_cp_reginfo
);
7722 if (arm_feature(env
, ARM_FEATURE_STRONGARM
)) {
7723 define_arm_cp_regs(cpu
, strongarm_cp_reginfo
);
7725 if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
7726 define_arm_cp_regs(cpu
, xscale_cp_reginfo
);
7728 if (arm_feature(env
, ARM_FEATURE_DUMMY_C15_REGS
)) {
7729 define_arm_cp_regs(cpu
, dummy_c15_cp_reginfo
);
7731 if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
7732 define_arm_cp_regs(cpu
, lpae_cp_reginfo
);
7734 if (cpu_isar_feature(aa32_jazelle
, cpu
)) {
7735 define_arm_cp_regs(cpu
, jazelle_regs
);
7737 /* Slightly awkwardly, the OMAP and StrongARM cores need all of
7738 * cp15 crn=0 to be writes-ignored, whereas for other cores they should
7739 * be read-only (ie write causes UNDEF exception).
7742 ARMCPRegInfo id_pre_v8_midr_cp_reginfo
[] = {
7743 /* Pre-v8 MIDR space.
7744 * Note that the MIDR isn't a simple constant register because
7745 * of the TI925 behaviour where writes to another register can
7746 * cause the MIDR value to change.
7748 * Unimplemented registers in the c15 0 0 0 space default to
7749 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
7750 * and friends override accordingly.
7753 .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= CP_ANY
,
7754 .access
= PL1_R
, .resetvalue
= cpu
->midr
,
7755 .writefn
= arm_cp_write_ignore
, .raw_writefn
= raw_write
,
7756 .readfn
= midr_read
,
7757 .fieldoffset
= offsetof(CPUARMState
, cp15
.c0_cpuid
),
7758 .type
= ARM_CP_OVERRIDE
},
7759 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
7761 .cp
= 15, .crn
= 0, .crm
= 3, .opc1
= 0, .opc2
= CP_ANY
,
7762 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
7764 .cp
= 15, .crn
= 0, .crm
= 4, .opc1
= 0, .opc2
= CP_ANY
,
7765 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
7767 .cp
= 15, .crn
= 0, .crm
= 5, .opc1
= 0, .opc2
= CP_ANY
,
7768 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
7770 .cp
= 15, .crn
= 0, .crm
= 6, .opc1
= 0, .opc2
= CP_ANY
,
7771 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
7773 .cp
= 15, .crn
= 0, .crm
= 7, .opc1
= 0, .opc2
= CP_ANY
,
7774 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
7777 ARMCPRegInfo id_v8_midr_cp_reginfo
[] = {
7778 { .name
= "MIDR_EL1", .state
= ARM_CP_STATE_BOTH
,
7779 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 0, .opc2
= 0,
7780 .access
= PL1_R
, .type
= ARM_CP_NO_RAW
, .resetvalue
= cpu
->midr
,
7781 .fieldoffset
= offsetof(CPUARMState
, cp15
.c0_cpuid
),
7782 .readfn
= midr_read
},
7783 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
7784 { .name
= "MIDR", .type
= ARM_CP_ALIAS
| ARM_CP_CONST
,
7785 .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 4,
7786 .access
= PL1_R
, .resetvalue
= cpu
->midr
},
7787 { .name
= "MIDR", .type
= ARM_CP_ALIAS
| ARM_CP_CONST
,
7788 .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 7,
7789 .access
= PL1_R
, .resetvalue
= cpu
->midr
},
7790 { .name
= "REVIDR_EL1", .state
= ARM_CP_STATE_BOTH
,
7791 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 0, .opc2
= 6,
7793 .accessfn
= access_aa64_tid1
,
7794 .type
= ARM_CP_CONST
, .resetvalue
= cpu
->revidr
},
7797 ARMCPRegInfo id_cp_reginfo
[] = {
7798 /* These are common to v8 and pre-v8 */
7800 .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 1,
7801 .access
= PL1_R
, .accessfn
= ctr_el0_access
,
7802 .type
= ARM_CP_CONST
, .resetvalue
= cpu
->ctr
},
7803 { .name
= "CTR_EL0", .state
= ARM_CP_STATE_AA64
,
7804 .opc0
= 3, .opc1
= 3, .opc2
= 1, .crn
= 0, .crm
= 0,
7805 .access
= PL0_R
, .accessfn
= ctr_el0_access
,
7806 .type
= ARM_CP_CONST
, .resetvalue
= cpu
->ctr
},
7807 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
7809 .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 2,
7811 .accessfn
= access_aa32_tid1
,
7812 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
7815 /* TLBTR is specific to VMSA */
7816 ARMCPRegInfo id_tlbtr_reginfo
= {
7818 .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 3,
7820 .accessfn
= access_aa32_tid1
,
7821 .type
= ARM_CP_CONST
, .resetvalue
= 0,
7823 /* MPUIR is specific to PMSA V6+ */
7824 ARMCPRegInfo id_mpuir_reginfo
= {
7826 .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 4,
7827 .access
= PL1_R
, .type
= ARM_CP_CONST
,
7828 .resetvalue
= cpu
->pmsav7_dregion
<< 8
7830 ARMCPRegInfo crn0_wi_reginfo
= {
7831 .name
= "CRN0_WI", .cp
= 15, .crn
= 0, .crm
= CP_ANY
,
7832 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_W
,
7833 .type
= ARM_CP_NOP
| ARM_CP_OVERRIDE
7835 #ifdef CONFIG_USER_ONLY
7836 ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo
[] = {
7837 { .name
= "MIDR_EL1",
7838 .exported_bits
= 0x00000000ffffffff },
7839 { .name
= "REVIDR_EL1" },
7840 REGUSERINFO_SENTINEL
7842 modify_arm_cp_regs(id_v8_midr_cp_reginfo
, id_v8_user_midr_cp_reginfo
);
7844 if (arm_feature(env
, ARM_FEATURE_OMAPCP
) ||
7845 arm_feature(env
, ARM_FEATURE_STRONGARM
)) {
7847 /* Register the blanket "writes ignored" value first to cover the
7848 * whole space. Then update the specific ID registers to allow write
7849 * access, so that they ignore writes rather than causing them to
7852 define_one_arm_cp_reg(cpu
, &crn0_wi_reginfo
);
7853 for (r
= id_pre_v8_midr_cp_reginfo
;
7854 r
->type
!= ARM_CP_SENTINEL
; r
++) {
7857 for (r
= id_cp_reginfo
; r
->type
!= ARM_CP_SENTINEL
; r
++) {
7860 id_mpuir_reginfo
.access
= PL1_RW
;
7861 id_tlbtr_reginfo
.access
= PL1_RW
;
7863 if (arm_feature(env
, ARM_FEATURE_V8
)) {
7864 define_arm_cp_regs(cpu
, id_v8_midr_cp_reginfo
);
7866 define_arm_cp_regs(cpu
, id_pre_v8_midr_cp_reginfo
);
7868 define_arm_cp_regs(cpu
, id_cp_reginfo
);
7869 if (!arm_feature(env
, ARM_FEATURE_PMSA
)) {
7870 define_one_arm_cp_reg(cpu
, &id_tlbtr_reginfo
);
7871 } else if (arm_feature(env
, ARM_FEATURE_V7
)) {
7872 define_one_arm_cp_reg(cpu
, &id_mpuir_reginfo
);
7876 if (arm_feature(env
, ARM_FEATURE_MPIDR
)) {
7877 ARMCPRegInfo mpidr_cp_reginfo
[] = {
7878 { .name
= "MPIDR_EL1", .state
= ARM_CP_STATE_BOTH
,
7879 .opc0
= 3, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 5,
7880 .access
= PL1_R
, .readfn
= mpidr_read
, .type
= ARM_CP_NO_RAW
},
7883 #ifdef CONFIG_USER_ONLY
7884 ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo
[] = {
7885 { .name
= "MPIDR_EL1",
7886 .fixed_bits
= 0x0000000080000000 },
7887 REGUSERINFO_SENTINEL
7889 modify_arm_cp_regs(mpidr_cp_reginfo
, mpidr_user_cp_reginfo
);
7891 define_arm_cp_regs(cpu
, mpidr_cp_reginfo
);
7894 if (arm_feature(env
, ARM_FEATURE_AUXCR
)) {
7895 ARMCPRegInfo auxcr_reginfo
[] = {
7896 { .name
= "ACTLR_EL1", .state
= ARM_CP_STATE_BOTH
,
7897 .opc0
= 3, .opc1
= 0, .crn
= 1, .crm
= 0, .opc2
= 1,
7898 .access
= PL1_RW
, .accessfn
= access_tacr
,
7899 .type
= ARM_CP_CONST
, .resetvalue
= cpu
->reset_auxcr
},
7900 { .name
= "ACTLR_EL2", .state
= ARM_CP_STATE_BOTH
,
7901 .opc0
= 3, .opc1
= 4, .crn
= 1, .crm
= 0, .opc2
= 1,
7902 .access
= PL2_RW
, .type
= ARM_CP_CONST
,
7904 { .name
= "ACTLR_EL3", .state
= ARM_CP_STATE_AA64
,
7905 .opc0
= 3, .opc1
= 6, .crn
= 1, .crm
= 0, .opc2
= 1,
7906 .access
= PL3_RW
, .type
= ARM_CP_CONST
,
7910 define_arm_cp_regs(cpu
, auxcr_reginfo
);
7911 if (cpu_isar_feature(aa32_ac2
, cpu
)) {
7912 define_arm_cp_regs(cpu
, actlr2_hactlr2_reginfo
);
7916 if (arm_feature(env
, ARM_FEATURE_CBAR
)) {
7918 * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
7919 * There are two flavours:
7920 * (1) older 32-bit only cores have a simple 32-bit CBAR
7921 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
7922 * 32-bit register visible to AArch32 at a different encoding
7923 * to the "flavour 1" register and with the bits rearranged to
7924 * be able to squash a 64-bit address into the 32-bit view.
7925 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
7926 * in future if we support AArch32-only configs of some of the
7927 * AArch64 cores we might need to add a specific feature flag
7928 * to indicate cores with "flavour 2" CBAR.
7930 if (arm_feature(env
, ARM_FEATURE_AARCH64
)) {
7931 /* 32 bit view is [31:18] 0...0 [43:32]. */
7932 uint32_t cbar32
= (extract64(cpu
->reset_cbar
, 18, 14) << 18)
7933 | extract64(cpu
->reset_cbar
, 32, 12);
7934 ARMCPRegInfo cbar_reginfo
[] = {
7936 .type
= ARM_CP_CONST
,
7937 .cp
= 15, .crn
= 15, .crm
= 3, .opc1
= 1, .opc2
= 0,
7938 .access
= PL1_R
, .resetvalue
= cbar32
},
7939 { .name
= "CBAR_EL1", .state
= ARM_CP_STATE_AA64
,
7940 .type
= ARM_CP_CONST
,
7941 .opc0
= 3, .opc1
= 1, .crn
= 15, .crm
= 3, .opc2
= 0,
7942 .access
= PL1_R
, .resetvalue
= cpu
->reset_cbar
},
7945 /* We don't implement a r/w 64 bit CBAR currently */
7946 assert(arm_feature(env
, ARM_FEATURE_CBAR_RO
));
7947 define_arm_cp_regs(cpu
, cbar_reginfo
);
7949 ARMCPRegInfo cbar
= {
7951 .cp
= 15, .crn
= 15, .crm
= 0, .opc1
= 4, .opc2
= 0,
7952 .access
= PL1_R
|PL3_W
, .resetvalue
= cpu
->reset_cbar
,
7953 .fieldoffset
= offsetof(CPUARMState
,
7954 cp15
.c15_config_base_address
)
7956 if (arm_feature(env
, ARM_FEATURE_CBAR_RO
)) {
7957 cbar
.access
= PL1_R
;
7958 cbar
.fieldoffset
= 0;
7959 cbar
.type
= ARM_CP_CONST
;
7961 define_one_arm_cp_reg(cpu
, &cbar
);
7965 if (arm_feature(env
, ARM_FEATURE_VBAR
)) {
7966 ARMCPRegInfo vbar_cp_reginfo
[] = {
7967 { .name
= "VBAR", .state
= ARM_CP_STATE_BOTH
,
7968 .opc0
= 3, .crn
= 12, .crm
= 0, .opc1
= 0, .opc2
= 0,
7969 .access
= PL1_RW
, .writefn
= vbar_write
,
7970 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.vbar_s
),
7971 offsetof(CPUARMState
, cp15
.vbar_ns
) },
7975 define_arm_cp_regs(cpu
, vbar_cp_reginfo
);
7978 /* Generic registers whose values depend on the implementation */
7980 ARMCPRegInfo sctlr
= {
7981 .name
= "SCTLR", .state
= ARM_CP_STATE_BOTH
,
7982 .opc0
= 3, .opc1
= 0, .crn
= 1, .crm
= 0, .opc2
= 0,
7983 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
7984 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.sctlr_s
),
7985 offsetof(CPUARMState
, cp15
.sctlr_ns
) },
7986 .writefn
= sctlr_write
, .resetvalue
= cpu
->reset_sctlr
,
7987 .raw_writefn
= raw_write
,
7989 if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
7990 /* Normally we would always end the TB on an SCTLR write, but Linux
7991 * arch/arm/mach-pxa/sleep.S expects two instructions following
7992 * an MMU enable to execute from cache. Imitate this behaviour.
7994 sctlr
.type
|= ARM_CP_SUPPRESS_TB_END
;
7996 define_one_arm_cp_reg(cpu
, &sctlr
);
7999 if (cpu_isar_feature(aa64_lor
, cpu
)) {
8000 define_arm_cp_regs(cpu
, lor_reginfo
);
8002 if (cpu_isar_feature(aa64_pan
, cpu
)) {
8003 define_one_arm_cp_reg(cpu
, &pan_reginfo
);
8005 #ifndef CONFIG_USER_ONLY
8006 if (cpu_isar_feature(aa64_ats1e1
, cpu
)) {
8007 define_arm_cp_regs(cpu
, ats1e1_reginfo
);
8009 if (cpu_isar_feature(aa32_ats1e1
, cpu
)) {
8010 define_arm_cp_regs(cpu
, ats1cp_reginfo
);
8013 if (cpu_isar_feature(aa64_uao
, cpu
)) {
8014 define_one_arm_cp_reg(cpu
, &uao_reginfo
);
8017 if (arm_feature(env
, ARM_FEATURE_EL2
) && cpu_isar_feature(aa64_vh
, cpu
)) {
8018 define_arm_cp_regs(cpu
, vhe_reginfo
);
8021 if (cpu_isar_feature(aa64_sve
, cpu
)) {
8022 define_one_arm_cp_reg(cpu
, &zcr_el1_reginfo
);
8023 if (arm_feature(env
, ARM_FEATURE_EL2
)) {
8024 define_one_arm_cp_reg(cpu
, &zcr_el2_reginfo
);
8026 define_one_arm_cp_reg(cpu
, &zcr_no_el2_reginfo
);
8028 if (arm_feature(env
, ARM_FEATURE_EL3
)) {
8029 define_one_arm_cp_reg(cpu
, &zcr_el3_reginfo
);
8033 #ifdef TARGET_AARCH64
8034 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
8035 define_arm_cp_regs(cpu
, pauth_reginfo
);
8037 if (cpu_isar_feature(aa64_rndr
, cpu
)) {
8038 define_arm_cp_regs(cpu
, rndr_reginfo
);
8040 #ifndef CONFIG_USER_ONLY
8041 /* Data Cache clean instructions up to PoP */
8042 if (cpu_isar_feature(aa64_dcpop
, cpu
)) {
8043 define_one_arm_cp_reg(cpu
, dcpop_reg
);
8045 if (cpu_isar_feature(aa64_dcpodp
, cpu
)) {
8046 define_one_arm_cp_reg(cpu
, dcpodp_reg
);
8049 #endif /*CONFIG_USER_ONLY*/
8052 if (cpu_isar_feature(any_predinv
, cpu
)) {
8053 define_arm_cp_regs(cpu
, predinv_reginfo
);
8056 if (cpu_isar_feature(any_ccidx
, cpu
)) {
8057 define_arm_cp_regs(cpu
, ccsidr2_reginfo
);
8060 #ifndef CONFIG_USER_ONLY
8062 * Register redirections and aliases must be done last,
8063 * after the registers from the other extensions have been defined.
8065 if (arm_feature(env
, ARM_FEATURE_EL2
) && cpu_isar_feature(aa64_vh
, cpu
)) {
8066 define_arm_vh_e2h_redirects_aliases(cpu
);
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /*
         * The lower part of each SVE register aliases to the FPU
         * registers so we don't need to include both.
         */
#ifdef TARGET_AARCH64
        if (isar_feature_aa64_sve(&cpu->isar)) {
            gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg,
                                     arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs),
                                     "sve-registers.xml", 0);
        } else
#endif
        {
            gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                     aarch64_fpu_gdb_set_reg,
                                     34, "aarch64-fpu.xml", 0);
        }
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (cpu_isar_feature(aa32_simd_r32, cpu)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs),
                             "system-registers.xml", 0);
}
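
/*
 * Illustrative breakdown of the register counts passed above:
 * "arm-neon.xml" covers 32 D regs + 16 Q views + FPSID/FPSCR/FPEXC = 51,
 * "arm-vfp3.xml" 32 D regs + 3 = 35, "arm-vfp.xml" 16 D regs + 3 = 19,
 * and "aarch64-fpu.xml" 32 V regs + FPSR + FPCR = 34, matching the
 * gdb get/set handlers defined earlier in this file.
 */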
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    qemu_printf("  %s\n", name);
    g_free(name);
}

void arm_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    qemu_printf("Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, NULL);
    g_slist_free(list);
}
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
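
/*
 * The hashtable helpers that follow key cpu->cp_regs on a 32-bit value:
 * for AArch32 registers the key is built by ENCODE_CP_REG() from
 * (cp, is64, ns, crn, crm, opc1, opc2), and for AArch64 registers by
 * ENCODE_AA64_CP_REG() from (cp, crn, crm, op0, op1, op2); the exact
 * bit packing lives in those macros.  get_arm_cp_reginfo() later looks
 * a register up again using the same encoded key.
 */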
8182 static void add_cpreg_to_hashtable(ARMCPU
*cpu
, const ARMCPRegInfo
*r
,
8183 void *opaque
, int state
, int secstate
,
8184 int crm
, int opc1
, int opc2
,
8187 /* Private utility function for define_one_arm_cp_reg_with_opaque():
8188 * add a single reginfo struct to the hash table.
8190 uint32_t *key
= g_new(uint32_t, 1);
8191 ARMCPRegInfo
*r2
= g_memdup(r
, sizeof(ARMCPRegInfo
));
8192 int is64
= (r
->type
& ARM_CP_64BIT
) ? 1 : 0;
8193 int ns
= (secstate
& ARM_CP_SECSTATE_NS
) ? 1 : 0;
8195 r2
->name
= g_strdup(name
);
8196 /* Reset the secure state to the specific incoming state. This is
8197 * necessary as the register may have been defined with both states.
8199 r2
->secure
= secstate
;
8201 if (r
->bank_fieldoffsets
[0] && r
->bank_fieldoffsets
[1]) {
8202 /* Register is banked (using both entries in array).
8203 * Overwriting fieldoffset as the array is only used to define
8204 * banked registers but later only fieldoffset is used.
8206 r2
->fieldoffset
= r
->bank_fieldoffsets
[ns
];
8209 if (state
== ARM_CP_STATE_AA32
) {
8210 if (r
->bank_fieldoffsets
[0] && r
->bank_fieldoffsets
[1]) {
8211 /* If the register is banked then we don't need to migrate or
8212 * reset the 32-bit instance in certain cases:
8214 * 1) If the register has both 32-bit and 64-bit instances then we
8215 * can count on the 64-bit instance taking care of the
8217 * 2) If ARMv8 is enabled then we can count on a 64-bit version
8218 * taking care of the secure bank. This requires that separate
8219 * 32 and 64-bit definitions are provided.
8221 if ((r
->state
== ARM_CP_STATE_BOTH
&& ns
) ||
8222 (arm_feature(&cpu
->env
, ARM_FEATURE_V8
) && !ns
)) {
8223 r2
->type
|= ARM_CP_ALIAS
;
8225 } else if ((secstate
!= r
->secure
) && !ns
) {
8226 /* The register is not banked so we only want to allow migration of
8227 * the non-secure instance.
8229 r2
->type
|= ARM_CP_ALIAS
;
8232 if (r
->state
== ARM_CP_STATE_BOTH
) {
8233 /* We assume it is a cp15 register if the .cp field is left unset.
8239 #ifdef HOST_WORDS_BIGENDIAN
8240 if (r2
->fieldoffset
) {
8241 r2
->fieldoffset
+= sizeof(uint32_t);
8246 if (state
== ARM_CP_STATE_AA64
) {
8247 /* To allow abbreviation of ARMCPRegInfo
8248 * definitions, we treat cp == 0 as equivalent to
8249 * the value for "standard guest-visible sysreg".
8250 * STATE_BOTH definitions are also always "standard
8251 * sysreg" in their AArch64 view (the .cp value may
8252 * be non-zero for the benefit of the AArch32 view).
8254 if (r
->cp
== 0 || r
->state
== ARM_CP_STATE_BOTH
) {
8255 r2
->cp
= CP_REG_ARM64_SYSREG_CP
;
8257 *key
= ENCODE_AA64_CP_REG(r2
->cp
, r2
->crn
, crm
,
8258 r2
->opc0
, opc1
, opc2
);
8260 *key
= ENCODE_CP_REG(r2
->cp
, is64
, ns
, r2
->crn
, crm
, opc1
, opc2
);
8263 r2
->opaque
= opaque
;
8265 /* reginfo passed to helpers is correct for the actual access,
8266 * and is never ARM_CP_STATE_BOTH:
8269 /* Make sure reginfo passed to helpers for wildcarded regs
8270 * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
8275 /* By convention, for wildcarded registers only the first
8276 * entry is used for migration; the others are marked as
8277 * ALIAS so we don't try to transfer the register
8278 * multiple times. Special registers (ie NOP/WFI) are
8279 * never migratable and not even raw-accessible.
8281 if ((r
->type
& ARM_CP_SPECIAL
)) {
8282 r2
->type
|= ARM_CP_NO_RAW
;
8284 if (((r
->crm
== CP_ANY
) && crm
!= 0) ||
8285 ((r
->opc1
== CP_ANY
) && opc1
!= 0) ||
8286 ((r
->opc2
== CP_ANY
) && opc2
!= 0)) {
8287 r2
->type
|= ARM_CP_ALIAS
| ARM_CP_NO_GDB
;
8290 /* Check that raw accesses are either forbidden or handled. Note that
8291 * we can't assert this earlier because the setup of fieldoffset for
8292 * banked registers has to be done first.
8294 if (!(r2
->type
& ARM_CP_NO_RAW
)) {
8295 assert(!raw_accessors_invalid(r2
));
8298 /* Overriding of an existing definition must be explicitly
8301 if (!(r
->type
& ARM_CP_OVERRIDE
)) {
8302 ARMCPRegInfo
*oldreg
;
8303 oldreg
= g_hash_table_lookup(cpu
->cp_regs
, key
);
8304 if (oldreg
&& !(oldreg
->type
& ARM_CP_OVERRIDE
)) {
8305 fprintf(stderr
, "Register redefined: cp=%d %d bit "
8306 "crn=%d crm=%d opc1=%d opc2=%d, "
8307 "was %s, now %s\n", r2
->cp
, 32 + 32 * is64
,
8308 r2
->crn
, r2
->crm
, r2
->opc1
, r2
->opc2
,
8309 oldreg
->name
, r2
->name
);
8310 g_assert_not_reached();
8313 g_hash_table_insert(cpu
->cp_regs
, key
, r2
);
8317 void define_one_arm_cp_reg_with_opaque(ARMCPU
*cpu
,
8318 const ARMCPRegInfo
*r
, void *opaque
)
8320 /* Define implementations of coprocessor registers.
8321 * We store these in a hashtable because typically
8322 * there are less than 150 registers in a space which
8323 * is 16*16*16*8*8 = 262144 in size.
8324 * Wildcarding is supported for the crm, opc1 and opc2 fields.
8325 * If a register is defined twice then the second definition is
8326 * used, so this can be used to define some generic registers and
8327 * then override them with implementation specific variations.
8328 * At least one of the original and the second definition should
8329 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
8330 * against accidental use.
8332 * The state field defines whether the register is to be
8333 * visible in the AArch32 or AArch64 execution state. If the
8334 * state is set to ARM_CP_STATE_BOTH then we synthesise a
8335 * reginfo structure for the AArch32 view, which sees the lower
8336 * 32 bits of the 64 bit register.
8338 * Only registers visible in AArch64 may set r->opc0; opc0 cannot
8339 * be wildcarded. AArch64 registers are always considered to be 64
8340 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
8341 * the register, if any.
8343 int crm
, opc1
, opc2
, state
;
8344 int crmmin
= (r
->crm
== CP_ANY
) ? 0 : r
->crm
;
8345 int crmmax
= (r
->crm
== CP_ANY
) ? 15 : r
->crm
;
8346 int opc1min
= (r
->opc1
== CP_ANY
) ? 0 : r
->opc1
;
8347 int opc1max
= (r
->opc1
== CP_ANY
) ? 7 : r
->opc1
;
8348 int opc2min
= (r
->opc2
== CP_ANY
) ? 0 : r
->opc2
;
8349 int opc2max
= (r
->opc2
== CP_ANY
) ? 7 : r
->opc2
;
8350 /* 64 bit registers have only CRm and Opc1 fields */
8351 assert(!((r
->type
& ARM_CP_64BIT
) && (r
->opc2
|| r
->crn
)));
8352 /* op0 only exists in the AArch64 encodings */
8353 assert((r
->state
!= ARM_CP_STATE_AA32
) || (r
->opc0
== 0));
8354 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
8355 assert((r
->state
!= ARM_CP_STATE_AA64
) || !(r
->type
& ARM_CP_64BIT
));
8356 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
8357 * encodes a minimum access level for the register. We roll this
8358 * runtime check into our general permission check code, so check
8359 * here that the reginfo's specified permissions are strict enough
8360 * to encompass the generic architectural permission check.
8362 if (r
->state
!= ARM_CP_STATE_AA32
) {
8366 /* min_EL EL1, but some accessible to EL0 via kernel ABI */
8367 mask
= PL0U_R
| PL1_RW
;
8387 /* min_EL EL1, secure mode only (we don't check the latter) */
8391 /* broken reginfo with out-of-range opc1 */
8395 /* assert our permissions are not too lax (stricter is fine) */
8396 assert((r
->access
& ~mask
) == 0);
8399 /* Check that the register definition has enough info to handle
8400 * reads and writes if they are permitted.
8402 if (!(r
->type
& (ARM_CP_SPECIAL
|ARM_CP_CONST
))) {
8403 if (r
->access
& PL3_R
) {
8404 assert((r
->fieldoffset
||
8405 (r
->bank_fieldoffsets
[0] && r
->bank_fieldoffsets
[1])) ||
8408 if (r
->access
& PL3_W
) {
8409 assert((r
->fieldoffset
||
8410 (r
->bank_fieldoffsets
[0] && r
->bank_fieldoffsets
[1])) ||
8414 /* Bad type field probably means missing sentinel at end of reg list */
8415 assert(cptype_valid(r
->type
));
8416 for (crm
= crmmin
; crm
<= crmmax
; crm
++) {
8417 for (opc1
= opc1min
; opc1
<= opc1max
; opc1
++) {
8418 for (opc2
= opc2min
; opc2
<= opc2max
; opc2
++) {
8419 for (state
= ARM_CP_STATE_AA32
;
8420 state
<= ARM_CP_STATE_AA64
; state
++) {
8421 if (r
->state
!= state
&& r
->state
!= ARM_CP_STATE_BOTH
) {
8424 if (state
== ARM_CP_STATE_AA32
) {
8425 /* Under AArch32 CP registers can be common
8426 * (same for secure and non-secure world) or banked.
8430 switch (r
->secure
) {
8431 case ARM_CP_SECSTATE_S
:
8432 case ARM_CP_SECSTATE_NS
:
8433 add_cpreg_to_hashtable(cpu
, r
, opaque
, state
,
8434 r
->secure
, crm
, opc1
, opc2
,
8438 name
= g_strdup_printf("%s_S", r
->name
);
8439 add_cpreg_to_hashtable(cpu
, r
, opaque
, state
,
8441 crm
, opc1
, opc2
, name
);
8443 add_cpreg_to_hashtable(cpu
, r
, opaque
, state
,
8445 crm
, opc1
, opc2
, r
->name
);
8449 /* AArch64 registers get mapped to non-secure instance
8451 add_cpreg_to_hashtable(cpu
, r
, opaque
, state
,
8453 crm
, opc1
, opc2
, r
->name
);
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
{
    const ARMCPRegUserSpaceInfo *m;
    ARMCPRegInfo *r;

    for (m = mods; m->name; m++) {
        GPatternSpec *pat = NULL;
        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor write function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
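
/*
 * Illustrative sketch of how modify_arm_cp_regs() is driven (the names
 * below are hypothetical, not definitions from this file):
 *
 *   static ARMCPRegUserSpaceInfo example_mods[] = {
 *       { .name = "ID_AA64ISAR*_EL1_RESERVED", .is_glob = true },
 *       { .name = "MIDR_EL1", .exported_bits = 0x00000000ffffffff },
 *       REGUSERINFO_SENTINEL
 *   };
 *   modify_arm_cp_regs(some_reginfo_list, example_mods);
 *
 * Glob entries force every matching register to constant zero, while an
 * exact-name entry masks the reset value with .exported_bits and ORs in
 * .fixed_bits, exactly as the loop above does.
 */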
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q) {
        env->QF = ((val & CPSR_Q) != 0);
    }
    if (mask & CPSR_T) {
        env->thumb = ((val & CPSR_T) != 0);
    }
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}
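/*
 * Worked example for the two helpers above: sxtb16(0x00800080) sign-extends
 * bytes 0 and 2 into halfwords, giving 0xff80ff80, while uxtb16(0x12345678)
 * zero-extends them, giving 0x00340078.
 */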
int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;
    }
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
#ifdef CONFIG_USER_ONLY

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = env_archcpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode) {
        return;
    }

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}
/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contains a target of EL1.  This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
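/*
 * Example lookup: an IRQ taken from non-secure EL0 with a 64-bit EL3,
 * SCR_EL3.IRQ == 0, an AArch64 EL2 (rw == 1) and HCR_EL2.IMO == 0 indexes
 * target_el_table[1][0][1][0][0][0] and yields a target of EL1.
 */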
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    }

    /*
     * For these purposes, TGE and AMO/IMO/FMO both force the
     * interrupt to EL2.  Fold TGE into the bit extracted above.
     */
    hcr |= (hcr_el2 & HCR_TGE) != 0;

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}
void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
            [EXCP_LSERR] = "v8M LSERR UsageFault",
            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
/*
 * Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /*
     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /*
     * Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
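/*
 * Quick reference for the mapping implemented above (and reversed below):
 *   x13/x14 <-> USR/SYS SP and LR      x15     <-> SP_hyp
 *   x16/x17 <-> LR_irq / SP_irq        x18/x19 <-> LR_svc / SP_svc
 *   x20/x21 <-> LR_abt / SP_abt        x22/x23 <-> LR_und / SP_und
 *   x24-x30 <-> FIQ-mode r8-r14
 */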
/*
 * Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /*
     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /*
     * Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * for the mode.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /*
         * HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    int new_el;

    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);
    new_el = arm_current_el(env);

    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /* CPSR.PAN is normally preserved unless...  */
        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
            switch (new_el) {
            case 3:
                if (!arm_is_secure_below_el3(env)) {
                    /* ... the target is EL3, from non-secure state.  */
                    env->uncached_cpsr &= ~CPSR_PAN;
                    break;
                }
                /* ... the target is EL3, from secure state ... */
                /* fall through */
            case 1:
                /* ... the target is EL1 and SCTLR.SPAN is 0.  */
                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
                    env->uncached_cpsr |= CPSR_PAN;
                }
                break;
            }
        }
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
    arm_rebuild_hflags(env);
}
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int old_mode;
    unsigned int cur_el = arm_current_el(env);

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;
        uint64_t hcr;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
                is_aa64 = (hcr & HCR_RW) != 0;
                break;
            }
            /* fall through */
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        old_mode = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        old_mode = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;

    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    if (cpu_isar_feature(aa64_pan, cpu)) {
        /* The value of PSTATE.PAN is normally preserved, except when ... */
        new_mode |= old_mode & PSTATE_PAN;
        switch (new_el) {
        case 2:
            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
                != (HCR_E2H | HCR_TGE)) {
                break;
            }
            /* fall through */
        case 1:
            /* ... the target is EL1 ... */
            /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
                new_mode |= PSTATE_PAN;
            }
            break;
        }
    }

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);
    helper_rebuild_hflags_a64(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
/*
 * Do semihosting call and set the appropriate return value. All the
 * permission and validity checks have been done at translate time.
 *
 * We only see semihosting exceptions in TCG, as they are not
 * trapped to the hypervisor in KVM.
 */
static void handle_semihosting(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        env->pc += 4;
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        env->regs[15] += env->thumb ? 2 : 4;
    }
}
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
    if (cs->exception_index == EXCP_SEMIHOST) {
        handle_semihosting(cs);
        return;
    }

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */
/* Return the exception level which controls this address translation regime */
static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

uint64_t arm_sctlr(CPUARMState *env, int el)
{
    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
    if (el == 0) {
        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
        el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1);
    }
    return env->cp15.sctlr_el[el];
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
#ifndef CONFIG_USER_ONLY

/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (env->cp15.hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

#endif /* !CONFIG_USER_ONLY */
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}
#ifndef CONFIG_USER_ONLY
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
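/*
 * Simple AP[2:1] encoding, as decoded above:
 *   0 -> privileged read/write, no EL0 access
 *   1 -> read/write at all levels
 *   2 -> privileged read-only, no EL0 access
 *   3 -> read-only at all levels
 */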
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}
/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_Stage2);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            /* PAN forbids data accesses but doesn't affect insn fetch */
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
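/*
 * For example, with TTBCR.N == 0 every lookup uses TTBR0 and the level 1
 * descriptor address is TTBR0[31:14] plus VA[31:20] * 4; a non-zero N
 * shrinks the TTBR0 region so that high virtual addresses are translated
 * via TTBR1 instead.
 */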
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMCacheAttrs cacheattrs = {};
        ARMCacheAttrs *pcacheattrs = NULL;

        if (env->cp15.hcr_el2 & HCR_PTW) {
            /*
             * PTW means we must fault if this S1 walk touches S2 Device
             * memory; otherwise we don't care about the attributes and can
             * save the S2 translation the effort of computing them.
             */
            pcacheattrs = &cacheattrs;
        }

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_Stage2, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
            /* Access was to Device memory: generate Permission fault */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
10145 static uint32_t arm_ldl_ptw(CPUState
*cs
, hwaddr addr
, bool is_secure
,
10146 ARMMMUIdx mmu_idx
, ARMMMUFaultInfo
*fi
)
10148 ARMCPU
*cpu
= ARM_CPU(cs
);
10149 CPUARMState
*env
= &cpu
->env
;
10150 MemTxAttrs attrs
= {};
10151 MemTxResult result
= MEMTX_OK
;
10155 attrs
.secure
= is_secure
;
10156 as
= arm_addressspace(cs
, attrs
);
10157 addr
= S1_ptw_translate(env
, mmu_idx
, addr
, attrs
, fi
);
10161 if (regime_translation_big_endian(env
, mmu_idx
)) {
10162 data
= address_space_ldl_be(as
, addr
, attrs
, &result
);
10164 data
= address_space_ldl_le(as
, addr
, attrs
, &result
);
10166 if (result
== MEMTX_OK
) {
10169 fi
->type
= ARMFault_SyncExternalOnWalk
;
10170 fi
->ea
= arm_extabort_type(result
);
10174 static uint64_t arm_ldq_ptw(CPUState
*cs
, hwaddr addr
, bool is_secure
,
10175 ARMMMUIdx mmu_idx
, ARMMMUFaultInfo
*fi
)
10177 ARMCPU
*cpu
= ARM_CPU(cs
);
10178 CPUARMState
*env
= &cpu
->env
;
10179 MemTxAttrs attrs
= {};
10180 MemTxResult result
= MEMTX_OK
;
10184 attrs
.secure
= is_secure
;
10185 as
= arm_addressspace(cs
, attrs
);
10186 addr
= S1_ptw_translate(env
, mmu_idx
, addr
, attrs
, fi
);
10190 if (regime_translation_big_endian(env
, mmu_idx
)) {
10191 data
= address_space_ldq_be(as
, addr
, attrs
, &result
);
10193 data
= address_space_ldq_le(as
, addr
, attrs
, &result
);
10195 if (result
== MEMTX_OK
) {
10198 fi
->type
= ARMFault_SyncExternalOnWalk
;
10199 fi
->ea
= arm_extabort_type(result
);
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
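/*
 * Example: stage 2 attrs 0xf (outer/inner write-back) convert to the MAIR
 * encoding 0xff (write-back, read/write-allocate in both halves), while
 * 0x5 (outer/inner non-cacheable) converts to 0x44.
 */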
#endif /* !CONFIG_USER_ONLY */
static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 37, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBI bit so we always have 2 bits.  */
        return extract32(tcr, 20, 1) * 3;
    }
}

static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 51, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBID bit so we always have 2 bits.  */
        return extract32(tcr, 29, 1) * 3;
    }
}
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    bool epd, hpd, using16k, using64k;
    int select, tsz, tbi;

    if (!regime_has_2_ranges(mmu_idx)) {
        select = 0;
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_Stage2) {
            /* VTCR_EL2 */
            hpd = false;
        } else {
            hpd = extract32(tcr, 24, 1);
        }
        epd = false;
    } else {
        /*
         * Bit 55 is always between the two regions, and is canonical for
         * determining if address tagging is enabled.
         */
        select = extract64(va, 55, 1);
        if (!select) {
            tsz = extract32(tcr, 0, 6);
            epd = extract32(tcr, 7, 1);
            using64k = extract32(tcr, 14, 1);
            using16k = extract32(tcr, 15, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            int tg = extract32(tcr, 30, 2);
            using16k = tg == 1;
            using64k = tg == 3;
            tsz = extract32(tcr, 16, 6);
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
    }
    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */

    /* Present TBI as a composite with TBID.  */
    tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
    if (!data) {
        tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
    }
    tbi = (tbi >> select) & 1;

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .tbi = tbi,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
    };
}
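/*
 * For instance, with a 4KB granule and T0SZ == 25 the low region spans a
 * 39-bit VA space (inputsize = 64 - tsz in get_phys_addr_lpae); bit 55 of
 * the VA selects between the TTBR0 and TTBR1 parameter sets, and the
 * composite TBI/TBID value reports whether the top byte is ignored for
 * this access.
 */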
#ifndef CONFIG_USER_ONLY
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;
        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
        inputsize = addrsize - param.tsz;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_Stage2) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
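        /*
         * Worked example (not from the original source): with 4KB granules
         * (stride == 9) and a 48-bit input address this gives
         * level = 4 - (48 - 4) / 9 = 0, i.e. a full four-level walk,
         * while a 39-bit input address gives 4 - 35 / 9 = 1, skipping
         * level 0 entirely.
         */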
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    /*
     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
     * and also to mask out CnP (bit 0) which could validly be non-zero.
     */
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
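        /*
         * For example (illustration, not from the original source): with
         * stride == 9 (4KB granules) a level 3 page entry yields
         * page_size = 1 << 12 (4KB), while a level 2 block entry yields
         * 1 << 21 (2MB).
         */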
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_Stage2) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        txattrs->target_tlb_bit0 = true;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_Stage2) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2);
    return true;
}
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
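/*
 * Worked example (illustration, not from the original source): a stage 1
 * nibble of 0xf (write-back, read/write-allocate) combined with a stage 2
 * nibble of 0x4 (non-cacheable) yields 4; combined with a stage 2
 * write-through nibble such as 0xa it yields 0xb, i.e. write-through with
 * the stage 1 allocation hints preserved.
 */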
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PSMAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_E10_0 ||
        mmu_idx == ARMMMUIdx_E10_1 ||
        mmu_idx == ARMMMUIdx_E10_1_PAN) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early. */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation. */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms. */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                if (env->cp15.hcr_el2 & HCR_DC) {
                    /*
                     * HCR.DC forces the first stage attributes to
                     *  Normal Non-Shareable,
                     *  Inner Write-Back Read-Allocate Write-Allocate,
                     *  Outer Write-Back Read-Allocate Write-Allocate.
                     */
                    cacheattrs->attrs = 0xff;
                    cacheattrs->shareability = 0;
                }
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade an non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /*
         * MMU disabled.  S1 addresses within aa64 translation regimes are
         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
         */
        if (mmu_idx != ARMMMUIdx_Stage2) {
            int r_el = regime_el(env, mmu_idx);
            if (arm_el_is_aa64(env, r_el)) {
                int pamax = arm_pamax(env_archcpu(env));
                uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
                int addrtop, tbi;

                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
                if (access_type == MMU_INST_FETCH) {
                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
                }
                tbi = (tbi >> extract64(address, 55, 1)) & 1;
                addrtop = (tbi ? 55 : 63);

                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
                    fi->type = ARMFault_AddressSize;
                    fi->level = 0;
                    fi->stage2 = false;
                    return 1;
                }

                /*
                 * When TBI is disabled, we've just validated that all of the
                 * bits above PAMax are zero, so logically we only need to
                 * clear the top byte for TBI.  But it's clearer to follow
                 * the pseudocode set of addrdesc.paddress.
                 */
                address = extract64(address, 0, 52);
            }
        }
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}

#endif
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
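/*
 * Worked example for the saturating helpers above (illustration, not from
 * the original source): add16_sat(0x7fff, 0x0001) overflows positively and
 * saturates to 0x7fff, while add16_sat(0x8000, 0xffff) overflows negatively
 * and saturates to 0x8000.
 */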
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
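/*
 * Note (illustration, not from the original source): the ARITH_GE variants
 * above also update the GE bits consumed by SEL. For the unsigned modulo
 * macros, an addition sets the GE bits for a lane when the sum carries out
 * (sum >> 16 == 1 for halfwords), and a subtraction sets them when no
 * borrow occurs (sum >> 16 == 0).
 */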
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}

/* CRC helpers.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}
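/*
 * Note (illustration, not from the original source): the value is stored
 * little-endian into a local buffer, so only the low 'bytes' bytes of val
 * are fed to the CRC routine; the byte, halfword and word forms of the
 * guest CRC32 instructions supply bytes = 1, 2 or 4 respectively.
 */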
uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     * This register is ignored if E2H+TGE are both set.
     */
    if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        int fpen = extract32(env->cp15.cpacr_el1, 20, 2);

        switch (fpen) {
        case 0:
        case 2:
            if (cur_el == 0 || cur_el == 1) {
                /* Trap to PL1, which might be EL1 or EL3 */
                if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                    return 3;
                }
                return 1;
            }
            if (cur_el == 3 && !is_a64(env)) {
                /* Secure PL1 running at EL3 */
                return 3;
            }
            break;
        case 1:
            if (cur_el == 0) {
                return 1;
            }
            break;
        case 3:
            break;
        }
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
/* Return the exception level we're running at if this is our mmu_idx */
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    if (mmu_idx & ARM_MMU_IDX_M) {
        return mmu_idx & ARM_MMU_IDX_M_PRIV;
    }

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE10_0:
        return 0;
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
        return 1;
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    default:
        g_assert_not_reached();
    }
}
#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif

ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    /* See ARM pseudo-function ELIsInHost.  */
    switch (el) {
    case 0:
        if (arm_is_secure_below_el3(env)) {
            return ARMMMUIdx_SE10_0;
        }
        if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)
            && arm_el_is_aa64(env, 2)) {
            return ARMMMUIdx_E20_0;
        }
        return ARMMMUIdx_E10_0;
    case 1:
        if (arm_is_secure_below_el3(env)) {
            if (env->pstate & PSTATE_PAN) {
                return ARMMMUIdx_SE10_1_PAN;
            }
            return ARMMMUIdx_SE10_1;
        }
        if (env->pstate & PSTATE_PAN) {
            return ARMMMUIdx_E10_1_PAN;
        }
        return ARMMMUIdx_E10_1;
    case 2:
        /* TODO: ARMv8.4-SecEL2 */
        /* Note that TGE does not apply at EL2.  */
        if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) {
            if (env->pstate & PSTATE_PAN) {
                return ARMMMUIdx_E20_2_PAN;
            }
            return ARMMMUIdx_E20_2;
        }
        return ARMMMUIdx_E2;
    case 3:
        return ARMMMUIdx_SE3;
    default:
        g_assert_not_reached();
    }
}

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}

#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif
static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
                                      ARMMMUIdx mmu_idx, uint32_t flags)
{
    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
                       arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
    }
    return flags;
}

static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                         ARMMMUIdx mmu_idx, uint32_t flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }
    flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = 0;

    if (arm_v7m_is_handler_mode(env)) {
        flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
{
    int flags = 0;

    flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
                       arm_debug_target_el(env));
    return flags;
}

static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = rebuild_hflags_aprofile(env);

    if (arm_el_is_aa64(env, 1)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
    }

    if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = rebuild_hflags_aprofile(env);
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint64_t sctlr;
    int tbii, tbid;

    flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses.  */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
    flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);
        uint32_t zcr_len;

        /*
         * If SVE is disabled, but FP is enabled,
         * then the effective len is 0.
         */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            zcr_len = sve_zcr_len_for_el(env, el);
        }
        flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
        flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
    }

    sctlr = regime_sctlr(env, stage1);

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            /* TODO: ARMv8.3-NV */
            flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            /* TODO: ARMv8.4-SecEL2 */
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
static uint32_t rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}

/*
 * If we have triggered a EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered a EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}
static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    uint32_t env_flags_current = env->hflags;
    uint32_t env_flags_rebuilt = rebuild_hflags_internal(env);

    if (unlikely(env_flags_current != env_flags_rebuilt)) {
        fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
                env_flags_current, env_flags_rebuilt);
        abort();
    }
#endif
}
*env
, target_ulong
*pc
,
12689 target_ulong
*cs_base
, uint32_t *pflags
)
12691 uint32_t flags
= env
->hflags
;
12692 uint32_t pstate_for_ss
;
12695 assert_hflags_rebuild_correctly(env
);
12697 if (FIELD_EX32(flags
, TBFLAG_ANY
, AARCH64_STATE
)) {
12699 if (cpu_isar_feature(aa64_bti
, env_archcpu(env
))) {
12700 flags
= FIELD_DP32(flags
, TBFLAG_A64
, BTYPE
, env
->btype
);
12702 pstate_for_ss
= env
->pstate
;
12704 *pc
= env
->regs
[15];
12706 if (arm_feature(env
, ARM_FEATURE_M
)) {
12707 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
) &&
12708 FIELD_EX32(env
->v7m
.fpccr
[M_REG_S
], V7M_FPCCR
, S
)
12709 != env
->v7m
.secure
) {
12710 flags
= FIELD_DP32(flags
, TBFLAG_M32
, FPCCR_S_WRONG
, 1);
12713 if ((env
->v7m
.fpccr
[env
->v7m
.secure
] & R_V7M_FPCCR_ASPEN_MASK
) &&
12714 (!(env
->v7m
.control
[M_REG_S
] & R_V7M_CONTROL_FPCA_MASK
) ||
12715 (env
->v7m
.secure
&&
12716 !(env
->v7m
.control
[M_REG_S
] & R_V7M_CONTROL_SFPA_MASK
)))) {
12718 * ASPEN is set, but FPCA/SFPA indicate that there is no
12719 * active FP context; we must create a new FP context before
12720 * executing any FP insn.
12722 flags
= FIELD_DP32(flags
, TBFLAG_M32
, NEW_FP_CTXT_NEEDED
, 1);
12725 bool is_secure
= env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_S_MASK
;
12726 if (env
->v7m
.fpccr
[is_secure
] & R_V7M_FPCCR_LSPACT_MASK
) {
12727 flags
= FIELD_DP32(flags
, TBFLAG_M32
, LSPACT
, 1);
12731 * Note that XSCALE_CPAR shares bits with VECSTRIDE.
12732 * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
12734 if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
12735 flags
= FIELD_DP32(flags
, TBFLAG_A32
,
12736 XSCALE_CPAR
, env
->cp15
.c15_cpar
);
12738 flags
= FIELD_DP32(flags
, TBFLAG_A32
, VECLEN
,
12740 flags
= FIELD_DP32(flags
, TBFLAG_A32
, VECSTRIDE
,
12741 env
->vfp
.vec_stride
);
12743 if (env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30)) {
12744 flags
= FIELD_DP32(flags
, TBFLAG_A32
, VFPEN
, 1);
12748 flags
= FIELD_DP32(flags
, TBFLAG_AM32
, THUMB
, env
->thumb
);
12749 flags
= FIELD_DP32(flags
, TBFLAG_AM32
, CONDEXEC
, env
->condexec_bits
);
12750 pstate_for_ss
= env
->uncached_cpsr
;
12754 * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
12755 * states defined in the ARM ARM for software singlestep:
12756 * SS_ACTIVE PSTATE.SS State
12757 * 0 x Inactive (the TB flag for SS is always 0)
12758 * 1 0 Active-pending
12759 * 1 1 Active-not-pending
12760 * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
12762 if (FIELD_EX32(flags
, TBFLAG_ANY
, SS_ACTIVE
) &&
12763 (pstate_for_ss
& PSTATE_SS
)) {
12764 flags
= FIELD_DP32(flags
, TBFLAG_ANY
, PSTATE_SS
, 1);
12770 #ifdef TARGET_AARCH64
12772 * The manual says that when SVE is enabled and VQ is widened the
12773 * implementation is allowed to zero the previously inaccessible
12774 * portion of the registers. The corollary to that is that when
12775 * SVE is enabled and VQ is narrowed we are also allowed to zero
12776 * the now inaccessible portion of the registers.
12778 * The intent of this is that no predicate bit beyond VQ is ever set.
12779 * Which means that some operations on predicate registers themselves
12780 * may operate on full uint64_t or even unrolled across the maximum
12781 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
12782 * may well be cheaper than conditionals to restrict the operation
12783 * to the relevant portion of a uint16_t[16].
12785 void aarch64_sve_narrow_vq(CPUARMState
*env
, unsigned vq
)
12790 assert(vq
>= 1 && vq
<= ARM_MAX_VQ
);
12791 assert(vq
<= env_archcpu(env
)->sve_max_vq
);
12793 /* Zap the high bits of the zregs. */
12794 for (i
= 0; i
< 32; i
++) {
12795 memset(&env
->vfp
.zregs
[i
].d
[2 * vq
], 0, 16 * (ARM_MAX_VQ
- vq
));
12798 /* Zap the high bits of the pregs and ffr. */
12801 pmask
= ~(-1ULL << (16 * (vq
& 3)));
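    /*
     * Note (illustration, not from the original source): each predicate bit
     * covers one byte of the vector, so a uint64_t element of p[] holds the
     * bits for four quadwords.  For vq == 2, pmask is 0x00000000ffffffff:
     * the low 32 predicate bits stay in p[0] and everything above is
     * cleared by the loop below.
     */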
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);