/*
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "exec/cpu_ldst.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));
#endif

static void switch_mode(CPUARMState *env, int mode);
static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    /* VFP data registers are always little-endian. */
    if (reg < nregs) {
        return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            return gdb_get_reg128(buf, q[0], q[1]);
        }
    }
    switch (reg - nregs) {
    case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); break;
    case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env)); break;
    case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); break;
    }
    return 0;
}
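/*
 * Worked example of the numbering above: with aa32_simd_r32, nregs is 32,
 * so gdb regs 0..31 read D0..D31. With NEON, nregs grows to 48 and regs
 * 32..47 alias Q0..Q15 (reg 33 reads Q1 via aa32_vfp_qreg(env, 1)). The
 * remaining regs then map via reg - nregs to FPSID (0), FPSCR (1) and
 * FPEXC (2).
 */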
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register - quads are in LE order */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        return gdb_get_reg128(buf, q[1], q[0]);
    }
    case 32:
        /* FPSR */
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        /* FPCR */
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}
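/*
 * These raw accessors rely purely on ARMCPRegInfo.fieldoffset. For a
 * definition such as the FCSEIDR one later in this file:
 *
 *   .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns)
 *
 * raw_ptr(env, ri) is simply &env->cp15.fcseidr_ns, and raw_read()/
 * raw_write() select the 32-bit or 64-bit view of that field.
 */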
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * written value.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
/**
 * arm_get/set_gdb_*: get/set a gdb register
 * @env: the CPU state
 * @buf: a buffer to copy to/from
 * @reg: register number (offset from start of group)
 *
 * We return the number of bytes copied
 */
static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
#ifdef TARGET_AARCH64
static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            len += gdb_get_reg128(buf,
                                  env->vfp.zregs[reg].d[vq * 2 + 1],
                                  env->vfp.zregs[reg].d[vq * 2]);
        }
        return len;
    }
    case 32:
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    /* then 16 predicates and the ffr */
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
        }
        return len;
    }
    case 51:
    {
        /*
         * We report in Vector Granules (VG) which is 64bit in a Z reg
         * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
         */
        int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
        return gdb_get_reg32(buf, vq * 2);
    }
    default:
        /* gdbstub asked for something out of our range */
        qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
        break;
    }

    return 0;
}
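/*
 * Summary of the SVE gdb register map used above: regs 0..31 are Z0..Z31
 * (sve_max_vq quadwords each), 32 is FPSR, 33 is FPCR, 34..49 are P0..P15,
 * 50 is the FFR and 51 is VG, reported in 64-bit granules (vq * 2).
 */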
static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
            env->vfp.zregs[reg].d[vq * 2] = *p++;
            len += 16;
        }
        return len;
    }
    case 32:
        vfp_set_fpsr(env, *(uint32_t *)buf);
        return 4;
    case 33:
        vfp_set_fpcr(env, *(uint32_t *)buf);
        return 4;
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            env->vfp.pregs[preg].p[vq / 4] = *p++;
            len += 8;
        }
        return len;
    }
    case 51:
        /* cannot set vg via gdbstub */
        return 0;
    default:
        /* gdbstub asked for something out of our range */
        break;
    }

    return 0;
}
#endif /* TARGET_AARCH64 */
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
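/*
 * Taken together, the two functions above implement the migration/KVM sync
 * handshake: write_list_to_cpustate() pushes the (index,value) list into
 * cpu->env and uses a read-back to detect values the CPU did not accept,
 * while a later write_cpustate_to_list(cpu, true) skips any register whose
 * earlier raw write failed to stick.
 */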
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW. */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR. */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
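/*
 * Each accessfn above is wired into a register definition through the
 * ARMCPRegInfo.accessfn field, e.g. (as in the CONTEXTIDR_EL1 entry later
 * in this file):
 *
 *   .access = PL1_RW, .accessfn = access_tvm_trvm,
 *
 * so the trap check runs on every guest access to that register.
 */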
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        uint64_t mask = 0;
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}
static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
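/*
 * Worked example: for a PMU with PMCR.N == 4 event counters,
 * pmu_counter_mask(env) == 0x8000000f -- bit 31 covers the cycle counter
 * (PMCCNTR) and bits [3:0] cover the four event counters.
 */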
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}
/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return icount_enabled() == 1; /* Precise instruction counting */
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return icount_to_ns((int64_t)icount);
}
#endif
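/*
 * Because ARM_CPU_FREQ is fixed at 1 GHz, one emulated CPU cycle equals one
 * virtual nanosecond here: cycles_ns_per(n) reduces to n, and
 * cycles_get_count() is just the current QEMU_CLOCK_VIRTUAL time in ns.
 */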
static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}
static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};
/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];
/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
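/*
 * Worked example of the PMCEID encoding above: CPU_CYCLES (0x011) sets bit
 * 0x11 of PMCEID0, while STALL (0x03c) has bit 5 set in its event number
 * and therefore sets bit 0x1c of PMCEID1.
 */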
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
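/*
 * Filtering example: with PMXEVTYPER.U == 1 and NSU == 0, counting at EL0
 * is prohibited in both security states -- when secure, filtered = u; when
 * non-secure, filtered = (u != nsu), which is also true.
 */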
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
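/*
 * Delta-scheme example: if the counter is enabled when the raw cycle count
 * is 1000 and the guest then writes 0 to PMCCNTR, c15_ccnt_delta becomes
 * 1000; when the raw count reaches 1500, the guest-visible value computed
 * above is 1500 - 1000 = 500.
 */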
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint64_t count = 0;
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;

    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}
static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}
*env
)
1620 pmccntr_op_start(env
);
1621 for (i
= 0; i
< pmu_num_counters(env
); i
++) {
1622 pmevcntr_op_start(env
, i
);
1626 void pmu_op_finish(CPUARMState
*env
)
1629 pmccntr_op_finish(env
);
1630 for (i
= 0; i
< pmu_num_counters(env
); i
++) {
1631 pmevcntr_op_finish(env
, i
);
1635 void pmu_pre_el_change(ARMCPU
*cpu
, void *ignored
)
1637 pmu_op_start(&cpu
->env
);
1640 void pmu_post_el_change(ARMCPU
*cpu
, void *ignored
)
1642 pmu_op_finish(&cpu
->env
);
void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);

    pmu_op_finish(env);
}
static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}
*env
, const ARMCPRegInfo
*ri
)
1716 pmccntr_op_start(env
);
1717 ret
= env
->cp15
.c15_ccnt
;
1718 pmccntr_op_finish(env
);
1722 static void pmselr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1725 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1726 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
1727 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
1730 env
->cp15
.c9_pmselr
= value
& 0x1f;
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}
static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}
static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}
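/*
 * Counter-index decode example: the AArch32 PMEVTYPER<n> encodings place n
 * in {crm[1:0], opc2[2:0]}, so e.g. crm = 13, opc2 = 2 yields
 * ((13 & 3) << 3) | (2 & 7) == 10, i.e. PMEVTYPER10.
 */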
static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}
static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (ri->state == ARM_CP_STATE_AA64) {
        value |= SCR_FW | SCR_AW;   /* these two bits are RES1. */
        valid_mask &= ~SCR_NET;

        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= SCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= SCR_API | SCR_APK;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= SCR_ATA;
        }
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}
static CPAccessResult access_aa64_tid2(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint64_t ret = 0;
    bool allow_virt = (arm_current_el(env) == 1 &&
                       (!arm_is_secure_below_el3(env) ||
                        (env->cp15.scr_el3 & SCR_EEL2)));

    if (allow_virt && (hcr_el2 & HCR_IMO)) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (allow_virt && (hcr_el2 & HCR_FMO)) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}
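/*
 * Note the pairing convention used throughout the table below: most
 * registers appear twice, once as the 32-bit cp15 encoding and once as
 * the AArch64 system-register view, with both entries backed by the same
 * CPUARMState field so that either view observes writes made through
 * the other.
 */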
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW,
      .accessfn = access_aa64_tid2,
      .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST,
      .accessfn = access_aa64_tid1,
      .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R | PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R | PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);
    uint64_t hcr;
    uint32_t cntkctl;

    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            cntkctl = env->cp15.cnthctl_el2;
        } else {
            cntkctl = env->cp15.c14_cntkctl;
        }
        if (!extract32(cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }

        /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
        if (hcr & HCR_E2H) {
            if (timeridx == GTIMER_PHYS &&
                !extract32(env->cp15.cnthctl_el2, 10, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        } else {
            /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
            if (arm_feature(env, ARM_FEATURE_EL2) &&
                timeridx == GTIMER_PHYS && !secure &&
                !extract32(env->cp15.cnthctl_el2, 1, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;

    case 1:
        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
        if (arm_feature(env, ARM_FEATURE_EL2) &&
            timeridx == GTIMER_PHYS && !secure &&
            (hcr & HCR_E2H
             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /*
         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
         * EL0 if EL0[PV]TEN is zero.
         */
        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */

    case 1:
        if (arm_feature(env, ARM_FEATURE_EL2) &&
            timeridx == GTIMER_PHYS && !secure) {
            if (hcr & HCR_E2H) {
                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}
static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}
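/*
 * Timer arithmetic below is done in counter ticks, where one tick lasts
 * gt_cntfrq_period_ns(cpu) nanoseconds: e.g. at the default 62.5 MHz
 * generic timer frequency (16 ns period), one second of
 * QEMU_CLOCK_VIRTUAL time corresponds to 62500000 ticks.
 */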
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
        } else {
            timer_mod(cpu->gt_timer[timeridx], nexttick);
        }
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = env_archcpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}
static uint64_t gt_virt_cnt_offset(CPUARMState *env)
{
    uint64_t hcr;

    switch (arm_current_el(env)) {
    case 2:
        hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_E2H) {
            /* In the E2H regime the virtual counter has no offset.  */
            return 0;
        }
        break;
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return 0;
        }
        break;
    }

    return env->cp15.cntvoff_el2;
}
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
}
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}
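/*
 * TVAL is a signed 32-bit downcounter view of the same state: reading
 * yields CVAL - (count - offset), and writing a value v sets
 * CVAL = (count - offset) + sext32(v), as the two helpers below implement.
 */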
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}
static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}
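/*
 * The *_redir helpers below implement VHE register redirection: when the
 * CPU is running in an EL2&0 translation regime (HCR_EL2.E2H set), the
 * CNTP_* and CNTV_* views are backed by the EL2 physical and virtual
 * timers (GTIMER_HYP, GTIMER_HYPVIRT) rather than the EL1 ones.
 */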
static int gt_phys_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return GTIMER_HYP;
    default:
        return GTIMER_PHYS;
    }
}

static int gt_virt_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return GTIMER_HYPVIRT;
    default:
        return GTIMER_VIRT;
    }
}
static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}
static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}
static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}
static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
}

static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
}

static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
}
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}

void arm_gt_hvtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYPVIRT);
}
static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
}
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
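/*
 * e2h_access gates register aliases that only exist while HCR_EL2.E2H is
 * in effect: without E2H the access is reported as trapped.
 */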
static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
#else

/* In user-mode most of the generic timer registers are inaccessible
 * however modern kernels (4.12+) allow access to cntvct_el0
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};

#endif
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
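/*
 * do_ats_write() below runs an address-translation instruction through
 * get_phys_addr() and encodes the outcome in PAR format: the 64-bit LPAE
 * format when required, the 32-bit short format otherwise. Fault types
 * that the architecture requires to be delivered as exceptions (rather
 * than reported in the PAR) are raised directly from here.
 */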
#ifdef CONFIG_TCG
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (ret) {
        /*
         * Some kinds of translation fault must cause exceptions rather
         * than being reported in the PAR.
         */
        int current_el = arm_current_el(env);
        int target_el;
        uint32_t syn, fsr, fsc;
        bool take_exc = false;

        if (fi.s1ptw && current_el == 1 && !arm_is_secure(env)
            && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
            /*
             * Synchronous stage 2 fault on an access made as part of the
             * translation table walk for AT S1E0* or AT S1E1* insn
             * executed from NS EL1. If this is a synchronous external abort
             * and SCR_EL3.EA == 1, then we take a synchronous external abort
             * to EL3. Otherwise the fault is taken as an exception to EL2,
             * and HPFAR_EL2 holds the faulting IPA.
             */
            if (fi.type == ARMFault_SyncExternalOnWalk &&
                (env->cp15.scr_el3 & SCR_EA)) {
                target_el = 3;
            } else {
                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
                target_el = 2;
            }
            take_exc = true;
        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
            /*
             * Synchronous external aborts during a translation table walk
             * are taken as Data Abort exceptions.
             */
            if (fi.stage2) {
                if (current_el == 3) {
                    target_el = 3;
                } else {
                    target_el = 2;
                }
            } else {
                target_el = exception_target_el(env);
            }
            take_exc = true;
        }

        if (take_exc) {
            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
                fsr = arm_fi_to_lfsc(&fi);
                fsc = extract32(fsr, 0, 6);
            } else {
                fsr = arm_fi_to_sfsc(&fi);
                fsc = 0x3f;
            }
            /*
             * Report exception with ESR indicating a fault due to a
             * translation table walk for a cache maintenance instruction.
             */
            syn = syn_data_abort_no_iss(current_el == target_el, 0,
                                        fi.ea, 1, fi.s1ptw, 1, fsc);
            env->exception.vaddress = value;
            env->exception.fsr = fsr;
            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
        }
    }

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_E10_0 ||
                mmu_idx == ARMMMUIdx_E10_1 ||
                mmu_idx == ARMMMUIdx_E10_1_PAN) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
#endif /* CONFIG_TCG */
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE3;
            break;
        case 2:
            g_assert(!secure);  /* TODO: ARMv8.4-SecEL2 */
            /* fall through */
        case 1:
            if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
                mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN
                           : ARMMMUIdx_Stage1_E1_PAN);
            } else {
                mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE10_0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_Stage1_E0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_E10_1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}
static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}
static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
            if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
                mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN
                           : ARMMMUIdx_Stage1_E1_PAN);
            } else {
                mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
            }
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_SE3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
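/*
 * These two are inverses over the low 2 bits of each field: e.g. the
 * simple-format value 0x9 (AP 2 and 1 for regions 1 and 0) widens to the
 * extended value 0x21, and simple_mpu_ap_bits(0x21) returns 0x9 again,
 * dropping the top two (zero-padded) bits of each 4-bit extended field.
 */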
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}
static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
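
/*
 * Illustration (added commentary, values are a worked example of the code
 * above rather than anything from the Arm ARM): for a short-descriptor
 * TTBCR write with N = 2, maskshift is 2, so:
 *
 *   tcr->mask      = ~(0xffffffffu >> 2) = 0xc0000000
 *   tcr->base_mask = ~(0x3fffu >> 2)     = 0xfffff000
 *
 * i.e. virtual addresses with either of the top two bits set fall outside
 * the TTBR0 region, and the TTBR0 table base is aligned to 2^(14 - N) bytes.
 */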
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2. */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}
static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = env_archcpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
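
/*
 * Illustration (added commentary): for TTBR0/TTBR1 the ASID lives in bits
 * [63:48] of the register, so a 64-bit write that changes, say, ASID 0x0001
 * to 0x0002 makes extract64(old ^ new, 48, 16) nonzero and triggers the
 * flush in vmsa_ttbr_write() above, while a write that only changes the
 * translation table base address leaves the TLB alone.
 */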
static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * If we are running with E2&0 regime, then an ASID is active.
     * Flush if that might be changing.  Note we're not checking
     * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
     * holds the active ASID, only checking the field that might.
     */
    if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
        (arm_hcr_el2_eff(env) & HCR_E2H)) {
        tlb_flush_by_mmuidx(env_cpu(env),
                            ARMMMUIdxBit_E20_2 |
                            ARMMMUIdxBit_E20_2_PAN |
                            ARMMMUIdxBit_E20_0);
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    /*
     * A change in VMID to the stage2 page table (Stage2) invalidates
     * the combined stage 1&2 tlbs (EL10_1 and EL10_0).
     */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_E10_1 |
                            ARMMMUIdxBit_E10_1_PAN |
                            ARMMMUIdxBit_E10_0);
        raw_write(env, ri, value);
    }
}
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_tcr_el12_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .accessfn = access_tvm_trvm,
    .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
                           offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
};
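
/*
 * Note on .bank_fieldoffsets as used above (added commentary): the two
 * entries are the offsets of the secure and non-secure banks of the
 * register state, in that order. TTBCR and TTBCR2 store their banks in
 * the low and high 32 bits of the 64-bit tcr_el[] fields respectively,
 * which is why the pairs use offsetoflow32()/offsetofhigh32() rather
 * than plain offsetof().
 */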
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}
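
/*
 * Illustration (added commentary): on a four-core v7MP system the third
 * core would read MPIDR as (1U << 31) | 0x2, i.e. the "MP extensions
 * implemented" bit plus Aff0 = 2, taken from cpu->mp_affinity. A
 * uniprocessor part that still implements the MP extensions, such as a
 * Cortex-R5 with mp_is_up set, additionally reads bit 30 (the U bit) as 1.
 */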
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}
static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_PAN;
}

static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
}

static const ARMCPRegInfo pan_reginfo = {
    .name = "PAN", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_pan_read, .writefn = aa64_pan_write
};

static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_UAO;
}

static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
}

static const ARMCPRegInfo uao_reginfo = {
    .name = "UAO", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_uao_read, .writefn = aa64_uao_write
};
static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
                                              const ARMCPRegInfo *ri,
                                              bool isread)
{
    /* Cache invalidate/clean to Point of Coherency or Persistence... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */
        if (arm_hcr_el2_eff(env) & HCR_TPCP) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
                                              const ARMCPRegInfo *ri,
                                              bool isread)
{
    /* Cache invalidate/clean to Point of Unification... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */
        if (arm_hcr_el2_eff(env) & HCR_TPU) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static int vae1_tlbmask(CPUARMState *env)
{
    /* Since we exclude secure first, we may read HCR_EL2 directly. */
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE10_1 |
               ARMMMUIdxBit_SE10_1_PAN |
               ARMMMUIdxBit_SE10_0;
    } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE))
               == (HCR_E2H | HCR_TGE)) {
        return ARMMMUIdxBit_E20_2 |
               ARMMMUIdxBit_E20_2_PAN |
               ARMMMUIdxBit_E20_0;
    } else {
        return ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0;
    }
}
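
/*
 * Illustration (added commentary): with HCR_EL2.{E2H,TGE} == {1,1}, the
 * VHE "host kernel runs at EL2" configuration, vae1_tlbmask() selects the
 * E20_* mmu indexes, since EL1&0-targeted TLB maintenance then applies to
 * the EL2&0 regime; otherwise the ordinary E10_* (or SE10_* when secure)
 * indexes are the ones flushed.
 */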
static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
    } else {
        tlb_flush_by_mmuidx(cs, mask);
    }
}

static int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE10_1 |
               ARMMMUIdxBit_SE10_1_PAN |
               ARMMMUIdxBit_SE10_0;
    } else {
        return ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0;
    }
}

static int e2_tlbmask(CPUARMState *env)
{
    /* TODO: ARMv8.4-SecEL2 */
    return ARMMMUIdxBit_E20_0 |
           ARMMMUIdxBit_E20_2 |
           ARMMMUIdxBit_E20_2_PAN |
           ARMMMUIdxBit_E2;
}
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
}
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
}

static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (tlb_force_broadcast(env)) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
    }
}
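
/*
 * Illustration of the pageaddr computation used by these TLBI-by-VA
 * handlers (added commentary): the register payload carries VA[55:12] in
 * its low bits, so "value << 12" rebuilds the page-aligned address and
 * sextract64(..., 0, 56) sign-extends from bit 55. For example a payload
 * of 0x80000 yields pageaddr 0x80000000, while a payload with bit 43 set
 * (VA bit 55) sign-extends to an all-ones top byte, i.e. a high "kernel
 * half" virtual address.
 */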
static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_SE3);
}
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TDZ) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TDZ) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}
static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
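
/*
 * Illustration (added commentary): DCZID_EL0.BS (bits [3:0]) is log2 of
 * the DC ZVA block size in words, and bit 4 is DZP. With a dcz_blocksize
 * of 4, for instance, the register advertises 2^4 * 4 = 64-byte blocks,
 * and aa64_dczid_read() above sets DZP whenever aa64_zva_access() would
 * currently refuse the access.
 */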
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    /* ??? Lots of these bits are not implemented. */

    if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
        if (ri->opc1 == 6) { /* SCTLR_EL3 */
            value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
        } else {
            value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
                       SCTLR_ATA0 | SCTLR_ATA);
        }
    }

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    raw_write(env, ri, value);

    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));

    if (ri->type & ARM_CP_SUPPRESS_TB_END) {
        /*
         * Normally we would always end the TB on an SCTLR write; see the
         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
         * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
         * of hflags from the translator, so do it here.
         */
        arm_rebuild_hflags(env);
    }
}
static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
      .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL2_W },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};
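
/*
 * Note on the encodings above (added commentary): for ARM_CP_STATE_AA64
 * entries the {opc0, opc1, crn, crm, opc2} tuple mirrors the MSR/MRS
 * system register encoding {op0, op1, CRn, CRm, op2}; NZCV, for example,
 * is op0=3 op1=3 CRn=4 CRm=2 op2=0, i.e. S3_3_C4_C2_0 in the generic
 * naming scheme. AArch32 entries instead use .cp/.crn/.crm/.opc1/.opc2
 * to match the MCR/MRC coprocessor encoding.
 */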
/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_CONST,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0 },
    REGINFO_SENTINEL
};
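
/*
 * Note on the pattern above (added commentary): when EL2 is absent, EL3
 * accesses to these EL2 registers must still succeed rather than UNDEF,
 * so each one is registered as ARM_CP_CONST with resetvalue 0, i.e. it
 * reads as zero and ignores writes instead of being backed by real state.
 */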
/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
    } else {
        valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        if (cpu_isar_feature(aa64_vh, cpu)) {
            valid_mask |= HCR_E2H;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= HCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= HCR_API | HCR_APK;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
        }
    }

    /* Clear RES0 bits. */
    value &= valid_mask;

    /*
     * These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     * HCR_DCT enables tagging on (disabled) stage1 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    do_hcr_write(env, value, 0);
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
}
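
/*
 * Illustration (added commentary): AArch32 software sees HCR_EL2 as the
 * 32-bit HCR/HCR2 pair, and the deposit64() calls above splice each
 * 32-bit write into the matching half of the 64-bit backing field. For
 * example, with hcr_el2 = 0x0000000280000000, hcr_writelow(0x1) yields
 * 0x0000000200000001 before masking; the MAKE_64BIT_MASK() argument
 * marks the untouched half valid across do_hcr_write()'s RES0 clearing.
 */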
/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (arm_is_secure_below_el3(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        return 0;
    }

    /*
     * For a cpu that supports both aarch64 and aarch32, we can set bits
     * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
     * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
     */
    if (!arm_el_is_aa64(env, 2)) {
        uint64_t aa32_valid;

        /*
         * These bits are up-to-date as of ARMv8.6.
         * For HCR, it's easiest to list just the 2 bits that are invalid.
         * For HCR2, list those that are valid.
         */
        aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
        aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
        ret &= aa32_valid;
    }

    if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.6. */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
                     HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
                     HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}
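
/*
 * Illustration (added commentary): with HCR_EL2.TGE set and E2H clear,
 * arm_hcr_el2_eff() makes FMO/IMO/AMO read as 1, routing physical
 * interrupts to EL2, while trap bits such as TVM or TSC read as 0,
 * since the EL1 behaviour they would trap cannot occur when general
 * exceptions are already taken to EL2.
 */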
static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0x3 << 10);
        value |= env->cp15.cptr_el[2] & (0x3 << 10);
    }
    env->cp15.cptr_el[2] = value;
}
static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= 0x3 << 10;
    }
    return value;
}
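/*
 * For illustration only (hypothetical state, not from the original
 * source): with an AArch32 EL3, NSACR.CP10 == 0 and the CPU in
 * Non-secure state, the read function above forces bits [11:10]
 * (TCP11/TCP10) to 1 (read-as-one), while the write function discards
 * guest writes to those two bits and keeps the stored values.
 */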
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
      /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR",  .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY
/* Test if system register redirection is to occur in the current state.  */
static bool redirect_for_e2h(CPUARMState *env)
{
    return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
}
static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPReadFn *readfn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register.  */
        ri = ri->opaque;
        readfn = ri->readfn;
    } else {
        readfn = ri->orig_readfn;
    }
    if (readfn == NULL) {
        readfn = raw_read;
    }
    return readfn(env, ri);
}
static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    CPWriteFn *writefn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register.  */
        ri = ri->opaque;
        writefn = ri->writefn;
    } else {
        writefn = ri->orig_writefn;
    }
    if (writefn == NULL) {
        writefn = raw_write;
    }
    writefn(env, ri, value);
}
static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
{
    struct E2HAlias {
        uint32_t src_key, dst_key, new_key;
        const char *src_name, *dst_name, *new_name;
        bool (*feature)(const ARMISARegisters *id);
    };

#define K(op0, op1, crn, crm, op2) \
    ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)

    static const struct E2HAlias aliases[] = {
        { K(3, 0,  1, 0, 0), K(3, 4,  1, 0, 0), K(3, 5, 1, 0, 0),
          "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
        { K(3, 0,  1, 0, 2), K(3, 4,  1, 1, 2), K(3, 5, 1, 0, 2),
          "CPACR", "CPTR_EL2", "CPACR_EL12" },
        { K(3, 0,  2, 0, 0), K(3, 4,  2, 0, 0), K(3, 5, 2, 0, 0),
          "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
        { K(3, 0,  2, 0, 1), K(3, 4,  2, 0, 1), K(3, 5, 2, 0, 1),
          "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
        { K(3, 0,  2, 0, 2), K(3, 4,  2, 0, 2), K(3, 5, 2, 0, 2),
          "TCR_EL1", "TCR_EL2", "TCR_EL12" },
        { K(3, 0,  4, 0, 0), K(3, 4,  4, 0, 0), K(3, 5, 4, 0, 0),
          "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
        { K(3, 0,  4, 0, 1), K(3, 4,  4, 0, 1), K(3, 5, 4, 0, 1),
          "ELR_EL1", "ELR_EL2", "ELR_EL12" },
        { K(3, 0,  5, 1, 0), K(3, 4,  5, 1, 0), K(3, 5, 5, 1, 0),
          "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
        { K(3, 0,  5, 1, 1), K(3, 4,  5, 1, 1), K(3, 5, 5, 1, 1),
          "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
        { K(3, 0,  5, 2, 0), K(3, 4,  5, 2, 0), K(3, 5, 5, 2, 0),
          "ESR_EL1", "ESR_EL2", "ESR_EL12" },
        { K(3, 0,  6, 0, 0), K(3, 4,  6, 0, 0), K(3, 5, 6, 0, 0),
          "FAR_EL1", "FAR_EL2", "FAR_EL12" },
        { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
          "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
        { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
          "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
        { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
          "VBAR", "VBAR_EL2", "VBAR_EL12" },
        { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
          "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
        { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
          "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },

        /*
         * Note that redirection of ZCR is mentioned in the description
         * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
         * not in the summary table.
         */
        { K(3, 0,  1, 2, 0), K(3, 4,  1, 2, 0), K(3, 5, 1, 2, 0),
          "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },

        { K(3, 0,  5, 6, 0), K(3, 4,  5, 6, 0), K(3, 5, 5, 6, 0),
          "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },

        /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
        /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
    };
#undef K

    size_t i;

    for (i = 0; i < ARRAY_SIZE(aliases); i++) {
        const struct E2HAlias *a = &aliases[i];
        ARMCPRegInfo *src_reg, *dst_reg;

        if (a->feature && !a->feature(&cpu->isar)) {
            continue;
        }

        src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key);
        dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key);
        g_assert(src_reg != NULL);
        g_assert(dst_reg != NULL);

        /* Cross-compare names to detect typos in the keys.  */
        g_assert(strcmp(src_reg->name, a->src_name) == 0);
        g_assert(strcmp(dst_reg->name, a->dst_name) == 0);

        /* None of the core system registers use opaque; we will.  */
        g_assert(src_reg->opaque == NULL);

        /* Create alias before redirection so we dup the right data. */
        if (a->new_key) {
            ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
            uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t));
            bool ok;

            new_reg->name = a->new_name;
            new_reg->type |= ARM_CP_ALIAS;
            /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place.  */
            new_reg->access &= PL2_RW | PL3_RW;

            ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
            g_assert(ok);
        }

        src_reg->opaque = dst_reg;
        src_reg->orig_readfn = src_reg->readfn ?: raw_read;
        src_reg->orig_writefn = src_reg->writefn ?: raw_write;
        if (!src_reg->raw_readfn) {
            src_reg->raw_readfn = raw_read;
        }
        if (!src_reg->raw_writefn) {
            src_reg->raw_writefn = raw_write;
        }
        src_reg->readfn = el2_e2h_read;
        src_reg->writefn = el2_e2h_write;
    }
}
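/*
 * For illustration only (behaviour implied by the code above, not a
 * quote from the original source): after redirection, an access to
 * SCTLR_EL1 made at EL2 with HCR_EL2.E2H == 1 is routed through
 * el2_e2h_read/el2_e2h_write to the SCTLR_EL2 reginfo, while the newly
 * created SCTLR_EL12 alias still reaches the real EL1 register state.
 */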
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TID2) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TID2) {
            return CP_ACCESS_TRAP_EL2;
        }
    }

    if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}
static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
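/*
 * For illustration only (values taken from the code as reconstructed
 * above): an AArch32 write of the key 0xC5ACCE55 to OSLAR sets the OS
 * lock (OSLSR_EL1 bit 1 becomes 1), and any other AArch32 value clears
 * it; in AArch64 state only bit 0 of the written value is consulted.
 */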
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);

    if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        bool disabled = false;

        /* The CPACR.ZEN controls traps to EL1:
         * 0, 2 : trap EL0 and EL1 accesses
         * 1    : trap only EL0 accesses
         * 3    : trap no accesses
         */
        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            /* route_to_el2 */
            return hcr_el2 & HCR_TGE ? 2 : 1;
        }

        /* Check CPACR.FPEN.  */
        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            return 0;
        }
    }

    /* CPTR_EL2.  Since TZ and TFP are positive,
     * they will be zero when EL2 is not present.
     */
    if (el <= 2 && !arm_is_secure_below_el3(env)) {
        if (env->cp15.cptr_el[2] & CPTR_TZ) {
            return 2;
        }
        if (env->cp15.cptr_el[2] & CPTR_TFP) {
            return 0;
        }
    }

    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}
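/*
 * For illustration only (hypothetical configuration, not from the
 * original source): at EL0 with CPACR_EL1.ZEN == 1 (bit 16 set, bit 17
 * clear), "disabled" above is true only for EL0, so an EL0 SVE access
 * traps to EL1 (or to EL2 when HCR_EL2.TGE is set) while an EL1 access
 * falls through to the CPTR_EL2/CPTR_EL3 checks.
 */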
static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
{
    uint32_t end_len;

    end_len = start_len &= 0xf;
    if (!test_bit(start_len, cpu->sve_vq_map)) {
        end_len = find_last_bit(cpu->sve_vq_map, start_len);
        assert(end_len < start_len);
    }
    return end_len;
}
/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }

    return sve_zcr_get_valid_len(cpu, zcr_len);
}
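/*
 * For illustration only (hypothetical values, not from the original
 * source): with cpu->sve_max_vq == 4 (512-bit vectors) and
 * ZCR_EL1 == 1, the EL1 result of the MIN chain is MIN(4 - 1, 1) == 1,
 * i.e. VQ 2 == 256-bit vectors, assuming VQ 2 is set in sve_vq_map so
 * that sve_zcr_get_valid_len() returns it unchanged.
 */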
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI.  */
    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
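/*
 * For illustration only (hypothetical register values, not from the
 * original source): with MASK == 0 and BAS == 0b00001100 in DBGWCR,
 * basstart = ctz32(0x0c) = 2 and len = cto32(0x03) = 2, so a 2-byte
 * watchpoint is inserted at WVR + 2. With MASK == 3 instead, BAS is
 * ignored and an 8-byte (1 << 3) naturally-aligned region is watched.
 */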
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
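/*
 * For illustration only (hypothetical value, not from the original
 * source): a guest write of BAS = 0b0110 (DBGBCR bits [8:5]) is
 * rewritten by the two deposit64() calls above to 0b1100: bit 6 is
 * replaced by a copy of bit 5, then bit 8 by a copy of bit 7. Only
 * 0b0000, 0b0011, 0b1100 and 0b1111 can therefore ever be stored,
 * which is exactly what hw_breakpoint_update() relies on.
 */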
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = arm_num_brps(cpu);
    wrps = arm_num_wrps(cpu);
    ctx_cmps = arm_num_ctx_cmps(cpu);

    assert(ctx_cmps <= brps);

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
static void define_pmu_regs(ARMCPU *cpu)
{
    /*
     * v7 performance monitor control register: same implementor
     * field as main ID register, and we implement four counters in
     * addition to the cycle count register.
     */
    unsigned int i, pmcrn = 4;
    ARMCPRegInfo pmcr = {
        .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
        .access = PL0_RW,
        .type = ARM_CP_IO | ARM_CP_ALIAS,
        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
        .accessfn = pmreg_access, .writefn = pmcr_write,
        .raw_writefn = raw_write,
    };
    ARMCPRegInfo pmcr64 = {
        .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
        .access = PL0_RW, .accessfn = pmreg_access,
        .type = ARM_CP_IO,
        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
        .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) |
                      PMCRLC,
        .writefn = pmcr_write, .raw_writefn = raw_write,
    };
    define_one_arm_cp_reg(cpu, &pmcr);
    define_one_arm_cp_reg(cpu, &pmcr64);
    for (i = 0; i < pmcrn; i++) {
        char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
        char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
        char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
        char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
        ARMCPRegInfo pmev_regs[] = {
            { .name = pmevcntr_name, .cp = 15, .crn = 14,
              .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
              .accessfn = pmreg_access },
            { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
              .type = ARM_CP_IO,
              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
              .raw_readfn = pmevcntr_rawread,
              .raw_writefn = pmevcntr_rawwrite },
            { .name = pmevtyper_name, .cp = 15, .crn = 14,
              .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
              .accessfn = pmreg_access },
            { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
              .type = ARM_CP_IO,
              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
              .raw_writefn = pmevtyper_rawwrite },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, pmev_regs);
        g_free(pmevcntr_name);
        g_free(pmevcntr_el0_name);
        g_free(pmevtyper_name);
        g_free(pmevtyper_el0_name);
    }
    if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (cpu_isar_feature(any_pmu_8_4, cpu)) {
        static const ARMCPRegInfo v84_pmmir = {
            .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
            .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
            .resetvalue = 0
        };
        define_one_arm_cp_reg(cpu, &v84_pmmir);
    }
}
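/*
 * For illustration only (derived from the encoding arithmetic above):
 * event counter i maps to crm = 8 | (3 & (i >> 3)) and opc2 = i & 7,
 * so PMEVCNTR5 uses crm = 8, opc2 = 5 and a hypothetical PMEVCNTR13
 * would use crm = 9, opc2 = 5. With pmcrn == 4 only counters 0..3 are
 * actually defined here.
 */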
/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr1 = cpu->isar.id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

#ifndef CONFIG_USER_ONLY
static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
#endif
/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access ok in secure mode.  */
        return CP_ACCESS_OK;
    }
    return access_lor_ns(env);
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode.  */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env);
}
/*
 * A trivial implementation of ARMv8.1-LOR leaves all of these
 * registers fixed at 0, which indicates that there are zero
 * supported Limited Ordering regions.
 */
static const ARMCPRegInfo lor_reginfo[] = {
    { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
      .access = PL1_R, .accessfn = access_lorid,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_feature(env, ARM_FEATURE_EL2) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
    REGINFO_SENTINEL
};
static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000.  */
    env->NF = env->CF = env->VF = 0, env->ZF = 1;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest.  There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      ri->name, error_get_pretty(err));
        error_free(err);

        env->ZF = 0; /* NZCV = 0100 */
        return 0;
    }
    return ret;
}

/* We do not support re-seeding, so the two registers operate the same.  */
static const ARMCPRegInfo rndr_reginfo[] = {
    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
      .access = PL0_R, .readfn = rndr_readfn },
    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
      .access = PL0_R, .readfn = rndr_readfn },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY
static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
                           uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /* CTR_EL0 System register -> DminLine, bits [19:16] */
    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
    uint64_t vaddr_in = (uint64_t) value;
    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
    void *haddr;
    int mem_idx = cpu_mmu_index(env, false);

    /* This won't be crossing page boundaries */
    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
    if (haddr) {

        ram_addr_t offset;
        MemoryRegion *mr;

        /* RCU lock is already being held */
        mr = memory_region_from_host(haddr, &offset);

        if (mr) {
            memory_region_writeback(mr, offset, dline_size);
        }
    }
}
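/*
 * For illustration only (hypothetical CTR_EL0 value, not from the
 * original source): if CTR_EL0.DminLine (bits [19:16]) reads as 4,
 * then dline_size = 4 << 4 = 64 bytes, and a write of
 * vaddr_in = 0x1234 is aligned down to vaddr = 0x1200 before the line
 * is probed and written back.
 */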
static const ARMCPRegInfo dcpop_reg[] = {
    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dcpodp_reg[] = {
    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
    REGINFO_SENTINEL
};
#endif /*CONFIG_USER_ONLY*/
static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_feature(env, ARM_FEATURE_EL2) &&
        !(arm_hcr_el2_eff(env) & HCR_ATA)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_ATA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_TCO;
}

static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
}
static const ARMCPRegInfo mte_reginfo[] = {
    { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
    { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
    { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
    { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
    { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
    { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
    { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
      .access = PL1_R, .accessfn = access_aa64_tid5,
      .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
    { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL1_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL1_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    REGINFO_SENTINEL
};
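
/*
 * When only the EL0-visible MTE instructions are implemented, TCO still
 * needs to be accessible, but it can be a constant RAZ/WI since no tag
 * checking ever takes place.
 */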
static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_CONST, .access = PL0_RW, },
    REGINFO_SENTINEL
};
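
/*
 * The tag variants of the DC cache maintenance operations are NOPs here:
 * QEMU does not model the caches themselves, so only the permission
 * checks performed by the accessfns have any visible effect.
 */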
static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
    { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
      .access = PL0_W, .type = ARM_CP_DC_GVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_DC_GZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    REGINFO_SENTINEL
};
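
/*
 * FEAT_SPECRES prediction restriction by context: EL0 access is gated by
 * SCTLR_ELx.EnRCTX, and HCR_EL2.NV traps EL1 accesses to EL2.
 */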
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}
static const ARMCPRegInfo predinv_reginfo[] = {
    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /*
     * Note the AArch32 opcodes have a different OPC1.
     */
    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    REGINFO_SENTINEL
};
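
/*
 * With FEAT_CCIDX the cache size description outgrows 32 bits; CCSIDR2
 * exposes bits [63:32] of the current CCSIDR.
 */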
static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read the high 32 bits of the current CCSIDR */
    return extract64(ccsidr_read(env, ri), 32, 32);
}

static const ARMCPRegInfo ccsidr2_reginfo[] = {
    { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid3(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}
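
/*
 * A "trivial" Jazelle implementation: the ID, OS control and main
 * configuration registers exist but read as zero and ignore writes,
 * which is an architecturally valid way of providing Jazelle.
 */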
static const ARMCPRegInfo jazelle_regs[] = {
    { .name = "JIDR",
      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_R, .accessfn = access_jazelle,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JOSCR",
      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JMCR",
      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
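
/*
 * Registers for FEAT_VHE. The CNT*_EL02 entries let EL2 (with
 * HCR_EL2.E2H set) reach the EL0 physical and virtual timers; they are
 * aliases onto the same backing state as the normal CNTP/CNTV registers.
 */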
static const ARMCPRegInfo vhe_reginfo[] = {
    { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
    { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
#ifndef CONFIG_USER_ONLY
    { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
      .fieldoffset =
        offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hv_timer_reset,
      .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
    { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
      .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
    { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
    { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
    { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
#endif
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY
static const ARMCPRegInfo ats1e1_reginfo[] = {
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo ats1cp_reginfo[] = {
    { .name = "ATS1CPRP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
    { .name = "ATS1CPWP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
    REGINFO_SENTINEL
};
#endif
/*
 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
 * is non-zero, which is never the case for ARMv7, optional in
 * ARMv8 and mandatory for ARMv8.2 and up.
 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
 * implementation is RAZ/WI we can ignore this detail, as we
 * do for ACTLR.
 */
static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
    { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_tacr,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .accessfn = access_aa32_tid3,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar6 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST,
            .accessfn = access_aa64_tid2,
            .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
        define_pmu_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /*
             * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
             * emulation because we don't know the right value for the
             * GIC field until after we define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R,
#ifdef CONFIG_USER_ONLY
              .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr0
#else
              .type = ARM_CP_NO_RAW,
              .accessfn = access_aa64_tid3,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore
#endif
            },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64pfr1},
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              /* At present, only SVEver == 0 is defined anyway. */
              .resetvalue = 0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr2 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
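        /*
         * For user-mode emulation only selected ID register fields are
         * visible to the guest: exported_bits masks what the CPU model
         * provides, and fixed_bits forces a baseline value.
         */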
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = 0x000f000f00ff0000,
              .fixed_bits    = 0x0000000000000011 },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = 0x00000000000000f0 },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64ZFR0_EL1"           },
            { .name = "ID_AA64MMFR0_EL1",
              .fixed_bits    = 0x00000000ff000000 },
            { .name = "ID_AA64MMFR1_EL1"          },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits    = 0x0000000000000006 },
            { .name = "ID_AA64DFR1_EL1"           },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64AFR*",
              .is_glob = true                     },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = 0x00fffffff0fffff0 },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = 0x000000f0ffffffff },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true                     },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
            if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
            }
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-AA32HPD.  */
        if (cpu_isar_feature(aa32_hpd, cpu)) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    if (cpu_isar_feature(aa32_jazelle, cpu)) {
        define_arm_cp_regs(cpu, jazelle_regs);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R,
              .accessfn = access_aa64_tid1,
              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
              .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
              .name = "MPUIR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1",
              .exported_bits = 0x00000000ffffffff },
            { .name = "REVIDR_EL1" },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
              .fixed_bits = 0x0000000080000000 },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_tacr,
              .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (cpu_isar_feature(aa32_ac2, cpu)) {
            define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        /*
         * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
         * There are two flavours:
         *  (1) older 32-bit only cores have a simple 32-bit CBAR
         *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
         *      32-bit register visible to AArch32 at a different encoding
         *      to the "flavour 1" register and with the bits rearranged to
         *      be able to squash a 64-bit address into the 32-bit view.
         * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
         * in future if we support AArch32-only configs of some of the
         * AArch64 cores we might need to add a specific feature flag
         * to indicate cores with "flavour 2" CBAR.
         */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW, .accessfn = access_tvm_trvm,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache.  Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        define_arm_cp_regs(cpu, lor_reginfo);
    }
    if (cpu_isar_feature(aa64_pan, cpu)) {
        define_one_arm_cp_reg(cpu, &pan_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1e1_reginfo);
    }
    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1cp_reginfo);
    }
#endif
    if (cpu_isar_feature(aa64_uao, cpu)) {
        define_one_arm_cp_reg(cpu, &uao_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_cp_regs(cpu, vhe_reginfo);
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }
#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
    if (cpu_isar_feature(aa64_rndr, cpu)) {
        define_arm_cp_regs(cpu, rndr_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    /* Data Cache clean instructions up to PoP */
    if (cpu_isar_feature(aa64_dcpop, cpu)) {
        define_one_arm_cp_reg(cpu, dcpop_reg);

        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
            define_one_arm_cp_reg(cpu, dcpodp_reg);
        }
    }
#endif /*CONFIG_USER_ONLY*/

    /*
     * If full MTE is enabled, add all of the system registers.
     * If only "instructions available at EL0" are enabled,
     * then define only a RAZ/WI version of PSTATE.TCO.
     */
    if (cpu_isar_feature(aa64_mte, cpu)) {
        define_arm_cp_regs(cpu, mte_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
        define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    }
#endif

    if (cpu_isar_feature(any_predinv, cpu)) {
        define_arm_cp_regs(cpu, predinv_reginfo);
    }

    if (cpu_isar_feature(any_ccidx, cpu)) {
        define_arm_cp_regs(cpu, ccsidr2_reginfo);
    }

#ifndef CONFIG_USER_ONLY
    /*
     * Register redirections and aliases must be done last,
     * after the registers from the other extensions have been defined.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_vh_e2h_redirects_aliases(cpu);
    }
#endif
}
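
/*
 * Tell the gdbstub about coprocessor state. The XML register
 * descriptions are either static files with a fixed register count
 * (arm-vfp.xml and friends) or generated dynamically, as for the
 * system registers and SVE.
 */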
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /*
         * The lower part of each SVE register aliases to the FPU
         * registers so we don't need to include both.
         */
#ifdef TARGET_AARCH64
        if (isar_feature_aa64_sve(&cpu->isar)) {
            gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg,
                                     arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs),
                                     "sve-registers.xml", 0);
        } else
#endif
        {
            gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                     aarch64_fpu_gdb_set_reg,
                                     34, "aarch64-fpu.xml", 0);
        }
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (cpu_isar_feature(aa32_simd_r32, cpu)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs),
                             "system-registers.xml", 0);

}
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    qemu_printf("  %s\n", name);
    g_free(name);
}
void arm_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    qemu_printf("Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, NULL);
    g_slist_free(list);
}
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
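
/*
 * Every defined register ends up in cpu->cp_regs keyed by a packed
 * 32-bit encoding of its coprocessor and opcode fields (ENCODE_CP_REG
 * or ENCODE_AA64_CP_REG below), so lookup at translate time is a
 * single hash table probe.
 */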
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);
    /* Reset the secure state to the specific incoming state.  This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank.  This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
8426 void define_one_arm_cp_reg_with_opaque(ARMCPU
*cpu
,
8427 const ARMCPRegInfo
*r
, void *opaque
)
8429 /* Define implementations of coprocessor registers.
8430 * We store these in a hashtable because typically
8431 * there are less than 150 registers in a space which
8432 * is 16*16*16*8*8 = 262144 in size.
8433 * Wildcarding is supported for the crm, opc1 and opc2 fields.
8434 * If a register is defined twice then the second definition is
8435 * used, so this can be used to define some generic registers and
8436 * then override them with implementation specific variations.
8437 * At least one of the original and the second definition should
8438 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
8439 * against accidental use.
8441 * The state field defines whether the register is to be
8442 * visible in the AArch32 or AArch64 execution state. If the
8443 * state is set to ARM_CP_STATE_BOTH then we synthesise a
8444 * reginfo structure for the AArch32 view, which sees the lower
8445 * 32 bits of the 64 bit register.
8447 * Only registers visible in AArch64 may set r->opc0; opc0 cannot
8448 * be wildcarded. AArch64 registers are always considered to be 64
8449 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
8450 * the register, if any.
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /*
     * This API is only for Arm's system coprocessors (14 and 15) or
     * (M-profile or v7A-and-earlier only) for implementation defined
     * coprocessors in the range 0..7. Our decode assumes this, since
     * 8..13 can be used for other insns including VFP and Neon. See
     * valid_cp() in translate.c. Assert here that we haven't tried
     * to use an invalid coprocessor number.
     */
    switch (r->state) {
    case ARM_CP_STATE_BOTH:
        /* 0 has a special meaning, but otherwise the same rules as AA32. */
        if (r->cp == 0) {
            break;
        }
        /* fall through */
    case ARM_CP_STATE_AA32:
        if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
            !arm_feature(&cpu->env, ARM_FEATURE_M)) {
            assert(r->cp >= 14 && r->cp <= 15);
        } else {
            assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
        }
        break;
    case ARM_CP_STATE_AA64:
        assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
        break;
    default:
        g_assert_not_reached();
    }
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0:
            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
            mask = PL0U_R | PL1_RW;
            break;
        case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
        case 5:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }
    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL | ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of the register.
                         */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}
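/*
 * Illustrative sketch (not built; the register names and encodings below are
 * hypothetical): how a caller of the API above typically combines wildcarding,
 * the sentinel terminator and ARM_CP_OVERRIDE when layering a specific
 * definition over a generic one.
 */
#if 0
static const ARMCPRegInfo example_cp_reginfo[] = {
    /* Expands to every legal crm/opc1/opc2 combination in cp15 crn9 */
    { .name = "EXAMPLE_GENERIC", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Overrides a single encoding covered by the wildcard above */
    { .name = "EXAMPLE_SPECIFIC", .cp = 15, .crn = 9,
      .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
      .resetvalue = 0x42 },
    REGINFO_SENTINEL
};
/* usage: define_arm_cp_regs(cpu, example_cp_reginfo); */
#endif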
/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
{
    const ARMCPRegUserSpaceInfo *m;
    ARMCPRegInfo *r;

    for (m = mods; m->name; m++) {
        GPatternSpec *pat = NULL;
        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}
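/*
 * Illustrative sketch (not built; names hypothetical): a mods table for
 * modify_arm_cp_regs(), exposing only the low nibble of one register to
 * user-only mode and zeroing everything matching a glob pattern.
 */
#if 0
static ARMCPRegUserSpaceInfo example_user_cp_reginfo[] = {
    { .name = "EXAMPLE_REG", .exported_bits = 0xf },
    { .name = "EXAMPLE_ID_*", .is_glob = true },
    REGUSERINFO_SENTINEL
};
/* usage: modify_arm_cp_regs(example_cp_reginfo, example_user_cp_reginfo); */
#endif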
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}
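/*
 * Illustrative sketch (not built): retrieving a reginfo by its encoded key,
 * here the AArch32 non-secure view of MIDR (cp15, crn=0, crm=0, opc1=0,
 * opc2=0), mirroring what the translator does on each cpreg access.
 */
#if 0
static const ARMCPRegInfo *example_lookup_midr(ARMCPU *cpu)
{
    uint32_t key = ENCODE_CP_REG(15, 0, 1, 0, 0, 0, 0);
    return get_arm_cp_reginfo(cpu->cp_regs, key);
}
#endif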
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
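/*
 * Illustrative sketch (not built): how the lazy flag fields above reassemble
 * into an architectural CPSR value. With Z and C set, SVC mode and IRQ/FIQ
 * masked, cpsr_read() yields 0x600001d3; assumes all other fields are zeroed
 * beforehand.
 */
#if 0
static void example_cpsr_read(CPUARMState *env)
{
    env->ZF = 0;                    /* ZF == 0 encodes Z == 1 */
    env->NF = env->VF = env->QF = 0;
    env->CF = 1;
    env->GE = 0;
    env->thumb = 0;
    env->condexec_bits = 0;
    env->daif = CPSR_I | CPSR_F;
    env->uncached_cpsr = ARM_CPU_MODE_SVC;
    assert(cpsr_read(env) == 0x600001d3);
}
#endif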
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }
    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;
    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    if (num == INT_MIN && den == -1)
        return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
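/*
 * Illustrative sketch (not built): expected values for the packed
 * sign/zero extension helpers above.
 */
#if 0
static void example_xtb16(void)
{
    assert(helper_sxtb16(0x00800080) == 0xff80ff80); /* bytes sign-extend */
    assert(helper_uxtb16(0x12f034f0) == 0x00f000f0); /* bytes zero-extend */
}
#endif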
#ifdef CONFIG_USER_ONLY

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = env_archcpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}
/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 *
 * In these two cases, the below table contain a target of EL1.  This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
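/*
 * Illustrative sketch (not built): a worked lookup of the table above.
 * A physical IRQ from non-secure EL0 with a 64-bit EL3, SCR.IRQ = 0,
 * SCR.RW = 1 and HCR.{IMO,TGE} = 0 targets EL1.
 */
#if 0
static void example_target_el_lookup(void)
{
    /* indexes: [is64][scr][rw][hcr][secure][cur_el] */
    assert(target_el_table[1][0][1][0][0][0] == 1);
}
#endif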
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    };

    /*
     * For these purposes, TGE and AMO/IMO/FMO both force the
     * interrupt to EL2.  Fold TGE into the bit extracted above.
     */
    hcr |= (hcr_el2 & HCR_TGE) != 0;

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}
void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
            [EXCP_LSERR] = "v8M LSERR UsageFault",
            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
/*
 * Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /*
     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /*
     * Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
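/*
 * Illustrative sketch (not built): the fixed mapping above makes the IRQ
 * banked SP visible as X17 whenever the current mode is not IRQ; assumes
 * the rest of the CPU state is already consistent.
 */
#if 0
static void example_sync_32_to_64(CPUARMState *env)
{
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | ARM_CPU_MODE_SVC;
    env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = 0x1000;
    aarch64_sync_32_to_64(env);
    assert(env->xregs[17] == 0x1000);
}
#endif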
/*
 * Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /*
     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /*
     * Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * for the mode.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /*
         * HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    int new_el;

    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);

    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;

    /* This must be after mode switching. */
    new_el = arm_current_el(env);

    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /* CPSR.PAN is normally preserved unless...  */
        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
            switch (new_el) {
            case 3:
                if (!arm_is_secure_below_el3(env)) {
                    /* ... the target is EL3, from non-secure state.  */
                    env->uncached_cpsr &= ~CPSR_PAN;
                    break;
                }
                /* ... the target is EL3, from secure state ... */
                /* fall through */
            case 1:
                /* ... the target is EL1 and SCTLR.SPAN is 0.  */
                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
                    env->uncached_cpsr |= CPSR_PAN;
                }
                break;
            }
        }
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
    arm_rebuild_hflags(env);
}
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x14;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
{
    /*
     * Return the register number of the AArch64 view of the AArch32
     * register @aarch32_reg. The CPUARMState CPSR is assumed to still
     * be that of the AArch32 mode the exception came from.
     */
    int mode = env->uncached_cpsr & CPSR_M;

    switch (aarch32_reg) {
    case 0 ... 7:
        return aarch32_reg;
    case 8 ... 12:
        return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
    case 13:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
            return 13;
        case ARM_CPU_MODE_HYP:
            return 15;
        case ARM_CPU_MODE_IRQ:
            return 17;
        case ARM_CPU_MODE_SVC:
            return 19;
        case ARM_CPU_MODE_ABT:
            return 21;
        case ARM_CPU_MODE_UND:
            return 23;
        case ARM_CPU_MODE_FIQ:
            return 29;
        default:
            g_assert_not_reached();
        }
    case 14:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
        case ARM_CPU_MODE_HYP:
            return 14;
        case ARM_CPU_MODE_IRQ:
            return 16;
        case ARM_CPU_MODE_SVC:
            return 18;
        case ARM_CPU_MODE_ABT:
            return 20;
        case ARM_CPU_MODE_UND:
            return 22;
        case ARM_CPU_MODE_FIQ:
            return 30;
        default:
            g_assert_not_reached();
        }
    case 15:
        return 31;
    default:
        g_assert_not_reached();
    }
}
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int old_mode;
    unsigned int cur_el = arm_current_el(env);
    int rt;

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;
        uint64_t hcr;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
                is_aa64 = (hcr & HCR_RW) != 0;
                break;
            }
            /* fall through */
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        switch (syn_get_ec(env->exception.syndrome)) {
        case EC_ADVSIMDFPACCESSTRAP:
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
            break;
        case EC_CP14RTTRAP:
        case EC_CP15RTTRAP:
        case EC_CP14DTTRAP:
            /*
             * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
             * the raw register field from the insn; when taking this to
             * AArch64 we must convert it to the AArch64 view of the register
             * number. Notice that we read a 4-bit AArch32 register number and
             * write back a 5-bit AArch64 one.
             */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            break;
        case EC_CP15RRTTRAP:
        case EC_CP14RRTTRAP:
            /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            rt = extract32(env->exception.syndrome, 10, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                10, 5, rt);
            break;
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        old_mode = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        old_mode = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;

    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    if (cpu_isar_feature(aa64_pan, cpu)) {
        /* The value of PSTATE.PAN is normally preserved, except when ... */
        new_mode |= old_mode & PSTATE_PAN;
        switch (new_el) {
        case 2:
            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
                != (HCR_E2H | HCR_TGE)) {
                break;
            }
            /* fall through */
        case 1:
            /* ... the target is EL1 ... */
            /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
                new_mode |= PSTATE_PAN;
            }
            break;
        }
    }
    if (cpu_isar_feature(aa64_mte, cpu)) {
        new_mode |= PSTATE_TCO;
    }

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);
    helper_rebuild_hflags_a64(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
/*
 * Do semihosting call and set the appropriate return value. All the
 * permission and validity checks have been done at translate time.
 *
 * We only see semihosting exceptions in TCG, as they are not
 * trapped to the hypervisor in KVM.
 */
#ifdef CONFIG_TCG
static void handle_semihosting(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        env->pc += 4;
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        env->regs[15] += env->thumb ? 2 : 4;
    }
}
#endif
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
#ifdef CONFIG_TCG
    if (cs->exception_index == EXCP_SEMIHOST) {
        handle_semihosting(cs);
        return;
    }
#endif

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */
uint64_t arm_sctlr(CPUARMState *env, int el)
{
    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
    if (el == 0) {
        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
        el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1);
    }
    return env->cp15.sctlr_el[el];
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
#ifndef CONFIG_USER_ONLY

/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (env->cp15.hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}
/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

#endif /* !CONFIG_USER_ONLY */
/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}
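/*
 * Illustrative sketch (not built): combined stage1+2 indexes collapse to
 * their stage 1 component, while plain indexes pass through unchanged.
 */
#if 0
static void example_stage_1_mmu_idx(void)
{
    assert(stage_1_mmu_idx(ARMMMUIdx_E10_1) == ARMMMUIdx_Stage1_E1);
    assert(stage_1_mmu_idx(ARMMMUIdx_E2) == ARMMMUIdx_E2);
}
#endif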
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}
#ifndef CONFIG_USER_ONLY
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
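/*
 * Illustrative sketch (not built): the simple AP[2:1] decode above for
 * unprivileged and privileged lookups.
 */
#if 0
static void example_simple_ap(void)
{
    assert(simple_ap_to_rw_prot_is_user(0, true) == 0);
    assert(simple_ap_to_rw_prot_is_user(1, true) == (PAGE_READ | PAGE_WRITE));
    assert(simple_ap_to_rw_prot_is_user(2, false) == PAGE_READ);
    assert(simple_ap_to_rw_prot_is_user(3, false) == PAGE_READ);
}
#endif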
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        switch (xn) {
        case 0:
            prot |= PAGE_EXEC;
            break;
        case 1:
            if (s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        case 2:
            break;
        case 3:
            if (!s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        if (!extract32(xn, 1, 1)) {
            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
                prot |= PAGE_EXEC;
            }
        }
    }
    return prot;
}
/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_Stage2);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            /* PAN forbids data accesses but doesn't affect insn fetch */
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMCacheAttrs cacheattrs = {};

        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, ARMMMUIdx_Stage2,
                                 false,
                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
                                 &cacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        if ((env->cp15.hcr_el2 & HCR_PTW) && (cacheattrs.attrs & 0xf0) == 0) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
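/*
 * Illustrative sketch (not built): the 1MB section arithmetic used above.
 * A hypothetical section descriptor based at 0x80000000 maps VA 0x00123456
 * to PA 0x80123456 by splicing in the low 20 address bits.
 */
#if 0
static void example_v5_section_addr(void)
{
    uint32_t desc = 0x80000c12;     /* hypothetical: base 0x80000000, type 2 */
    uint32_t address = 0x00123456;
    assert(((desc & 0xfff00000) | (address & 0x000fffff)) == 0x80123456);
}
#endif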
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    ARMCPU *cpu = env_archcpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
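
/*
 * Editor's note, worked example: s2attrs = 0xf (Normal, Outer and Inner
 * Write-Back) with caches enabled yields hiattr = loattr = 3 and
 * hihint = lohint = 3, i.e. 0xff: Normal Write-Back Read-Allocate
 * Write-Allocate in MAIR format.  With HCR_EL2.CD set, the same input
 * collapses to hiattr = loattr = 1 (Non-cacheable) with no hints,
 * giving 0x44.
 */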
#endif /* !CONFIG_USER_ONLY */
static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 37, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBI bit so we always have 2 bits.  */
        return extract32(tcr, 20, 1) * 3;
    }
}

static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 51, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBID bit so we always have 2 bits.  */
        return extract32(tcr, 29, 1) * 3;
    }
}

static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 57, 2);
    } else {
        /* Replicate the single TCMA bit so we always have 2 bits.  */
        return extract32(tcr, 30, 1) * 3;
    }
}
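
/*
 * Editor's note on the "* 3" idiom above: multiplying a single bit by 3
 * copies it into a 2-bit field (0 -> 0b00, 1 -> 0b11), so callers can
 * treat the one-range TBI/TBID/TCMA controls exactly like the two-range
 * variants and simply shift by 'select' to pick out the relevant bit.
 */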
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    bool epd, hpd, using16k, using64k;
    int select, tsz, tbi;

    if (!regime_has_2_ranges(mmu_idx)) {
        select = 0;
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_Stage2) {
            /* VTCR_EL2 */
            hpd = false;
        } else {
            hpd = extract32(tcr, 24, 1);
        }
        epd = false;
    } else {
        /*
         * Bit 55 is always between the two regions, and is canonical for
         * determining if address tagging is enabled.
         */
        select = extract64(va, 55, 1);
        if (!select) {
            tsz = extract32(tcr, 0, 6);
            epd = extract32(tcr, 7, 1);
            using64k = extract32(tcr, 14, 1);
            using16k = extract32(tcr, 15, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            int tg = extract32(tcr, 30, 2);
            using16k = tg == 1;
            using64k = tg == 3;
            tsz = extract32(tcr, 16, 6);
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
    }
    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */

    /* Present TBI as a composite with TBID.  */
    tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
    if (!data) {
        tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
    }
    tbi = (tbi >> select) & 1;

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .tbi = tbi,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
    };
}
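
/*
 * Editor's note, illustrative example of the TBI/TBID composite above
 * (register values assumed, not from any particular guest): with
 * TCR_EL1.TBI1 = 1 and TCR_EL1.TBID1 = 1, a data access to a VA with
 * bit 55 set selects the high region and returns tbi = 1, so the top
 * byte is ignored for address checks; an instruction fetch to the same
 * VA masks TBI with ~TBID and returns tbi = 0, so the top byte is
 * checked for the fetch.
 */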
#ifndef CONFIG_USER_ONLY
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
/**
 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a long-format
 * DFSR/IFSR fault register, with the following caveats:
 *  * the WnR bit is never set (the caller must do this).
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @mmu_idx: MMU index indicating required translation regime
 * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table
 *             walk), must be true if this is stage 2 of a stage 1+2 walk for an
 *             EL0 access. If @mmu_idx is anything else, @s1_is_el0 is ignored.
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size_ptr: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool s1_is_el0,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO: This code does not support shareability levels. */
    if (aarch64) {
        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;
        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
        inputsize = addrsize - param.tsz;
    }
    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_Stage2) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
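        /*
         * Editor's note, worked example: a 4KB granule (stride = 9) with a
         * 48-bit input address gives level = 4 - (48 - 4) / 9 = 4 - 4 = 0,
         * i.e. a full four-level walk; a 39-bit input (tsz = 25) gives
         * 4 - (39 - 4) / 9 = 4 - 3 = 1, so the walk starts at level 1.
         */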
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    /*
     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
     * and also to mask out CnP (bit 0) which could validly be non-zero.
     */
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_Stage2) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        ns = true;
        xn = extract32(attrs, 11, 2);
        *prot = get_S2prot(env, ap, xn, s1_is_el0);
    } else {
        ns = extract32(attrs, 3, 1);
        xn = extract32(attrs, 12, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        arm_tlb_bti_gp(txattrs) = true;
    }

    if (mmu_idx == ARMMMUIdx_Stage2) {
        cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
    } else {
        /* Index into MAIR registers for cache attributes */
        uint8_t attrindx = extract32(attrs, 0, 3);
        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
        assert(attrindx <= 7);
        cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
    }
    cacheattrs->shareability = extract32(attrs, 6, 2);

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2);
    return true;
}
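
/*
 * Editor's note: the block/page size computed in the walk above is
 * 1 << (stride * (4 - level) + 3).  With a 4KB granule (stride = 9) a
 * level-3 page is 1 << 12 = 4KB, a level-2 block is 1 << 21 = 2MB and a
 * level-1 block is 1 << 30 = 1GB, matching the architected sizes.
 */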
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
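
/*
 * Editor's note: extract32(address, 29, 3) == 0x7 is just a branch-free
 * way of testing that the top three address bits are all set, i.e. that
 * address >= 0xe0000000, the base of the M profile system region.
 */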
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++; /* region size is doubled */
                }
            }
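            /*
             * Editor's note, example of the coalescing loop above
             * (illustrative numbers): for a 256-byte region, rsize drops
             * from 8 to 5, giving eight 32-byte subregions.  An address at
             * offset 96 lands in subregion 3 (96 >> 5 = 3).  If subregions
             * 2 and 3 share the same disable bit, the first pass (i = 2)
             * merges them into one 64-byte unit (rsize = 6); if subregions
             * 0-3 all agree, the next pass (i = 4) widens it to 128 bytes,
             * and so on until the bits disagree or a full QEMU page is
             * covered.
             */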
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
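
/*
 * Editor's note (assumed mapping, see the v8M SecurityCheck() pseudocode):
 * the literal ranges above appear to correspond to the PPB components the
 * architecture exempts from SAU/IDAU checks, roughly ITM/DWT/FPB, the
 * System Control Space at 0xe000e000 and its NS alias at 0xe002e000, the
 * TPIU/ETM area, and the ROM table.
 */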
void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
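
/*
 * Editor's note, worked example: s1 = 0xf (Write-Back, RW-allocate)
 * combined with s2 = 0xa (Write-Through) takes the third branch above
 * and yields (2 << 2) | 3 = 0xb, i.e. Write-Through but keeping the
 * stage 1 allocation hints; combining anything with 4 (Non-cacheable)
 * yields 4.
 */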
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo, s2lo, s1hi, s2hi;
    ARMCacheAttrs ret;
    bool tagged = false;

    if (s1.attrs == 0xf0) {
        tagged = true;
        s1.attrs = 0xff;
    }

    s1lo = extract32(s1.attrs, 0, 4);
    s2lo = extract32(s2.attrs, 0, 4);
    s1hi = extract32(s1.attrs, 4, 4);
    s2hi = extract32(s2.attrs, 4, 4);

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
    if (tagged && ret.attrs == 0xff) {
        ret.attrs = 0xf0;
    }

    return ret;
}
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_E10_0 ||
        mmu_idx == ARMMMUIdx_E10_1 ||
        mmu_idx == ARMMMUIdx_E10_1_PAN) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early.  */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation.  */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2,
                                     mmu_idx == ARMMMUIdx_E10_0,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi, &cacheattrs2);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms.  */
            *prot &= s2_prot;

            /* If S2 fails, return early.  */
            if (ret) {
                return ret;
            }

            /* Combine the S1 and S2 cache attributes. */
            if (env->cp15.hcr_el2 & HCR_DC) {
                /*
                 * HCR.DC forces the first stage attributes to
                 *  Normal Non-Shareable,
                 *  Inner Write-Back Read-Allocate Write-Allocate,
                 *  Outer Write-Back Read-Allocate Write-Allocate.
                 * Do not overwrite Tagged within attrs.
                 */
                if (cacheattrs->attrs != 0xf0) {
                    cacheattrs->attrs = 0xff;
                }
                cacheattrs->shareability = 0;
            }
            *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            return 0;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }
    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }
    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        uint64_t hcr;
        uint8_t memattr;

        /*
         * MMU disabled.  S1 addresses within aa64 translation regimes are
         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
         */
        if (mmu_idx != ARMMMUIdx_Stage2) {
            int r_el = regime_el(env, mmu_idx);
            if (arm_el_is_aa64(env, r_el)) {
                int pamax = arm_pamax(env_archcpu(env));
                uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
                int addrtop, tbi;

                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
                if (access_type == MMU_INST_FETCH) {
                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
                }
                tbi = (tbi >> extract64(address, 55, 1)) & 1;
                addrtop = (tbi ? 55 : 63);

                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
                    fi->type = ARMFault_AddressSize;
                    fi->level = 0;
                    fi->stage2 = false;
                    return 1;
                }

                /*
                 * When TBI is disabled, we've just validated that all of the
                 * bits above PAMax are zero, so logically we only need to
                 * clear the top byte for TBI.  But it's clearer to follow
                 * the pseudocode set of addrdesc.paddress.
                 */
                address = extract64(address, 0, 52);
            }
        }
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;

        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
        hcr = arm_hcr_el2_eff(env);
        cacheattrs->shareability = 0;
        if (hcr & HCR_DC) {
            if (hcr & HCR_DCT) {
                memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
            } else {
                memattr = 0xff;  /* Normal, WB, RWA */
            }
        } else if (access_type == MMU_INST_FETCH) {
            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
                memattr = 0xee;  /* Normal, WT, RA, NT */
            } else {
                memattr = 0x44;  /* Normal, NC, No */
            }
            cacheattrs->shareability = 2; /* outer shareable */
        } else {
            memattr = 0x00;      /* Device, nGnRnE */
        }
        cacheattrs->attrs = memattr;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    ARMCacheAttrs cacheattrs = {};

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, &cacheattrs);

    if (ret) {
        return -1;
    }
    return phys_addr;
}
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}
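
/*
 * Editor's note: the test above is the standard two's-complement overflow
 * check: overflow can only occur when both operands have the same sign
 * (!((a ^ b) & 0x8000)) and the result's sign differs from a's
 * ((res ^ a) & 0x8000).  E.g. 0x7fff + 1 = 0x8000 trips the check and
 * saturates to 0x7fff, while adding operands of opposite sign never can.
 */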
/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
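
/*
 * Editor's note: op_addsub.h is a template that is included repeatedly
 * below.  Each inclusion expands the parallel add/sub helpers in terms of
 * whatever ADD16/SUB16/ADD8/SUB8/PFX (and optionally ARITH_GE) are
 * currently defined, then #undefs them so the next block can redefine the
 * macros for a different flavour (saturating, GE-flag setting, halving).
 */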
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
12494 uint32_t HELPER(sel_flags
)(uint32_t flags
, uint32_t a
, uint32_t b
)
12506 mask
|= 0xff000000;
12507 return (a
& mask
) | (b
& ~mask
);
/*
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
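
/*
 * Editor's note: both helpers undo library-side bit inversion so the
 * result matches the raw polynomial division that the ARM CRC32/CRC32C
 * instructions specify: per the comments above, zlib's crc32() inverts
 * the accumulator on entry and the result on exit (hence the XOR on both
 * sides), while the crc32c() implementation only inverts its output.
 */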
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     * This register is ignored if E2H+TGE are both set.
     */
    if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        int fpen = extract32(env->cp15.cpacr_el1, 20, 2);

        switch (fpen) {
        case 0:
        case 2:
            if (cur_el == 0 || cur_el == 1) {
                /* Trap to PL1, which might be EL1 or EL3 */
                if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                    return 3;
                }
                return 1;
            }
            if (cur_el == 3 && !is_a64(env)) {
                /* Secure PL1 running at EL3 */
                return 3;
            }
            break;
        case 1:
            if (cur_el == 0) {
                return 1;
            }
            break;
        case 3:
            break;
        }
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
/* Return the exception level we're running at if this is our mmu_idx */
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    if (mmu_idx & ARM_MMU_IDX_M) {
        return mmu_idx & ARM_MMU_IDX_M_PRIV;
    }

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE10_0:
        return 0;
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
        return 1;
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    default:
        g_assert_not_reached();
    }
}
#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif

ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    /* See ARM pseudo-function ELIsInHost.  */
    switch (el) {
    case 0:
        if (arm_is_secure_below_el3(env)) {
            return ARMMMUIdx_SE10_0;
        }
        if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)
            && arm_el_is_aa64(env, 2)) {
            return ARMMMUIdx_E20_0;
        }
        return ARMMMUIdx_E10_0;
    case 1:
        if (arm_is_secure_below_el3(env)) {
            if (env->pstate & PSTATE_PAN) {
                return ARMMMUIdx_SE10_1_PAN;
            }
            return ARMMMUIdx_SE10_1;
        }
        if (env->pstate & PSTATE_PAN) {
            return ARMMMUIdx_E10_1_PAN;
        }
        return ARMMMUIdx_E10_1;
    case 2:
        /* TODO: ARMv8.4-SecEL2 */
        /* Note that TGE does not apply at EL2.  */
        if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) {
            if (env->pstate & PSTATE_PAN) {
                return ARMMMUIdx_E20_2_PAN;
            }
            return ARMMMUIdx_E20_2;
        }
        return ARMMMUIdx_E2;
    case 3:
        return ARMMMUIdx_SE3;
    default:
        g_assert_not_reached();
    }
}

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}
#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif
static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
                                      ARMMMUIdx mmu_idx, uint32_t flags)
{
    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
                       arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
    }
    return flags;
}
*env
, int fp_el
,
12736 ARMMMUIdx mmu_idx
, uint32_t flags
)
12738 bool sctlr_b
= arm_sctlr_b(env
);
12741 flags
= FIELD_DP32(flags
, TBFLAG_A32
, SCTLR_B
, 1);
12743 if (arm_cpu_data_is_big_endian_a32(env
, sctlr_b
)) {
12744 flags
= FIELD_DP32(flags
, TBFLAG_ANY
, BE_DATA
, 1);
12746 flags
= FIELD_DP32(flags
, TBFLAG_A32
, NS
, !access_secure_reg(env
));
12748 return rebuild_hflags_common(env
, fp_el
, mmu_idx
, flags
);
static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = 0;

    if (arm_v7m_is_handler_mode(env)) {
        flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is negative.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
{
    int flags = 0;

    flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
                       arm_debug_target_el(env));
    return flags;
}
static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = rebuild_hflags_aprofile(env);

    if (arm_el_is_aa64(env, 1)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
    }

    if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
12800 static uint32_t rebuild_hflags_a64(CPUARMState
*env
, int el
, int fp_el
,
12803 uint32_t flags
= rebuild_hflags_aprofile(env
);
12804 ARMMMUIdx stage1
= stage_1_mmu_idx(mmu_idx
);
12805 uint64_t tcr
= regime_tcr(env
, mmu_idx
)->raw_tcr
;
12809 flags
= FIELD_DP32(flags
, TBFLAG_ANY
, AARCH64_STATE
, 1);
12811 /* Get control bits for tagged addresses. */
12812 tbid
= aa64_va_parameter_tbi(tcr
, mmu_idx
);
12813 tbii
= tbid
& ~aa64_va_parameter_tbid(tcr
, mmu_idx
);
12815 flags
= FIELD_DP32(flags
, TBFLAG_A64
, TBII
, tbii
);
12816 flags
= FIELD_DP32(flags
, TBFLAG_A64
, TBID
, tbid
);
12818 if (cpu_isar_feature(aa64_sve
, env_archcpu(env
))) {
12819 int sve_el
= sve_exception_el(env
, el
);
12823 * If SVE is disabled, but FP is enabled,
12824 * then the effective len is 0.
12826 if (sve_el
!= 0 && fp_el
== 0) {
12829 zcr_len
= sve_zcr_len_for_el(env
, el
);
12831 flags
= FIELD_DP32(flags
, TBFLAG_A64
, SVEEXC_EL
, sve_el
);
12832 flags
= FIELD_DP32(flags
, TBFLAG_A64
, ZCR_LEN
, zcr_len
);
12835 sctlr
= regime_sctlr(env
, stage1
);
12837 if (arm_cpu_data_is_big_endian_a64(el
, sctlr
)) {
12838 flags
= FIELD_DP32(flags
, TBFLAG_ANY
, BE_DATA
, 1);
12841 if (cpu_isar_feature(aa64_pauth
, env_archcpu(env
))) {
12843 * In order to save space in flags, we record only whether
12844 * pauth is "inactive", meaning all insns are implemented as
12845 * a nop, or "active" when some action must be performed.
12846 * The decision of which action to take is left to a helper.
12848 if (sctlr
& (SCTLR_EnIA
| SCTLR_EnIB
| SCTLR_EnDA
| SCTLR_EnDB
)) {
12849 flags
= FIELD_DP32(flags
, TBFLAG_A64
, PAUTH_ACTIVE
, 1);
12853 if (cpu_isar_feature(aa64_bti
, env_archcpu(env
))) {
12854 /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
12855 if (sctlr
& (el
== 0 ? SCTLR_BT0
: SCTLR_BT1
)) {
12856 flags
= FIELD_DP32(flags
, TBFLAG_A64
, BT
, 1);
    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            /* TODO: ARMv8.3-NV */
            flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            /* TODO: ARMv8.4-SecEL2 */
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }
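
    /*
     * Example of the effect (illustrative): an LDTR at EL1 under the
     * normal EL1&0 regime (ARMMMUIdx_E10_1) sets UNPRIV, so it is
     * translated with the EL0 access type; the same insn at EL2 with
     * HCR_EL2.E2H == 1 but TGE == 0 (ARMMMUIdx_E20_2) leaves UNPRIV
     * clear and behaves like a plain LDR.
     */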
    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked access have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            flags = FIELD_DP32(flags, TBFLAG_A64, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                flags = FIELD_DP32(flags, TBFLAG_A64, MTE_ACTIVE, 1);
            }
        }
        /* And again for unprivileged accesses, if required.  */
        if (FIELD_EX32(flags, TBFLAG_A64, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            flags = FIELD_DP32(flags, TBFLAG_A64, MTE0_ACTIVE, 1);
        }
        /* Cache TCMA as well as TBI. */
        flags = FIELD_DP32(flags, TBFLAG_A64, TCMA,
                           aa64_va_parameter_tcma(tcr, mmu_idx));
    }
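
    /*
     * Worked example for EL0 (illustrative): with TCR.TBI0 = 1,
     * SCTLR_EL1.ATA0 = 1 and SCTLR_EL1.TCF0 != 0, MTE_ACTIVE becomes 1
     * and loads/stores are translated with tag checks.  Setting
     * PSTATE.TCO clears the flag on the next hflags rebuild, so
     * subsequent TBs treat every access as Unchecked instead.
     */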

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
static uint32_t rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}
void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}
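
/*
 * Usage note: any path that changes state feeding rebuild_hflags_*()
 * behind the translator's back must refresh the cache itself, as the
 * exception entry and return paths do.  A typical call site, sketched
 * (hypothetical code, for illustration only):
 *
 *     env->pc = addr;              // new EL/state already installed
 *     arm_rebuild_hflags(env);     // recompute before resuming TCG
 */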
/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}
void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}
/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}
void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}
void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}
static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    uint32_t env_flags_current = env->hflags;
    uint32_t env_flags_rebuilt = rebuild_hflags_internal(env);

    if (unlikely(env_flags_current != env_flags_rebuilt)) {
        fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
                env_flags_current, env_flags_rebuilt);
        abort();
    }
#endif
}
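
/*
 * Configuring QEMU with --enable-debug-tcg defines CONFIG_DEBUG_TCG, so
 * every TB lookup cross-checks the cached hflags.  If some path forgets
 * to rebuild after changing relevant state, the guest aborts here with
 * e.g. "TCG hflags mismatch (current:0x000000c0 rebuilt:0x000000c4)"
 * (values illustrative), pointing directly at the stale-cache bug.
 */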
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    uint32_t flags = env->hflags;
    uint32_t pstate_for_ss;

    *cs_base = 0;
    assert_hflags_rebuild_correctly(env);

    if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) {
        *pc = env->pc;
        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
            flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
        }
        pstate_for_ss = env->pstate;
    } else {
        *pc = env->regs[15];

        if (arm_feature(env, ARM_FEATURE_M)) {
            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
                != env->v7m.secure) {
                flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1);
            }

            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
                 (env->v7m.secure &&
                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
                /*
                 * ASPEN is set, but FPCA/SFPA indicate that there is no
                 * active FP context; we must create a new FP context before
                 * executing any FP insn.
                 */
                flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1);
            }

            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1);
            }
        } else {
            /*
             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
             */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                flags = FIELD_DP32(flags, TBFLAG_A32,
                                   XSCALE_CPAR, env->cp15.c15_cpar);
            } else {
                flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN,
                                   env->vfp.vec_len);
                flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE,
                                   env->vfp.vec_stride);
            }
            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
                flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
            }
        }

        flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb);
        flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits);
        pstate_for_ss = env->uncached_cpsr;
    }
    /*
     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0          x         Inactive (the TB flag for SS is always 0)
     *     1          0         Active-pending
     *     1          1         Active-not-pending
     * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
     */
    if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
        (pstate_for_ss & PSTATE_SS)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
    }

    *pflags = flags;
}
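
/*
 * Single-step walkthrough (illustrative): with the step machinery armed
 * (e.g. MDSCR_EL1.SS = 1), SS_ACTIVE is cached in hflags.  PSTATE.SS = 1
 * yields Active-not-pending, so the next TB executes exactly one insn and
 * clears PSTATE.SS.  The following execution of this function then sees
 * SS_ACTIVE = 1 with PSTATE.SS = 0 (Active-pending), and the generated
 * code raises the step exception before executing anything.
 */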
#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
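
/*
 * Worked example of the predicate masking above, assuming ARM_MAX_VQ = 16:
 * each vq covers 16 predicate bits, so narrowing to vq = 3 keeps the low
 * 16 * 3 = 48 bits of p[0] (pmask = ~(-1ULL << 48) = 0x0000ffffffffffff)
 * and then clears p[1..3] entirely, for all 16 predicates plus the ffr
 * (hence the loop bound of 17).
 */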
/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
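        /*
         * Worked example (illustrative): leaving EL2 (aa64, effective
         * ZCR_EL2.LEN = 3, i.e. vq = 4) for aa32 EL0 gives old_len = 3
         * and new_len = 0, so aarch64_sve_narrow_vq(env, 1) zeroes all
         * state above the first 128-bit portion before aa32 execution
         * begins, and a later aa32 -> aa64 EL1 transition finds the high
         * bits already cleared.
         */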
+ 1);