/*
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */

#ifndef CONFIG_USER_ONLY

static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));
#endif

static void switch_mode(CPUARMState *env, int mode);
static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);

static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    /* VFP data registers are always little-endian. */
    if (reg < nregs) {
        return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            return gdb_get_reg128(buf, q[0], q[1]);
        }
    }
    switch (reg - nregs) {
    case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); break;
    case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env)); break;
    case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); break;
    }
    return 0;
}

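/*
 * A sketch of the register numbering the two VFP gdb accessors above and
 * below decode: regs [0, nregs) map to the D registers (16 or 32 depending
 * on aa32_simd_r32), the next 16 (NEON only) alias the Q registers, and the
 * final three are FPSID, FPSCR and FPEXC.  For a 16-D-register, non-NEON
 * CPU this means:
 *
 *   reg 0..15  -> D0..D15              (64-bit transfers)
 *   reg 16..18 -> FPSID, FPSCR, FPEXC  (32-bit transfers)
 */
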
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register - quads are in LE order */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        return gdb_get_reg128(buf, q[1], q[0]);
    }
    case 32:
        /* FPSR */
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        /* FPCR */
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

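/*
 * Taken together, raw_read()/raw_write() and the read/write_raw_cp_reg()
 * wrappers give migration a uniform way to move register state around
 * without architectural side effects: a raw write followed by a raw
 * readback should return the value written, and a mismatch (e.g. for a
 * constant, write-ignored register) is how callers such as
 * write_list_to_cpustate() below detect failure.
 */
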
/**
 * arm_get/set_gdb_*: get/set a gdb register
 * @env: the CPU state
 * @buf: a buffer to copy to/from
 * @reg: register number (offset from start of group)
 *
 * We return the number of bytes copied
 */

static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}

#ifdef TARGET_AARCH64
static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            len += gdb_get_reg128(buf,
                                  env->vfp.zregs[reg].d[vq * 2 + 1],
                                  env->vfp.zregs[reg].d[vq * 2]);
        }
        return len;
    }
    case 32:
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    /* then 16 predicates and the ffr */
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
        }
        return len;
    }
    case 51:
    {
        /*
         * We report in Vector Granules (VG) which is 64bit in a Z reg
         * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
         */
        int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
        return gdb_get_reg64(buf, vq * 2);
    }
    default:
        /* gdbstub asked for something out of our range */
        qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
        break;
    }

    return 0;
}

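/*
 * The SVE gdb register layout handled above and below: regs 0..31 are the
 * Z registers (reported at the CPU's maximum vector length), 32 and 33 are
 * FPSR and FPCR, 34..50 are the 16 predicate registers plus the FFR, and
 * 51 is VG, the vector granule (the number of 64-bit chunks in a Z register
 * at the current vector length).
 */
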
static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    /* The first 32 registers are the zregs */
    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
            env->vfp.zregs[reg].d[vq * 2] = *p++;
            len += 16;
        }
        return len;
    }
    case 32:
        vfp_set_fpsr(env, *(uint32_t *)buf);
        return 4;
    case 33:
        vfp_set_fpcr(env, *(uint32_t *)buf);
        return 4;
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            env->vfp.pregs[preg].p[vq / 4] = *p++;
            len += 8;
        }
        return len;
    }
    case 51:
        /* cannot set vg via gdbstub */
        return 0;
    default:
        /* gdbstub asked for something out of our range */
        break;
    }

    return 0;
}
#endif /* TARGET_AARCH64 */

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

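/*
 * A minimal sketch of how a KVM sync cycle uses the two list functions
 * (the exact call sites live in the KVM glue code, not here):
 *
 *   kvm_arch_get_registers():  fill cpreg_values from the kernel, then
 *                              write_list_to_cpustate(cpu);
 *   kvm_arch_put_registers():  write_cpustate_to_list(cpu, true), then
 *                              push cpreg_values back to the kernel.
 */
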
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

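/*
 * Note the two-pass structure above: count_cpreg() first sizes the arrays,
 * add_cpreg_to_list() then fills them in the same sorted key order, and the
 * final assert checks that both passes agreed on the number of
 * raw-accessible registers.
 */
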
/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM.  */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW.  */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR.  */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
}

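/*
 * For example, a TLBIALL executed at EL1 while HCR_EL2.FB is set is treated
 * below exactly like TLBIALLIS, i.e. it is broadcast to all CPUs rather
 * than applied only locally.
 */
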
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

[] = {
931 /* Not all pre-v6 cores implemented this WFI, so this is slightly
934 { .name
= "WFI_v5", .cp
= 15, .crn
= 7, .crm
= 8, .opc1
= 0, .opc2
= 2,
935 .access
= PL1_W
, .type
= ARM_CP_WFI
},
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        uint32_t mask = 0;

        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}

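/*
 * Worked example: with PMCR.N == 4 (QEMU's PMCR_NUM_COUNTERS choice),
 * pmu_counter_mask() returns (1 << 31) | 0xf == 0x8000000f: bit 31 covers
 * PMCCNTR and bits [3:0] cover the four event counters.
 */
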
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    /* There is no timeout to wait for an SWINCR event overflow */
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, return the host tick count instead.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return icount_enabled() == 1; /* Precise instruction counting */
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return icount_to_ns((int64_t)icount);
}
#endif

static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

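/*
 * supported_event_map is a dense table indexed by architectural event
 * number; each entry holds either an index into pm_events[] or
 * UNSUPPORTED_EVENT. Keeping it dense is why MAX_EVENT_ID must stay small
 * (see the note above).
 */
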
/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them.
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

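/*
 * Example of the PMCEID encoding above: event 0x011 (CPU_CYCLES) sets bit
 * 0x11 of PMCEID0, while event 0x03c (STALL) has bit 0x20 set in its number
 * and so sets bit 0x1c of PMCEID1 instead.
 */
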
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    uint8_t hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
           !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

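/*
 * In short, the decision above is the AND of three independent checks:
 * "enabled" (PMCR.E or MDCR_EL2.HPME, plus the counter's PMCNTEN bit),
 * "not prohibited" (the Secure-state and HPMD/SPME/PMCR.DP rules), and
 * "not filtered" (the P/U/NSK/NSU/NSH/M bits of PMEVTYPER/PMCCFILTR for
 * the current EL and security state).
 */
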
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

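/*
 * Illustration of the delta scheme above: if the counter is enabled and
 * PMCR.D is clear, the guest-visible PMCCNTR is always
 * (cycles_get_count() - c15_ccnt_delta). While the counter is disabled only
 * c15_ccnt_delta is refreshed, so the visible count stays frozen until the
 * next enable.
 */
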
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint64_t count = 0;
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;

    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

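/*
 * The start/finish pairing above is the same idiom used throughout this
 * file: pmu_op_start() folds the underlying counts into the guest-visible
 * registers (raising overflow interrupts if needed) and pmu_op_finish()
 * rebases the deltas and re-arms cpu->pmu_timer for the next anticipated
 * overflow.
 */
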
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

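/*
 * For example, writing 0x3 to PMSWINC increments event counters 0 and 1,
 * provided each one is enabled, unfiltered, and programmed with the SW_INCR
 * event (0x000); each increment can itself set the overflow bit and raise
 * the PMU interrupt.
 */
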
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}

static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    } else {
        /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
         * PMSELR value is equal to or greater than the number of implemented
         * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
         */
    }
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}

static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state.  */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (ri->state == ARM_CP_STATE_AA64) {
        if (arm_feature(env, ARM_FEATURE_AARCH64) &&
            !cpu_isar_feature(aa64_aa32_el1, cpu)) {
                value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
        }
        valid_mask &= ~SCR_NET;

        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= SCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= SCR_API | SCR_APK;
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            valid_mask |= SCR_EEL2;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= SCR_ATA;
        }
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * scr_write will set the RES1 bits on an AArch64-only CPU.
     * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
     */
    scr_write(env, ri, 0);
}

static CPAccessResult access_aa64_tid2(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

2103 static void csselr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2106 raw_write(env
, ri
, value
& 0xf);

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    bool el1 = arm_current_el(env) == 1;
    uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}

static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write },
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW,
      .accessfn = access_aa64_tid2,
      .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST,
      .accessfn = access_aa64_tid1,
      .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
     * at all, so we don't need to check whether we're v8A.
     */
    if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TTEE)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return teecr_access(env, ri, isread);
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write, .accessfn = teecr_access },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R | PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R | PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);
    uint64_t hcr;
    uint32_t cntkctl;

    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            cntkctl = env->cp15.cnthctl_el2;
        } else {
            cntkctl = env->cp15.c14_cntkctl;
        }
        if (!extract32(cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }

        /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
        if (hcr & HCR_E2H) {
            if (timeridx == GTIMER_PHYS &&
                !extract32(env->cp15.cnthctl_el2, 10, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        } else {
            /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
            if (has_el2 && timeridx == GTIMER_PHYS &&
                !extract32(env->cp15.cnthctl_el2, 1, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;

    case 1:
        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
        if (has_el2 && timeridx == GTIMER_PHYS &&
            (hcr & HCR_E2H
             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /*
         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
         * EL0 if EL0[PV]TEN is zero.
         */
        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */

    case 1:
        if (has_el2 && timeridx == GTIMER_PHYS) {
            if (hcr & HCR_E2H) {
                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}

static uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}
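
/*
 * Illustrative sketch, not part of the original file: the counter value
 * above divides the ns-resolution virtual clock by the tick period in ns,
 * i.e. NANOSECONDS_PER_SECOND / CNTFRQ. With the default 62.5 MHz frequency
 * (a 16 ns period) one millisecond of virtual time is 62500 ticks. The
 * helper below is hypothetical and only demonstrates the arithmetic.
 */
static inline uint64_t gt_demo_ns_to_ticks(uint64_t ns, uint64_t cntfrq_hz)
{
    uint64_t period_ns = NANOSECONDS_PER_SECOND / cntfrq_hz;
    return ns / period_ns;  /* e.g. 1000000 / 16 == 62500 */
}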

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
        } else {
            timer_mod(cpu->gt_timer[timeridx], nexttick);
        }
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
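
/*
 * Illustrative sketch, not part of the original file: ISTATUS relies on
 * *unsigned* 64-bit arithmetic, and the next expiry tick is clamped before
 * being turned into a (signed) QEMUTimer deadline. A simplified standalone
 * model of those two steps (the demo helpers are hypothetical):
 */
static inline bool gt_demo_istatus(uint64_t count, uint64_t offset,
                                   uint64_t cval)
{
    return count - offset >= cval;      /* unsigned compare, as above */
}

static inline int64_t gt_demo_clamp_deadline(uint64_t nexttick,
                                             uint64_t period_ns)
{
    /* beyond the signed-64-bit ns range: fire as late as possible instead */
    if (nexttick > INT64_MAX / period_ns) {
        return INT64_MAX;
    }
    return (int64_t)(nexttick * period_ns);
}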

static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = env_archcpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_offset(CPUARMState *env)
{
    uint64_t hcr;

    switch (arm_current_el(env)) {
    case 2:
        hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_E2H) {
            return 0;
        }
        break;
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return 0;
        }
        break;
    }

    return env->cp15.cntvoff_el2;
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}
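
/*
 * Illustrative sketch, not part of the original file: TVAL is a 32-bit
 * signed downcounter view of CVAL. Reading truncates the distance to the
 * compare value; writing sign-extends the 32-bit value and adds it to the
 * current offset-adjusted count. The hypothetical pair below round-trips
 * for any value representable in 32 bits.
 */
static inline uint32_t gt_demo_tval_read(uint64_t cval, uint64_t adj_count)
{
    return (uint32_t)(cval - adj_count);
}

static inline uint64_t gt_demo_tval_write(uint64_t adj_count, uint64_t value)
{
    return adj_count + sextract64(value, 0, 32);   /* the new CVAL */
}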

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}

static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static int gt_phys_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return GTIMER_HYP;
    default:
        return GTIMER_PHYS;
    }
}

static int gt_virt_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return GTIMER_HYPVIRT;
    default:
        return GTIMER_VIRT;
    }
}
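
/*
 * Illustrative sketch, not part of the original file: in an HCR_EL2.E2H
 * ('VHE') regime the EL0/EL2 views of the EL1 timer registers are
 * redirected to the EL2 timers, which is all the two _redir_timeridx()
 * helpers above compute from the current mmu_idx. A hypothetical minimal
 * model:
 */
static inline int gt_demo_redir(bool in_e2h_regime, bool phys)
{
    if (in_e2h_regime) {
        return phys ? GTIMER_HYP : GTIMER_HYPVIRT;
    }
    return phys ? GTIMER_PHYS : GTIMER_VIRT;
}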

static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
}

static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
}

static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}

void arm_gt_hvtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYPVIRT);
}

static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

#else

/* In user-mode most of the generic timer registers are inaccessible
 * however modern kernels (4.12+) allow access to cntvct_el0
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};

#endif

static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}

#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                if (env->cp15.scr_el3 & SCR_EEL2) {
                    return CP_ACCESS_TRAP_UNCATEGORIZED_EL2;
                }
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}

#ifdef CONFIG_TCG
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (ret) {
        /*
         * Some kinds of translation fault must cause exceptions rather
         * than being reported in the PAR.
         */
        int current_el = arm_current_el(env);
        int target_el;
        uint32_t syn, fsr, fsc;
        bool take_exc = false;

        if (fi.s1ptw && current_el == 1
            && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
            /*
             * Synchronous stage 2 fault on an access made as part of the
             * translation table walk for AT S1E0* or AT S1E1* insn
             * executed from NS EL1. If this is a synchronous external abort
             * and SCR_EL3.EA == 1, then we take a synchronous external abort
             * to EL3. Otherwise the fault is taken as an exception to EL2,
             * and HPFAR_EL2 holds the faulting IPA.
             */
            if (fi.type == ARMFault_SyncExternalOnWalk &&
                (env->cp15.scr_el3 & SCR_EA)) {
                target_el = 3;
            } else {
                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
                if (arm_is_secure_below_el3(env) && fi.s1ns) {
                    env->cp15.hpfar_el2 |= HPFAR_NS;
                }
                target_el = 2;
            }
            take_exc = true;
        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
            /*
             * Synchronous external aborts during a translation table walk
             * are taken as Data Abort exceptions.
             */
            if (fi.stage2) {
                if (current_el == 3) {
                    target_el = 3;
                } else {
                    target_el = 2;
                }
            } else {
                target_el = exception_target_el(env);
            }
            take_exc = true;
        }

        if (take_exc) {
            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
                fsr = arm_fi_to_lfsc(&fi);
                fsc = extract32(fsr, 0, 6);
            } else {
                fsr = arm_fi_to_sfsc(&fi);
                fsc = 0x3f;
            }
            /*
             * Report exception with ESR indicating a fault due to a
             * translation table walk for a cache maintenance instruction.
             */
            syn = syn_data_abort_no_iss(current_el == target_el, 0,
                                        fi.ea, 1, fi.s1ptw, 1, fsc);
            env->exception.vaddress = value;
            env->exception.fsr = fsr;
            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
        }
    }

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_E10_0 ||
                mmu_idx == ARMMMUIdx_E10_1 ||
                mmu_idx == ARMMMUIdx_E10_1_PAN) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
#endif /* CONFIG_TCG */
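
/*
 * Illustrative sketch, not part of the original file: on a successful
 * translation the 64-bit (LPAE-format) PAR assembled above packs the
 * output address and attributes as follows; bit 11 flags the long format
 * and bit 0 (F) stays clear. The demo helper is hypothetical.
 */
static inline uint64_t demo_par64_success(uint64_t phys_addr, bool nonsecure,
                                          uint8_t memattr, uint8_t sh)
{
    uint64_t par64 = 1 << 11;               /* LPAE bit always set */
    par64 |= phys_addr & ~0xfffULL;         /* output address */
    if (nonsecure) {
        par64 |= 1 << 9;                    /* NS */
    }
    par64 |= (uint64_t)memattr << 56;       /* ATTR */
    par64 |= (uint64_t)sh << 7;             /* SH */
    return par64;
}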

static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE3;
            break;
        case 2:
            g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
            /* fall through */
        case 1:
            if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
                           : ARMMMUIdx_Stage1_E1_PAN);
            } else {
                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE10_0;
            break;
        case 2:
            g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
            mmu_idx = ARMMMUIdx_Stage1_E0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_E10_1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}

static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}

static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 &&
        !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
            if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
                           : ARMMMUIdx_Stage1_E1_PAN);
            } else {
                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
            }
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_SE3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}

static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
#endif
    REGINFO_SENTINEL
};

/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
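
/*
 * Illustrative sketch, not part of the original file: the two helpers
 * above are inverses of each other -- the "simple" layout packs eight
 * 2-bit AP fields contiguously while the "extended" layout spreads the
 * same fields on 4-bit boundaries -- so compressing after padding returns
 * the original value. The demo helper is hypothetical.
 */
static inline uint32_t mpu_ap_demo_roundtrip(uint16_t simple_ap)
{
    return simple_mpu_ap_bits(extended_mpu_ap_bits(simple_ap));
}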

static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}

static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}

static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
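/*
 * Worked example (not in the original file): for a short-descriptor
 * write with TTBCR.N = 2 (maskshift == 2) the code above computes
 *
 *     tcr->mask      = ~(0xffffffffu >> 2) = 0xc0000000
 *     tcr->base_mask = ~(0x3fffu >> 2)     = 0xfffff000
 *
 * i.e. VAs whose top two bits are zero are translated via TTBR0 (whose
 * base address is masked with 0xfffff000), and all other VAs via TTBR1.
 */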
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}
static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}
static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = env_archcpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
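/*
 * Worked example (not in the original file): in a 64-bit TTBR the ASID
 * lives in bits [63:48], so
 *
 *     extract64(raw_read(env, ri) ^ value, 48, 16)
 *
 * is non-zero exactly when the write changes the ASID field.  A 32-bit
 * (non-LPAE AArch32) TTBR write carries no ASID, hence the
 * cpreg_field_is_64bit() guard before flushing.
 */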
static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * If we are running with E2&0 regime, then an ASID is active.
     * Flush if that might be changing.  Note we're not checking
     * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
     * holds the active ASID, only checking the field that might.
     */
    if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
        (arm_hcr_el2_eff(env) & HCR_E2H)) {
        uint16_t mask = ARMMMUIdxBit_E20_2 |
                        ARMMMUIdxBit_E20_2_PAN |
                        ARMMMUIdxBit_E20_0;

        if (arm_is_secure_below_el3(env)) {
            mask >>= ARM_MMU_IDX_A_NS;
        }

        tlb_flush_by_mmuidx(env_cpu(env), mask);
    }
    raw_write(env, ri, value);
}
static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    /*
     * A change in VMID to the stage2 page table (Stage2) invalidates
     * the combined stage 1&2 tlbs (EL10_1 and EL10_0).
     */
    if (raw_read(env, ri) != value) {
        uint16_t mask = ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0;

        if (arm_is_secure_below_el3(env)) {
            mask >>= ARM_MMU_IDX_A_NS;
        }

        tlb_flush_by_mmuidx(cs, mask);
        raw_write(env, ri, value);
    }
}
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_tcr_el12_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. */
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]),
                             offsetof(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .accessfn = access_tvm_trvm,
    .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = {
        offsetofhigh32(CPUARMState, cp15.tcr_el[3].raw_tcr),
        offsetofhigh32(CPUARMState, cp15.tcr_el[1].raw_tcr),
    },
};
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);

    if (arm_is_el2_enabled(env) && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}
static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}
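/*
 * Worked example (not in the original file): a Cortex-R5-style
 * uniprocessor with the MP extensions and affinity value 0 reports
 *
 *     MPIDR = (1U << 31) | (1u << 30) | 0 = 0xc0000000
 *
 * with bit 31 (M) saying the MP extensions are implemented and bit 30
 * (U) saying the core is uniprocessor.
 */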
static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);

    if (arm_is_el2_enabled(env) && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}
static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}
static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_PAN;
}

static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
}

static const ARMCPRegInfo pan_reginfo = {
    .name = "PAN", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_pan_read, .writefn = aa64_pan_write
};

static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_UAO;
}

static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
}

static const ARMCPRegInfo uao_reginfo = {
    .name = "UAO", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_uao_read, .writefn = aa64_uao_write
};

static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_DIT;
}

static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
}

static const ARMCPRegInfo dit_reginfo = {
    .name = "DIT", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
    .type = ARM_CP_NO_RAW, .access = PL0_RW,
    .readfn = aa64_dit_read, .writefn = aa64_dit_write
};

static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SSBS;
}

static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
}

static const ARMCPRegInfo ssbs_reginfo = {
    .name = "SSBS", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
    .type = ARM_CP_NO_RAW, .access = PL0_RW,
    .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
};
static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
                                              const ARMCPRegInfo *ri,
                                              bool isread)
{
    /* Cache invalidate/clean to Point of Coherency or Persistence... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */
        if (arm_hcr_el2_eff(env) & HCR_TPCP) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
                                              const ARMCPRegInfo *ri,
                                              bool isread)
{
    /* Cache invalidate/clean to Point of Unification... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */
        if (arm_hcr_el2_eff(env) & HCR_TPU) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static int vae1_tlbmask(CPUARMState *env)
{
    uint64_t hcr = arm_hcr_el2_eff(env);
    uint16_t mask;

    if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
        mask = ARMMMUIdxBit_E20_2 |
               ARMMMUIdxBit_E20_2_PAN |
               ARMMMUIdxBit_E20_0;
    } else {
        mask = ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0;
    }

    if (arm_is_secure_below_el3(env)) {
        mask >>= ARM_MMU_IDX_A_NS;
    }

    return mask;
}
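/*
 * Note (descriptive, not from the original file): "EL1&0 regime" TLBI
 * operations must hit whichever translation regime currently serves
 * EL1&0.  When HCR_EL2.{E2H,TGE} == {1,1} that is the EL2&0 regime, so
 * the E20_* mmu_idx bits are selected; otherwise the normal E10_* bits
 * are used.  The final >>= ARM_MMU_IDX_A_NS shifts the non-secure index
 * bits down to their Secure (SE*) equivalents.
 */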
/* Return 56 if TBI is enabled, 64 otherwise. */
static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
                              uint64_t addr)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
    int select = extract64(addr, 55, 1);

    return (tbi >> select) & 1 ? 56 : 64;
}
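/*
 * Worked example (not in the original file): for an upper-range address
 * such as 0xffff800012345000, bit 55 is 1, so select == 1 and bit 1 of
 * the two-bit TBI field (TCR_ELx.TBI1) decides.  If top-byte-ignore is
 * enabled for that range, only 56 address bits are significant and the
 * TLB invalidation must ignore the tag byte; otherwise all 64 matter.
 */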
static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
{
    uint64_t hcr = arm_hcr_el2_eff(env);
    ARMMMUIdx mmu_idx;

    /* Only the regime of the mmu_idx below is significant. */
    if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
        mmu_idx = ARMMMUIdx_E20_0;
    } else {
        mmu_idx = ARMMMUIdx_E10_0;
    }

    if (arm_is_secure_below_el3(env)) {
        mmu_idx &= ~ARM_MMU_IDX_A_NS;
    }

    return tlbbits_for_regime(env, mmu_idx, addr);
}
static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
    } else {
        tlb_flush_by_mmuidx(cs, mask);
    }
}
static int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE10_1 |
               ARMMMUIdxBit_SE10_1_PAN |
               ARMMMUIdxBit_SE10_0;
    } else {
        return ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0;
    }
}
static int e2_tlbmask(CPUARMState *env)
{
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE20_0 |
               ARMMMUIdxBit_SE20_2 |
               ARMMMUIdxBit_SE20_2_PAN |
               ARMMMUIdxBit_SE2;
    } else {
        return ARMMMUIdxBit_E20_0 |
               ARMMMUIdxBit_E20_2 |
               ARMMMUIdxBit_E20_2_PAN |
               ARMMMUIdxBit_E2;
    }
}
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
}
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
}
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);
    int bits = vae1_tlbbits(env, pageaddr);

    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
}

static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);
    int bits = vae1_tlbbits(env, pageaddr);

    if (tlb_force_broadcast(env)) {
        tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
    } else {
        tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
    }
}
static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);
    bool secure = arm_is_secure_below_el3(env);
    int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
    int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
                                  pageaddr);

    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);
    int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);

    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                  ARMMMUIdxBit_SE3, bits);
}
#ifdef TARGET_AARCH64
static uint64_t tlbi_aa64_range_get_length(CPUARMState *env,
                                           uint64_t value)
{
    unsigned int page_shift;
    unsigned int page_size_granule;
    uint64_t num;
    uint64_t scale;
    uint64_t exponent;
    uint64_t length;

    num = extract64(value, 39, 4);
    scale = extract64(value, 44, 2);
    page_size_granule = extract64(value, 46, 2);

    page_shift = page_size_granule * 2 + 12;

    if (page_size_granule == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid page size granule %d\n",
                      page_size_granule);
        return 0;
    }

    exponent = (5 * scale) + 1;
    length = (num + 1) << (exponent + page_shift);

    return length;
}
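/*
 * Worked arithmetic (not in the original file): a TLBI range value with
 * NUM = 0, SCALE = 0 and TG = 1 gives, under the formula above,
 *
 *     page_shift = 1 * 2 + 12 = 14
 *     exponent   = 5 * 0 + 1  = 1
 *     length     = (0 + 1) << (1 + 14) = 32768 bytes
 *
 * i.e. the smallest range this encoding can express; larger NUM and
 * SCALE values scale the invalidated range up from there.
 */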
static uint64_t tlbi_aa64_range_get_base(CPUARMState *env, uint64_t value,
                                         bool two_ranges)
{
    /* TODO: ARMv8.7 FEAT_LPA2 */
    uint64_t pageaddr;

    if (two_ranges) {
        pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
    } else {
        pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
    }

    return pageaddr;
}
static void do_rvae_write(CPUARMState *env, uint64_t value,
                          int idxmap, bool synced)
{
    ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
    bool two_ranges = regime_has_2_ranges(one_idx);
    uint64_t baseaddr, length;
    int bits;

    baseaddr = tlbi_aa64_range_get_base(env, value, two_ranges);
    length = tlbi_aa64_range_get_length(env, value);
    bits = tlbbits_for_regime(env, one_idx, baseaddr);

    if (synced) {
        tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
                                                  baseaddr, length,
                                                  idxmap, bits);
    } else {
        tlb_flush_range_by_mmuidx(env_cpu(env), baseaddr,
                                  length, idxmap, bits);
    }
}
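/*
 * Note (descriptive, not from the original file): every RVAE* writefn
 * below funnels through do_rvae_write(); the only differences are the
 * idxmap of translation regimes to touch (e.g. vae1_tlbmask()) and
 * whether the flush must be broadcast to all CPUs ("synced"), which is
 * always true for the Inner/Outer Shareable forms.
 */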
static void tlbi_aa64_rvae1_write(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * Invalidate by VA range, EL1&0.
     * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */

    do_rvae_write(env, value, vae1_tlbmask(env),
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae1is_write(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * Invalidate by VA range, Inner/Outer Shareable EL1&0.
     * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
     * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
     * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
     * shareable specific flushes.
     */

    do_rvae_write(env, value, vae1_tlbmask(env), true);
}

static int vae2_tlbmask(CPUARMState *env)
{
    return (arm_is_secure_below_el3(env)
            ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
}

static void tlbi_aa64_rvae2_write(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * Invalidate by VA range, EL2.
     * Currently handles all of RVAE2 and RVALE2,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */

    do_rvae_write(env, value, vae2_tlbmask(env),
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae2is_write(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * Invalidate by VA range, Inner/Outer Shareable, EL2.
     * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
     * since we don't support flush-for-specific-ASID-only,
     * flush-last-level-only or inner/outer shareable specific flushes.
     */

    do_rvae_write(env, value, vae2_tlbmask(env), true);
}

static void tlbi_aa64_rvae3_write(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * Invalidate by VA range, EL3.
     * Currently handles all of RVAE3 and RVALE3,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */

    do_rvae_write(env, value, ARMMMUIdxBit_SE3,
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae3is_write(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * Invalidate by VA range, EL3, Inner/Outer Shareable.
     * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
     * since we don't support flush-for-specific-ASID-only,
     * flush-last-level-only or inner/outer specific flushes.
     */

    do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
}
#endif
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TDZ) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TDZ) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}
static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
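/*
 * Worked example (not in the original file): with a dcz_blocksize of 4,
 * i.e. 2^4 = 16 words or 64 bytes per DC ZVA block, and DC ZVA
 * currently permitted, DCZID_EL0 reads as 0x4 (BS = 4, DZP = 0).  If
 * the access check fails, bit 4 (DZP) is set instead, telling the
 * guest that DC ZVA is prohibited.
 */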
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    /* ??? Lots of these bits are not implemented. */

    if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
        if (ri->opc1 == 6) { /* SCTLR_EL3 */
            value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
        } else {
            value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
                       SCTLR_ATA0 | SCTLR_ATA);
        }
    }

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    raw_write(env, ri, value);

    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));

    if (ri->type & ARM_CP_SUPPRESS_TB_END) {
        /*
         * Normally we would always end the TB on an SCTLR write; see the
         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
         * is special.  Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
         * of hflags from the translator, so do it here.
         */
        arm_rebuild_hflags(env);
    }
}
static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
      .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL2_W },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};
/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_CONST,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0 },
    REGINFO_SENTINEL
};
/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
    } else {
        valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        if (cpu_isar_feature(aa64_vh, cpu)) {
            valid_mask |= HCR_E2H;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= HCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= HCR_API | HCR_APK;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
        }
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /*
     * These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     * HCR_DCT enables tagging on (disabled) stage1 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    do_hcr_write(env, value, 0);
}
static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
}
static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
}
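/*
 * Worked example of the deposit64() split above: if HCR_EL2 currently
 * holds 0x0000000380000000 and the guest writes 0x00000001 via the
 * AArch32 HCR encoding, hcr_writelow() computes
 * deposit64(0x0000000380000000, 0, 32, 0x00000001) == 0x0000000300000001,
 * so only bits [31:0] change.  The valid_mask argument passed on to
 * do_hcr_write() marks the untouched half as already-valid so that it
 * survives the RES0 clearing there.
 */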
/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (!arm_is_el2_enabled(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this is condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        return 0;
    }

    /*
     * For a cpu that supports both aarch64 and aarch32, we can set bits
     * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
     * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
     */
    if (!arm_el_is_aa64(env, 2)) {
        uint64_t aa32_valid;

        /*
         * These bits are up-to-date as of ARMv8.6.
         * For HCR, it's easiest to list just the 2 bits that are invalid.
         * For HCR2, list those that are valid.
         */
        aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
        aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
        ret &= aa32_valid;
    }

    if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.6.  */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
                     HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
                     HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}
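/*
 * For example, when HCR_EL2.TGE is set and E2H is clear, the function
 * above reports FMO/IMO/AMO as 1 regardless of what was written to the
 * register, while bits such as TSC and TVM read as 0; callers therefore
 * see the architecturally effective value rather than the raw state.
 */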
static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0x3 << 10);
        value |= env->cp15.cptr_el[2] & (0x3 << 10);
    }
    env->cp15.cptr_el[2] = value;
}
static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= 0x3 << 10;
    }
    return value;
}
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
      /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = PMCR_NUM_COUNTERS,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};
static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
    { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = sel2_access,
      .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
    { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
      .access = PL2_RW, .accessfn = sel2_access,
      .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
    REGINFO_SENTINEL
};
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3 or EL2.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetfn = scr_reset, .writefn = scr_write },
    { .name = "SCR",  .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY
/* Test if system register redirection is to occur in the current state.  */
static bool redirect_for_e2h(CPUARMState *env)
{
    return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
}
static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPReadFn *readfn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register.  */
        ri = ri->opaque;
        readfn = ri->readfn;
    } else {
        readfn = ri->orig_readfn;
    }
    if (readfn == NULL) {
        readfn = raw_read;
    }
    return readfn(env, ri);
}

static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    CPWriteFn *writefn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register.  */
        ri = ri->opaque;
        writefn = ri->writefn;
    } else {
        writefn = ri->orig_writefn;
    }
    if (writefn == NULL) {
        writefn = raw_write;
    }
    writefn(env, ri, value);
}
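/*
 * Example of the redirection flow: at EL2 with HCR_EL2.E2H set, an
 * access using an EL1 encoding (say SCTLR_EL1) reaches el2_e2h_read or
 * el2_e2h_write via the hooks installed below, and ri->opaque steers it
 * to the reginfo for the corresponding EL2 register (SCTLR_EL2); with
 * E2H clear the saved orig_readfn/orig_writefn handle it as a normal
 * EL1 access.
 */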
static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
{
    struct E2HAlias {
        uint32_t src_key, dst_key, new_key;
        const char *src_name, *dst_name, *new_name;
        bool (*feature)(const ARMISARegisters *id);
    };

#define K(op0, op1, crn, crm, op2) \
    ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)

    static const struct E2HAlias aliases[] = {
        { K(3, 0,  1, 0, 0), K(3, 4,  1, 0, 0), K(3, 5, 1, 0, 0),
          "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
        { K(3, 0,  1, 0, 2), K(3, 4,  1, 1, 2), K(3, 5, 1, 0, 2),
          "CPACR", "CPTR_EL2", "CPACR_EL12" },
        { K(3, 0,  2, 0, 0), K(3, 4,  2, 0, 0), K(3, 5, 2, 0, 0),
          "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
        { K(3, 0,  2, 0, 1), K(3, 4,  2, 0, 1), K(3, 5, 2, 0, 1),
          "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
        { K(3, 0,  2, 0, 2), K(3, 4,  2, 0, 2), K(3, 5, 2, 0, 2),
          "TCR_EL1", "TCR_EL2", "TCR_EL12" },
        { K(3, 0,  4, 0, 0), K(3, 4,  4, 0, 0), K(3, 5, 4, 0, 0),
          "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
        { K(3, 0,  4, 0, 1), K(3, 4,  4, 0, 1), K(3, 5, 4, 0, 1),
          "ELR_EL1", "ELR_EL2", "ELR_EL12" },
        { K(3, 0,  5, 1, 0), K(3, 4,  5, 1, 0), K(3, 5, 5, 1, 0),
          "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
        { K(3, 0,  5, 1, 1), K(3, 4,  5, 1, 1), K(3, 5, 5, 1, 1),
          "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
        { K(3, 0,  5, 2, 0), K(3, 4,  5, 2, 0), K(3, 5, 5, 2, 0),
          "ESR_EL1", "ESR_EL2", "ESR_EL12" },
        { K(3, 0,  6, 0, 0), K(3, 4,  6, 0, 0), K(3, 5, 6, 0, 0),
          "FAR_EL1", "FAR_EL2", "FAR_EL12" },
        { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
          "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
        { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
          "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
        { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
          "VBAR", "VBAR_EL2", "VBAR_EL12" },
        { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
          "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
        { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
          "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },

        /*
         * Note that redirection of ZCR is mentioned in the description
         * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
         * not in the summary table.
         */
        { K(3, 0,  1, 2, 0), K(3, 4,  1, 2, 0), K(3, 5, 1, 2, 0),
          "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },

        { K(3, 0,  5, 6, 0), K(3, 4,  5, 6, 0), K(3, 5, 5, 6, 0),
          "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },

        /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
        /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
    };
#undef K

    size_t i;

    for (i = 0; i < ARRAY_SIZE(aliases); i++) {
        const struct E2HAlias *a = &aliases[i];
        ARMCPRegInfo *src_reg, *dst_reg;

        if (a->feature && !a->feature(&cpu->isar)) {
            continue;
        }

        src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key);
        dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key);
        g_assert(src_reg != NULL);
        g_assert(dst_reg != NULL);

        /* Cross-compare names to detect typos in the keys.  */
        g_assert(strcmp(src_reg->name, a->src_name) == 0);
        g_assert(strcmp(dst_reg->name, a->dst_name) == 0);

        /* None of the core system registers use opaque; we will.  */
        g_assert(src_reg->opaque == NULL);

        /* Create alias before redirection so we dup the right data.  */
        {
            ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
            uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t));
            bool ok;

            new_reg->name = a->new_name;
            new_reg->type |= ARM_CP_ALIAS;
            /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place.  */
            new_reg->access &= PL2_RW | PL3_RW;

            ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
            g_assert(ok);
        }

        src_reg->opaque = dst_reg;
        src_reg->orig_readfn = src_reg->readfn ?: raw_read;
        src_reg->orig_writefn = src_reg->writefn ?: raw_write;
        if (!src_reg->raw_readfn) {
            src_reg->raw_readfn = raw_read;
        }
        if (!src_reg->raw_writefn) {
            src_reg->raw_writefn = raw_write;
        }
        src_reg->readfn = el2_e2h_read;
        src_reg->writefn = el2_e2h_write;
    }
}
#endif
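/*
 * Worked example of the K() encoding used in the alias table above:
 * K(3, 0, 1, 0, 0) is op0=3 op1=0 CRn=1 CRm=0 op2=0, i.e. SCTLR_EL1;
 * its dst_key K(3, 4, 1, 0, 0) is SCTLR_EL2 and its new_key
 * K(3, 5, 1, 0, 0) is the SCTLR_EL12 alias that becomes accessible
 * from EL2/EL3 when the E2H redirection is in effect.
 */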
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TID2) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TID2) {
            return CP_ACCESS_TRAP_EL2;
        }
    }

    if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}
static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = extract32(value, 0, 1);
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /*
     * MDCCSR_EL0[30:29] map to EDSCR[30:29].  Simply RAZ as the external
     * Debug Communication Channel is not implemented.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2].  Map all bits as
     * it is unlikely a guest will care.
     * We don't implement the configurable EL0 access.
     */
    { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32,
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);

    if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        bool disabled = false;

        /* The CPACR.ZEN controls traps to EL1:
         * 0, 2 : trap EL0 and EL1 accesses
         * 1    : trap only EL0 accesses
         * 3    : trap no accesses
         */
        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            /* route_to_el2 */
            return hcr_el2 & HCR_TGE ? 2 : 1;
        }

        /* Check CPACR.FPEN.  */
        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            return 0;
        }
    }

    /* CPTR_EL2.  Since TZ and TFP are positive,
     * they will be zero when EL2 is not present.
     */
    if (el <= 2 && arm_is_el2_enabled(env)) {
        if (env->cp15.cptr_el[2] & CPTR_TZ) {
            return 2;
        }
        if (env->cp15.cptr_el[2] & CPTR_TFP) {
            return 0;
        }
    }

    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}
uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
{
    uint32_t end_len;

    start_len = MIN(start_len, ARM_MAX_VQ - 1);
    end_len = start_len;

    if (!test_bit(start_len, cpu->sve_vq_map)) {
        end_len = find_last_bit(cpu->sve_vq_map, start_len);
        assert(end_len < start_len);
    }
    return end_len;
}
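/*
 * For example: bit N of sve_vq_map set means a vector of (N + 1) * 128
 * bits is supported.  With bits 0, 1 and 3 set and start_len == 2, the
 * test_bit() check fails and find_last_bit(map, 2) returns 1, i.e. the
 * request is rounded down to the supported 256-bit vector length.
 */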
/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }

    return aarch64_sve_zcr_get_valid_len(cpu, zcr_len);
}
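/*
 * For example, at EL1 with ZCR_EL1 == 7 and ZCR_EL2 == 3 on a cpu with
 * EL2, the MIN() chain above yields 3 (a 512-bit vector), which is then
 * rounded down to a supported length by aarch64_sve_zcr_get_valid_len().
 */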
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI.  */
    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
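/*
 * Example: a write of 0x25 stores 5 (bits [3:0] only), requesting a
 * (5 + 1) * 128 = 768-bit vector.  If the effective length shrank,
 * aarch64_sve_narrow_vq() discards the parts of the Z/P registers
 * that are no longer accessible at the new length.
 */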
static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
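/*
 * Worked examples: with MASK == 3 the watchpoint covers
 * len = 1ULL << 3 = 8 bytes starting at wvr & ~7.  With MASK == 0 and
 * BAS == 0b00001100 (wvr bit 2 clear), basstart == 2 and len == 2, so
 * exactly bytes wvr + 2 and wvr + 3 are watched.
 */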
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
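/*
 * Example of the BAS cases listed above: BAS == 0b1100 places the
 * breakpoint at addr + 2, i.e. on the second halfword of the aligned
 * word, which is what a breakpoint on a 16-bit insn at such an address
 * requires.
 */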
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
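/*
 * Example: if the guest writes BAS == 0b0101, forcing BAS[1] = BAS[0]
 * and BAS[3] = BAS[2] above turns it into 0b1111, so only the four
 * values 0b0000/0b0011/0b1100/0b1111 ever reach hw_breakpoint_update().
 */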
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;

    /*
     * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot
     * use AArch32.  Given that bit 15 is RES1, if the value is 0 then
     * the register must not exist for this cpu.
     */
    if (cpu->isar.dbgdidr != 0) {
        ARMCPRegInfo dbgdidr = {
            .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0,
            .opc1 = 0, .opc2 = 0,
            .access = PL0_R, .accessfn = access_tda,
            .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
        };
        define_one_arm_cp_reg(cpu, &dbgdidr);
    }

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = arm_num_brps(cpu);
    wrps = arm_num_wrps(cpu);
    ctx_cmps = arm_num_ctx_cmps(cpu);

    assert(ctx_cmps <= brps);

    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
static void define_pmu_regs(ARMCPU *cpu)
{
    /*
     * v7 performance monitor control register: same implementor
     * field as main ID register, and we implement four counters in
     * addition to the cycle count register.
     */
    unsigned int i, pmcrn = PMCR_NUM_COUNTERS;
    ARMCPRegInfo pmcr = {
        .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
        .access = PL0_RW,
        .type = ARM_CP_IO | ARM_CP_ALIAS,
        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
        .accessfn = pmreg_access, .writefn = pmcr_write,
        .raw_writefn = raw_write,
    };
    ARMCPRegInfo pmcr64 = {
        .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
        .access = PL0_RW, .accessfn = pmreg_access,
        .type = ARM_CP_IO,
        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
        .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) |
                      PMCRLC,
        .writefn = pmcr_write, .raw_writefn = raw_write,
    };
    define_one_arm_cp_reg(cpu, &pmcr);
    define_one_arm_cp_reg(cpu, &pmcr64);
    for (i = 0; i < pmcrn; i++) {
        char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
        char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
        char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
        char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
        ARMCPRegInfo pmev_regs[] = {
            { .name = pmevcntr_name, .cp = 15, .crn = 14,
              .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
              .accessfn = pmreg_access },
            { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
              .type = ARM_CP_IO,
              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
              .raw_readfn = pmevcntr_rawread,
              .raw_writefn = pmevcntr_rawwrite },
            { .name = pmevtyper_name, .cp = 15, .crn = 14,
              .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
              .accessfn = pmreg_access },
            { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
              .type = ARM_CP_IO,
              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
              .raw_writefn = pmevtyper_rawwrite },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, pmev_regs);
        g_free(pmevcntr_name);
        g_free(pmevcntr_el0_name);
        g_free(pmevtyper_name);
        g_free(pmevtyper_el0_name);
    }
    if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (cpu_isar_feature(any_pmu_8_4, cpu)) {
        static const ARMCPRegInfo v84_pmmir = {
            .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
            .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
            .resetvalue = 0
        };
        define_one_arm_cp_reg(cpu, &v84_pmmir);
    }
}
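/*
 * Worked example of the event counter encoding above: PMEVCNTR<n> uses
 * crm = 8 | (3 & (n >> 3)) and opc2 = n & 7, so counter 5 sits at
 * crm = 8, opc2 = 5, and a hypothetical counter 10 would sit at
 * crm = 9, opc2 = 2; with PMCR_NUM_COUNTERS == 4 only counters 0..3
 * are actually defined here.
 */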
/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr1 = cpu->isar.id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

#ifndef CONFIG_USER_ONLY
static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
#endif
/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state exclusion has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env,
                                    const ARMCPRegInfo *ri, bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode.  */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env, ri, isread);
}

/*
 * A trivial implementation of ARMv8.1-LOR leaves all of these
 * registers fixed at 0, which indicates that there are zero
 * supported Limited Ordering regions.
 */
static const ARMCPRegInfo lor_reginfo[] = {
    { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
      .access = PL1_R, .accessfn = access_lor_ns,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_feature(env, ARM_FEATURE_EL2) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
    REGINFO_SENTINEL
};
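/*
 * Illustrative note (not from the original source): each pointer
 * authentication key is architecturally 128 bits wide and is exposed
 * to software as a LO/HI pair of 64-bit system registers; the
 * fieldoffsets above map each pair onto the .lo and .hi halves of one
 * key in CPUARMState.keys.
 */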
static const ARMCPRegInfo tlbirange_reginfo[] = {
    { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae2_write },
    { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae2_write },
    { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3_write },
    { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3_write },
    REGINFO_SENTINEL
};
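/*
 * Illustrative note (not from the original source): the *OS
 * (outer-shareable) entries above reuse the inner-shareable writefns;
 * QEMU models all broadcast TLB maintenance the same way, so the IS
 * and OS flavours differ only in their encodings.
 */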
static const ARMCPRegInfo tlbios_reginfo[] = {
    { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    REGINFO_SENTINEL
};
static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000.  */
    env->NF = env->CF = env->VF = 0, env->ZF = 1;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest.  There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      ri->name, error_get_pretty(err));
        error_free(err);

        env->ZF = 0; /* NZCV = 0100 */
        return 0;
    }
    return ret;
}
/* We do not support re-seeding, so the two registers operate the same. */
static const ARMCPRegInfo rndr_reginfo[] = {
    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
      .access = PL0_R, .readfn = rndr_readfn },
    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
      .access = PL0_R, .readfn = rndr_readfn },
    REGINFO_SENTINEL
};
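/*
 * Illustrative note (not from the original source): QEMU stores the Z
 * flag inverted in env->ZF (Z is set when env->ZF == 0), so the
 * "env->ZF = 1" in rndr_readfn means Z clear (NZCV = 0000, success)
 * and "env->ZF = 0" means Z set (NZCV = 0100, the architected RNDR
 * failure indication).
 */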
#ifndef CONFIG_USER_ONLY
static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
                           uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /* CTR_EL0 System register -> DminLine, bits [19:16] */
    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
    uint64_t vaddr_in = (uint64_t) value;
    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
    void *haddr;
    int mem_idx = cpu_mmu_index(env, false);

    /* This won't be crossing page boundaries */
    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
    if (haddr) {
        ram_addr_t offset;
        MemoryRegion *mr;

        /* RCU lock is already being held */
        mr = memory_region_from_host(haddr, &offset);

        if (mr) {
            memory_region_writeback(mr, offset, dline_size);
        }
    }
}
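/*
 * Worked example (illustrative, not from the original source): with
 * CTR_EL0.DminLine == 4, i.e. a 16-word (64-byte) minimum data cache
 * line, dline_size = 4 << 4 = 64, and the input address is rounded
 * down to a 64-byte boundary before the writeback.
 */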
static const ARMCPRegInfo dcpop_reg[] = {
    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dcpodp_reg[] = {
    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
    REGINFO_SENTINEL
};
#endif /*CONFIG_USER_ONLY*/
static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_ATA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_TCO;
}

static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
}
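/*
 * Illustrative note (not from the original source): PSTATE.TCO is a
 * single bit, so tco_write masks the incoming value with PSTATE_TCO
 * and preserves every other PSTATE bit; tco_read likewise returns
 * only that bit.
 */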
static const ARMCPRegInfo mte_reginfo[] = {
    { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
    { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
    { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
    { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
    { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
    { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
    { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
      .access = PL1_R, .accessfn = access_aa64_tid5,
      .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
    { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL1_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL1_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_CONST, .access = PL0_RW, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
    { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
      .access = PL0_W, .type = ARM_CP_DC_GVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_DC_GZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    REGINFO_SENTINEL
};
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}
static const ARMCPRegInfo predinv_reginfo[] = {
    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /*
     * Note the AArch32 opcodes have a different OPC1.
     */
    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    REGINFO_SENTINEL
};
static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read the high 32 bits of the current CCSIDR */
    return extract64(ccsidr_read(env, ri), 32, 32);
}

static const ARMCPRegInfo ccsidr2_reginfo[] = {
    { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
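/*
 * Illustrative note (not from the original source): CCSIDR2 exists for
 * ARMv8.3-CCIDX, where CCSIDR uses a 64-bit format to describe larger
 * caches; extract64(x, 32, 32) in the readfn returns bits [63:32], the
 * half that does not fit in the 32-bit CCSIDR view.
 */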
static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid3(env, ri, isread);
    }

    return CP_ACCESS_OK;
}
static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_joscr_jmcr(CPUARMState *env,
                                        const ARMCPRegInfo *ri, bool isread)
{
    /*
     * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
     * in v7A, not in v8A.
     */
    if (!arm_feature(env, ARM_FEATURE_V8) &&
        arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TJDBX)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
static const ARMCPRegInfo jazelle_regs[] = {
    { .name = "JIDR",
      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_R, .accessfn = access_jazelle,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JOSCR",
      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
      .accessfn = access_joscr_jmcr,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JMCR",
      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
      .accessfn = access_joscr_jmcr,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo vhe_reginfo[] = {
    { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
    { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
#ifndef CONFIG_USER_ONLY
    { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
      .fieldoffset =
        offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hv_timer_reset,
      .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
    { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
      .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
    { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
    { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
    { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
#endif
    REGINFO_SENTINEL
};
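/*
 * Illustrative note (not from the original source): the CNTP and CNTV
 * "_EL02" entries above are ARM_CP_ALIAS views used under
 * HCR_EL2.E2H; their fieldoffsets point at the same c14_timer[] state
 * as the ordinary CNTP/CNTV registers, so both access paths see one
 * shared timer.
 */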
#ifndef CONFIG_USER_ONLY
static const ARMCPRegInfo ats1e1_reginfo[] = {
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo ats1cp_reginfo[] = {
    { .name = "ATS1CPRP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
    { .name = "ATS1CPWP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
    REGINFO_SENTINEL
};
#endif
/*
 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
 * is non-zero, which is never for ARMv7, optionally in ARMv8
 * and mandatorily for ARMv8.2 and up.
 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
 * implementation is RAZ/WI we can ignore this detail, as we
 * do for ACTLR.
 */
static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
    { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_tacr,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .accessfn = access_aa32_tid3,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar6 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST,
            .accessfn = access_aa64_tid2,
            .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
        define_pmu_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /*
             * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
             * emulation because we don't know the right value for the
             * GIC field until after we define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R,
#ifdef CONFIG_USER_ONLY
              .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr0
#else
              .type = ARM_CP_NO_RAW,
              .accessfn = access_aa64_tid3,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore
#endif
            },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64pfr1 },
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64zfr0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr2 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_pfr2 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = 0x000f000f00ff0000,
              .fixed_bits    = 0x0000000000000011 },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = 0x00000000000000f0 },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64ZFR0_EL1"           },
            { .name = "ID_AA64MMFR0_EL1",
              .fixed_bits    = 0x00000000ff000000 },
            { .name = "ID_AA64MMFR1_EL1"          },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits    = 0x0000000000000006 },
            { .name = "ID_AA64DFR1_EL1"           },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64AFR*",
              .is_glob = true                     },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = 0x00fffffff0fffff0 },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = 0x000000f0ffffffff },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true                     },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
            if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
            }
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-AA32HPD.  */
        if (cpu_isar_feature(aa32_hpd, cpu)) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    if (cpu_isar_feature(aa32_jazelle, cpu)) {
        define_arm_cp_regs(cpu, jazelle_regs);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R,
              .accessfn = access_aa64_tid1,
              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
            .name = "TLBTR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
            .access = PL1_R,
            .accessfn = access_aa32_tid1,
            .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
            .name = "MPUIR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1",
              .exported_bits = 0x00000000ffffffff },
            { .name = "REVIDR_EL1" },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
              .fixed_bits = 0x0000000080000000 },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_tacr,
              .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (cpu_isar_feature(aa32_ac2, cpu)) {
            define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        /*
         * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
         * There are two flavours:
         *  (1) older 32-bit only cores have a simple 32-bit CBAR
         *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
         *      32-bit register visible to AArch32 at a different encoding
         *      to the "flavour 1" register and with the bits rearranged to
         *      be able to squash a 64-bit address into the 32-bit view.
         * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
         * in future if we support AArch32-only configs of some of the
         * AArch64 cores we might need to add a specific feature flag
         * to indicate cores with "flavour 2" CBAR.
         */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW, .accessfn = access_tvm_trvm,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        define_arm_cp_regs(cpu, lor_reginfo);
    }
    if (cpu_isar_feature(aa64_pan, cpu)) {
        define_one_arm_cp_reg(cpu, &pan_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1e1_reginfo);
    }
    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1cp_reginfo);
    }
#endif
    if (cpu_isar_feature(aa64_uao, cpu)) {
        define_one_arm_cp_reg(cpu, &uao_reginfo);
    }

    if (cpu_isar_feature(aa64_dit, cpu)) {
        define_one_arm_cp_reg(cpu, &dit_reginfo);
    }
    if (cpu_isar_feature(aa64_ssbs, cpu)) {
        define_one_arm_cp_reg(cpu, &ssbs_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_cp_regs(cpu, vhe_reginfo);
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
    if (cpu_isar_feature(aa64_rndr, cpu)) {
        define_arm_cp_regs(cpu, rndr_reginfo);
    }
    if (cpu_isar_feature(aa64_tlbirange, cpu)) {
        define_arm_cp_regs(cpu, tlbirange_reginfo);
    }
    if (cpu_isar_feature(aa64_tlbios, cpu)) {
        define_arm_cp_regs(cpu, tlbios_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    /* Data Cache clean instructions up to PoP */
    if (cpu_isar_feature(aa64_dcpop, cpu)) {
        define_one_arm_cp_reg(cpu, dcpop_reg);

        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
            define_one_arm_cp_reg(cpu, dcpodp_reg);
        }
    }
#endif /*CONFIG_USER_ONLY*/

    /*
     * If full MTE is enabled, add all of the system registers.
     * If only "instructions available at EL0" are enabled,
     * then define only a RAZ/WI version of PSTATE.TCO.
     */
    if (cpu_isar_feature(aa64_mte, cpu)) {
        define_arm_cp_regs(cpu, mte_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
        define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    }
#endif

    if (cpu_isar_feature(any_predinv, cpu)) {
        define_arm_cp_regs(cpu, predinv_reginfo);
    }

    if (cpu_isar_feature(any_ccidx, cpu)) {
        define_arm_cp_regs(cpu, ccsidr2_reginfo);
    }

#ifndef CONFIG_USER_ONLY
    /*
     * Register redirections and aliases must be done last,
     * after the registers from the other extensions have been defined.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_vh_e2h_redirects_aliases(cpu);
    }
#endif
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /*
         * The lower part of each SVE register aliases to the FPU
         * registers so we don't need to include both.
         */
#ifdef TARGET_AARCH64
        if (isar_feature_aa64_sve(&cpu->isar)) {
            gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg,
                                     arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs),
                                     "sve-registers.xml", 0);
        } else
#endif
        {
            gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                     aarch64_fpu_gdb_set_reg,
                                     34, "aarch64-fpu.xml", 0);
        }
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (cpu_isar_feature(aa32_simd_r32, cpu)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs),
                             "system-registers.xml", 0);
}
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    qemu_printf("  %s\n", name);
    g_free(name);
}

void arm_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    qemu_printf("Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, NULL);
    g_slist_free(list);
}

static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    QAPI_LIST_PREPEND(*cpu_list, info);
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);
    /* Reset the secure state to the specific incoming state.  This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank.  This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
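/*
 * The key computed above is the one get_arm_cp_reginfo() later looks up:
 * ENCODE_AA64_CP_REG packs (cp, crn, crm, op0, op1, op2) for sysregs
 * visible to AArch64, while ENCODE_CP_REG also folds in the 32/64-bit
 * access width and the security state, which is how a banked AArch32
 * register gets distinct S and NS entries in cpu->cp_regs.
 */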
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /*
     * This API is only for Arm's system coprocessors (14 and 15) or
     * (M-profile or v7A-and-earlier only) for implementation defined
     * coprocessors in the range 0..7.  Our decode assumes this, since
     * 8..13 can be used for other insns including VFP and Neon. See
     * valid_cp() in translate.c.  Assert here that we haven't tried
     * to use an invalid coprocessor number.
     */
    switch (r->state) {
    case ARM_CP_STATE_BOTH:
        /* 0 has a special meaning, but otherwise the same rules as AA32. */
        if (r->cp == 0) {
            break;
        }
        /* fall through */
    case ARM_CP_STATE_AA32:
        if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
            !arm_feature(&cpu->env, ARM_FEATURE_M)) {
            assert(r->cp >= 14 && r->cp <= 15);
        } else {
            assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
        }
        break;
    case ARM_CP_STATE_AA64:
        assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
        break;
    default:
        g_assert_not_reached();
    }
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0:
            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
            mask = PL0U_R | PL1_RW;
            break;
        case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
        case 5:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL | ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of the register */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}
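/*
 * For example, a reginfo with .crm = CP_ANY, .opc1 = 0 and .opc2 = CP_ANY
 * expands in the loops above into 16 * 8 = 128 add_cpreg_to_hashtable()
 * calls per state; every instance other than (crm, opc1, opc2) == (0, 0, 0)
 * is then flagged ARM_CP_ALIAS | ARM_CP_NO_GDB inside
 * add_cpreg_to_hashtable(), so the register is only migrated once.
 */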
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}
/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
{
    const ARMCPRegUserSpaceInfo *m;
    ARMCPRegInfo *r;

    for (m = mods; m->name; m++) {
        GPatternSpec *pat = NULL;
        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}
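/*
 * Usage sketch (the register name here is illustrative, not a real entry):
 * a glob mod such as
 *     { .name = "ID_FOO*", .is_glob = true }
 * forces every register whose name starts with "ID_FOO" to a constant
 * zero, while an exact-match mod additionally masks the existing
 * resetvalue with exported_bits and ORs in fixed_bits.
 */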
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
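/*
 * For reference, the CPSR layout reassembled above is: N = bit 31,
 * Z = bit 30, C = bit 29, V = bit 28, Q = bit 27, IT[1:0] = bits 26:25,
 * GE = bits 19:16, IT[7:2] = bits 15:10, A/I/F = bits 8:6 (held in
 * env->daif), T = bit 5; the remaining bits, including the mode field
 * M = bits 4:0, live in env->uncached_cpsr.
 */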
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;
    bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
        (mask & (CPSR_M | CPSR_E | CPSR_IL));

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q) {
        env->QF = ((val & CPSR_Q) != 0);
    }
    if (mask & CPSR_T) {
        env->thumb = ((val & CPSR_T) != 0);
    }
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
    if (rebuild_hflags) {
        arm_rebuild_hflags(env);
    }
}
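/*
 * The final "mask &= ~CACHED_CPSR_BITS" above stops env->uncached_cpsr
 * from shadowing the NZCVQ, IT, GE and Thumb bits that were already
 * stored in their dedicated env fields; cpsr_read() recombines the two
 * halves.
 */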
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}
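/*
 * Worked example: sxtb16(0x008000ff) sign-extends byte 0 (0xff -> 0xffff)
 * and byte 2 (0x80 -> 0xff80) within their respective halfwords, so the
 * helper returns 0xff80ffff.
 */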
static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
{
    /*
     * Take a division-by-zero exception if necessary; otherwise return
     * to get the usual non-trapping division behaviour (result of 0)
     */
    if (arm_feature(env, ARM_FEATURE_M)
        && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
        raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
    }
}
uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}
int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
{
    if (den == 0) {
        handle_possible_div0_trap(env, GETPC());
        return 0;
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;
    }
    return num / den;
}
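/*
 * The INT_MIN / -1 special case matches the architected SDIV behaviour:
 * the true quotient 2^31 is unrepresentable in 32 bits, so the result
 * wraps to INT_MIN instead of invoking undefined behaviour in the host
 * division.
 */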
uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
{
    if (den == 0) {
        handle_possible_div0_trap(env, GETPC());
        return 0;
    }
    return num / den;
}
uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
#ifdef CONFIG_USER_ONLY

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = env_archcpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode) {
        return;
    }

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}
/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contains a target of EL1. This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
};
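/*
 * Worked example: a physical IRQ from non-secure EL0, with a 64-bit EL3
 * (is64 = 1), SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and HCR_EL2.IMO = 0,
 * indexes target_el_table[1][0][1][0][0][0] -- the "1 0 1 0" row above --
 * and yields a target of EL1.
 */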
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    }

    /*
     * For these purposes, TGE and AMO/IMO/FMO both force the
     * interrupt to EL2.  Fold TGE into the bit extracted above.
     */
    hcr |= (hcr_el2 & HCR_TGE) != 0;

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}

void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
            [EXCP_LSERR] = "v8M LSERR UsageFault",
            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
            [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
/*
 * Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /*
     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /*
     * Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
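/*
 * Summary of the xregs mapping established above:
 *   x13/x14: USR/SYS sp and lr      x15:     HYP sp
 *   x16/x17: IRQ lr and sp          x18/x19: SVC lr and sp
 *   x20/x21: ABT lr and sp          x22/x23: UND lr and sp
 *   x24-x30: FIQ r8-r12, sp and lr
 */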
/*
 * Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /*
     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /*
     * Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * for the mode.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /*
         * HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    int new_el;

    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);

    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->pstate &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;

    /* This must be after mode switching. */
    new_el = arm_current_el(env);

    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
            env->uncached_cpsr |= CPSR_SSBS;
        } else {
            env->uncached_cpsr &= ~CPSR_SSBS;
        }
    }

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /* CPSR.PAN is normally preserved unless...  */
        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
            switch (new_el) {
            case 3:
                if (!arm_is_secure_below_el3(env)) {
                    /* ... the target is EL3, from non-secure state.  */
                    env->uncached_cpsr &= ~CPSR_PAN;
                    break;
                }
                /* ... the target is EL3, from secure state ... */
                /* fall through */
            case 1:
                /* ... the target is EL1 and SCTLR.SPAN is 0.  */
                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
                    env->uncached_cpsr |= CPSR_PAN;
                }
                break;
            }
        }
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
    arm_rebuild_hflags(env);
}
*cs
)
9927 * Handle exception entry to Hyp mode; this is sufficiently
9928 * different to entry to other AArch32 modes that we handle it
9931 * The vector table entry used is always the 0x14 Hyp mode entry point,
9932 * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
9933 * The offset applied to the preferred return address is always zero
9934 * (see DDI0487C.a section G1.12.3).
9935 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
9937 uint32_t addr
, mask
;
9938 ARMCPU
*cpu
= ARM_CPU(cs
);
9939 CPUARMState
*env
= &cpu
->env
;
9941 switch (cs
->exception_index
) {
9949 /* Fall through to prefetch abort. */
9950 case EXCP_PREFETCH_ABORT
:
9951 env
->cp15
.ifar_s
= env
->exception
.vaddress
;
9952 qemu_log_mask(CPU_LOG_INT
, "...with HIFAR 0x%x\n",
9953 (uint32_t)env
->exception
.vaddress
);
9956 case EXCP_DATA_ABORT
:
9957 env
->cp15
.dfar_s
= env
->exception
.vaddress
;
9958 qemu_log_mask(CPU_LOG_INT
, "...with HDFAR 0x%x\n",
9959 (uint32_t)env
->exception
.vaddress
);
9975 cpu_abort(cs
, "Unhandled exception 0x%x\n", cs
->exception_index
);
9978 if (cs
->exception_index
!= EXCP_IRQ
&& cs
->exception_index
!= EXCP_FIQ
) {
9979 if (!arm_feature(env
, ARM_FEATURE_V8
)) {
9981 * QEMU syndrome values are v8-style. v7 has the IL bit
9982 * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
9983 * If this is a v7 CPU, squash the IL bit in those cases.
9985 if (cs
->exception_index
== EXCP_PREFETCH_ABORT
||
9986 (cs
->exception_index
== EXCP_DATA_ABORT
&&
9987 !(env
->exception
.syndrome
& ARM_EL_ISV
)) ||
9988 syn_get_ec(env
->exception
.syndrome
) == EC_UNCATEGORIZED
) {
9989 env
->exception
.syndrome
&= ~ARM_EL_IL
;
9992 env
->cp15
.esr_el
[2] = env
->exception
.syndrome
;
9995 if (arm_current_el(env
) != 2 && addr
< 0x14) {
10000 if (!(env
->cp15
.scr_el3
& SCR_EA
)) {
10003 if (!(env
->cp15
.scr_el3
& SCR_IRQ
)) {
10006 if (!(env
->cp15
.scr_el3
& SCR_FIQ
)) {
10010 addr
+= env
->cp15
.hvbar
;
10012 take_aarch32_exception(env
, ARM_CPU_MODE_HYP
, mask
, 0, addr
);
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
{
    /*
     * Return the register number of the AArch64 view of the AArch32
     * register @aarch32_reg. The CPUARMState CPSR is assumed to still
     * be that of the AArch32 mode the exception came from.
     */
    int mode = env->uncached_cpsr & CPSR_M;

    switch (aarch32_reg) {
    case 0 ... 7:
        return aarch32_reg;
    case 8 ... 12:
        return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
    case 13:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
            return 13;
        case ARM_CPU_MODE_HYP:
            return 15;
        case ARM_CPU_MODE_IRQ:
            return 17;
        case ARM_CPU_MODE_SVC:
            return 19;
        case ARM_CPU_MODE_ABT:
            return 21;
        case ARM_CPU_MODE_UND:
            return 23;
        case ARM_CPU_MODE_FIQ:
            return 29;
        default:
            g_assert_not_reached();
        }
    case 14:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
        case ARM_CPU_MODE_HYP:
            return 14;
        case ARM_CPU_MODE_IRQ:
            return 16;
        case ARM_CPU_MODE_SVC:
            return 18;
        case ARM_CPU_MODE_ABT:
            return 20;
        case ARM_CPU_MODE_UND:
            return 22;
        case ARM_CPU_MODE_FIQ:
            return 30;
        default:
            g_assert_not_reached();
        }
    case 15:
        return 31;
    default:
        g_assert_not_reached();
    }
}
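/*
 * For example, an MCR trapped in SVC mode with Rt = 13 is reported as
 * x19 in the AArch64 syndrome, matching the xregs slot that
 * aarch64_sync_32_to_64() uses for the SVC banked r13.
 */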
static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
{
    uint32_t ret = cpsr_read(env);

    /* Move DIT to the correct location for SPSR_ELx */
    if (ret & CPSR_DIT) {
        ret &= ~CPSR_DIT;
        ret |= PSTATE_DIT;
    }

    /* Merge PSTATE.SS into SPSR_ELx */
    ret |= env->pstate & PSTATE_SS;

    return ret;
}
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int old_mode;
    unsigned int cur_el = arm_current_el(env);
    int rt;

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;
        uint64_t hcr;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
                is_aa64 = (hcr & HCR_RW) != 0;
                break;
            }
            /* fall through */
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        switch (syn_get_ec(env->exception.syndrome)) {
        case EC_ADVSIMDFPACCESSTRAP:
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
            break;
        case EC_CP14RTTRAP:
        case EC_CP15RTTRAP:
        case EC_CP14DTTRAP:
            /*
             * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
             * the raw register field from the insn; when taking this to
             * AArch64 we must convert it to the AArch64 view of the register
             * number. Notice that we read a 4-bit AArch32 register number and
             * write back a 5-bit AArch64 one.
             */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            break;
        case EC_CP15RRTTRAP:
        case EC_CP14RRTTRAP:
            /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            rt = extract32(env->exception.syndrome, 10, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                10, 5, rt);
            break;
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        old_mode = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        old_mode = cpsr_read_for_spsr_elx(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;

    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    if (cpu_isar_feature(aa64_pan, cpu)) {
        /* The value of PSTATE.PAN is normally preserved, except when ... */
        new_mode |= old_mode & PSTATE_PAN;
        switch (new_el) {
        case 2:
            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
                != (HCR_E2H | HCR_TGE)) {
                break;
            }
            /* fall through */
        case 1:
            /* ... the target is EL1 ... */
            /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
                new_mode |= PSTATE_PAN;
            }
            break;
        }
    }
    if (cpu_isar_feature(aa64_mte, cpu)) {
        new_mode |= PSTATE_TCO;
    }

    if (cpu_isar_feature(aa64_ssbs, cpu)) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
            new_mode |= PSTATE_SSBS;
        } else {
            new_mode &= ~PSTATE_SSBS;
        }
    }

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);
    helper_rebuild_hflags_a64(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
/*
 * Do semihosting call and set the appropriate return value. All the
 * permission and validity checks have been done at translate time.
 *
 * We only see semihosting exceptions in TCG, as they are not
 * trapped to the hypervisor in KVM.
 */
#ifdef CONFIG_TCG
static void handle_semihosting(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_common_semihosting(cs);
        env->pc += 4;
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_common_semihosting(cs);
        env->regs[15] += env->thumb ? 2 : 4;
    }
}
#endif
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 *
 * Note: this is used for both TCG (as the do_interrupt tcg op),
 *       and KVM to re-inject guest debug exceptions, and to
 *       inject a Synchronous-External-Abort.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
#ifdef CONFIG_TCG
    if (cs->exception_index == EXCP_SEMIHOST) {
        handle_semihosting(cs);
        return;
    }
#endif

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */
uint64_t arm_sctlr(CPUARMState *env, int el)
{
    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
    if (el == 0) {
        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
        el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
             ? 2 : 1;
    }
    return env->cp15.sctlr_el[el];
}
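/*
 * For example, with HCR_EL2.{E2H,TGE} == {1,1} an EL0 access uses the
 * EL2&0 regime (ARMMMUIdx_E20_0 or ARMMMUIdx_SE20_0), so arm_sctlr(env, 0)
 * returns SCTLR_EL2 rather than SCTLR_EL1.
 */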
/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
#ifndef CONFIG_USER_ONLY

/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    hcr_el2 = arm_hcr_el2_eff(env);

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}
/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        return env->cp15.vsttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

#endif /* !CONFIG_USER_ONLY */
/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
        return ARMMMUIdx_Stage1_SE0;
    case ARMMMUIdx_SE10_1:
        return ARMMMUIdx_Stage1_SE1;
    case ARMMMUIdx_SE10_1_PAN:
        return ARMMMUIdx_Stage1_SE1_PAN;
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}
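/*
 * For example, a lookup made with ARMMMUIdx_E10_1 walks the stage 1
 * tables as ARMMMUIdx_Stage1_E1; any index which is not a two-stage
 * regime falls through the default case and is returned unchanged.
 */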
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}
/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}
#ifndef CONFIG_USER_ONLY
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
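/*
 * Worked example: AP == 2 decodes to "privileged read/write, user
 * read-only", so ap_to_rw_prot() returns PAGE_READ for a user-mode
 * regime and PAGE_READ | PAGE_WRITE for a privileged one.
 */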
/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
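/*
 * The simple AP[2:1] encoding is thus: 0 => privileged RW only,
 * 1 => RW at any privilege, 2 => privileged RO only, 3 => RO at any
 * privilege.
 */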
static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        switch (xn) {
        case 0:
            prot |= PAGE_EXEC;
            break;
        case 1:
            if (s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        case 2:
            break;
        case 3:
            if (!s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        if (!extract32(xn, 1, 1)) {
            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
                prot |= PAGE_EXEC;
            }
        }
    }
    return prot;
}
/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_Stage2);
    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            /* PAN forbids data accesses but doesn't affect insn fetch */
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            xn = xn || !(prot_rw & PAGE_READ) || pxn;
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
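/*
 * Worked example: with TTBCR.N == 0 (tcr->mask == 0) every address uses
 * TTBR0, and for VA 0x00123456 the L1 descriptor offset is
 * (0x00123456 >> 18) & 0x3ffc == 0x4, i.e. the second word of the table
 * (the index is VA[31:20], scaled by the 4-byte descriptor size).
 */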
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, bool *is_secure,
                               ARMMMUFaultInfo *fi)
{
    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
                                          : ARMMMUIdx_Stage2;
        ARMCacheAttrs cacheattrs = {};
        MemTxAttrs txattrs = {};

        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
                                 &cacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !*is_secure;
            return ~0;
        }
        if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
            (cacheattrs.attrs & 0xf0) == 0) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !*is_secure;
            return ~0;
        }

        if (arm_is_secure_below_el3(env)) {
            /* Check if page table walk is to secure or non-secure PA space. */
            if (*is_secure) {
                *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
            } else {
                *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
            }
        } else {
            assert(!*is_secure);
        }

        addr = s2pa;
    }
    return addr;
}
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    ARMCPU *cpu = env_archcpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
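/*
 * Worked example: level == 1, stride == 9 (4KB granule) and
 * inputsize == 40 gives startsizecheck = 40 - ((3 - 1) * 9 + 12) == 10,
 * which lies inside the permitted [1, stride + 4] range, so the
 * suggested starting level is accepted.
 */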
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
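/*
 * Worked example: s2attrs == 0xf (Normal, write-back inner and outer)
 * with HCR_EL2.CD clear expands to (3 << 6) | (3 << 4) | (3 << 2) | 3
 * == 0xff, i.e. write-back with RW-allocate hints in both nibbles.
 */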
#endif /* !CONFIG_USER_ONLY */
static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 37, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBI bit so we always have 2 bits.  */
        return extract32(tcr, 20, 1) * 3;
    }
}

static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 51, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBID bit so we always have 2 bits.  */
        return extract32(tcr, 29, 1) * 3;
    }
}

static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 57, 2);
    } else {
        /* Replicate the single TCMA bit so we always have 2 bits.  */
        return extract32(tcr, 30, 1) * 3;
    }
}
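/*
 * For example, in a single-range regime with TCR.TBI == 1 the tbi helper
 * returns extract32(tcr, 20, 1) * 3 == 0b11, so the one configuration
 * bit answers for both halves of the address space uniformly.
 */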
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    bool epd, hpd, using16k, using64k;
    int select, tsz, tbi, max_tsz;

    if (!regime_has_2_ranges(mmu_idx)) {
        select = 0;
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
            /* VTCR_EL2 */
            hpd = false;
        } else {
            hpd = extract32(tcr, 24, 1);
        }
        epd = false;
    } else {
        /*
         * Bit 55 is always between the two regions, and is canonical for
         * determining if address tagging is enabled.
         */
        select = extract64(va, 55, 1);
        if (!select) {
            tsz = extract32(tcr, 0, 6);
            epd = extract32(tcr, 7, 1);
            using64k = extract32(tcr, 14, 1);
            using16k = extract32(tcr, 15, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            int tg = extract32(tcr, 30, 2);
            using16k = tg == 1;
            using64k = tg == 3;
            tsz = extract32(tcr, 16, 6);
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
    }

    if (cpu_isar_feature(aa64_st, env_archcpu(env))) {
        max_tsz = 48 - using64k;
    } else {
        max_tsz = 39;
    }

    tsz = MIN(tsz, max_tsz);
    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */

    /* Present TBI as a composite with TBID.  */
    tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
    if (!data) {
        tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
    }
    tbi = (tbi >> select) & 1;

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .tbi = tbi,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
    };
}
#ifndef CONFIG_USER_ONLY
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
/**
 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a long-format
 * DFSR/IFSR fault register, with the following caveats:
 *  * the WnR bit is never set (the caller must do this).
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @mmu_idx: MMU index indicating required translation regime
 * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table
 *             walk), must be true if this is stage 2 of a stage 1+2 walk for an
 *             EL0 access. If @mmu_idx is anything else, @s1_is_el0 is ignored.
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size_ptr: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool s1_is_el0,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO: This code does not support shareability levels. */
    if (aarch64) {
        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;
        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
        inputsize = addrsize - param.tsz;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;

            if (cpu_isar_feature(aa64_st, cpu)) {
                startlevel &= 3;
            }
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    /*
     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
     * and also to mask out CnP (bit 0) which could validly be non-zero.
     */
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        ns = mmu_idx == ARMMMUIdx_Stage2;
        xn = extract32(attrs, 11, 2);
        *prot = get_S2prot(env, ap, xn, s1_is_el0);
    } else {
        ns = extract32(attrs, 3, 1);
        xn = extract32(attrs, 12, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        arm_tlb_bti_gp(txattrs) = true;
    }

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
    } else {
        /* Index into MAIR registers for cache attributes */
        uint8_t attrindx = extract32(attrs, 0, 3);
        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
        assert(attrindx <= 7);
        cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
    }
    cacheattrs->shareability = extract32(attrs, 6, 2);

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
                               mmu_idx == ARMMMUIdx_Stage2_S);
    fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
    return true;
}
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
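/*
 * Worked example for the DRSR decode above: an Rsize field of 9 means a
 * region of 2^(9+1) == 1KB, so rmask == 0x3ff and DRBAR must be 1KB
 * aligned for the region to be accepted.
 */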
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
        bool pxn = false;

        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
            pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
        }

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn && !(pxn && !is_user)) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
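/*
 * Worked example: combine_cacheattr_nibble(0xf, 0xb) == 0xb -- stage 2
 * write-through (0xb has 2 in its attribute bits [3:2]) wins over the
 * stage 1 write-back, while the stage 1 RW-allocate hint (low two bits
 * of 0xf) is preserved.
 */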
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo, s2lo, s1hi, s2hi;
    ARMCacheAttrs ret;
    bool tagged = false;

    if (s1.attrs == 0xf0) {
        tagged = true;
        s1.attrs = 0xff;
    }

    s1lo = extract32(s1.attrs, 0, 4);
    s2lo = extract32(s2.attrs, 0, 4);
    s1hi = extract32(s1.attrs, 4, 4);
    s2hi = extract32(s2.attrs, 4, 4);

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
    if (tagged && ret.attrs == 0xff) {
        ret.attrs = 0xf0;
    }

    return ret;
}
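/*
 * Worked example: combining a stage 1 Normal Write-Back mapping
 * (s1.attrs == 0xff) with a stage 2 Device-nGnRE mapping
 * (s2.attrs == 0x04) takes the Device branch above, producing
 * attrs == 0x04 with the shareability forced to 2 (Outer Shareable).
 */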
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);

    if (mmu_idx != s1_mmu_idx) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations if mmu_idx is a two-stage regime.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};
            ARMMMUIdx s2_mmu_idx;
            bool is_el0;

            ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
                                attrs, prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early. */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
                *phys_ptr = ipa;
                return ret;
            }

            s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
            is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;

            /* S1 is done. Now do S2 translation. */
            ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi, &cacheattrs2);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms. */
            *prot &= s2_prot;

            /* If S2 fails, return early. */
            if (ret) {
                return ret;
            }

            /* Combine the S1 and S2 cache attributes. */
            if (arm_hcr_el2_eff(env) & HCR_DC) {
                /*
                 * HCR.DC forces the first stage attributes to
                 *  Normal Non-Shareable,
                 *  Inner Write-Back Read-Allocate Write-Allocate,
                 *  Outer Write-Back Read-Allocate Write-Allocate.
                 * Do not overwrite Tagged within attrs.
                 */
                if (cacheattrs->attrs != 0xf0) {
                    cacheattrs->attrs = 0xff;
                }
                cacheattrs->shareability = 0;
            }
            *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);

            /* Check if IPA translates to secure or non-secure PA space. */
            if (arm_is_secure_below_el3(env)) {
                if (attrs->secure) {
                    attrs->secure =
                        !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
                } else {
                    attrs->secure =
                        !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
                          || (env->cp15.vstcr_el2.raw_tcr & VSTCR_SA));
                }
            }
            return 0;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        uint64_t hcr;
        uint8_t memattr;

        /*
         * MMU disabled.  S1 addresses within aa64 translation regimes are
         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
         */
        if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
            int r_el = regime_el(env, mmu_idx);
            if (arm_el_is_aa64(env, r_el)) {
                int pamax = arm_pamax(env_archcpu(env));
                uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
                int addrtop, tbi;

                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
                if (access_type == MMU_INST_FETCH) {
                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
                }
                tbi = (tbi >> extract64(address, 55, 1)) & 1;
                addrtop = (tbi ? 55 : 63);

                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
                    fi->type = ARMFault_AddressSize;
                    fi->level = 0;
                    fi->stage2 = false;
                    return 1;
                }

                /*
                 * When TBI is disabled, we've just validated that all of the
                 * bits above PAMax are zero, so logically we only need to
                 * clear the top byte for TBI.  But it's clearer to follow
                 * the pseudocode set of addrdesc.paddress.
                 */
                address = extract64(address, 0, 52);
            }
        }
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;

        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
        hcr = arm_hcr_el2_eff(env);
        cacheattrs->shareability = 0;
        if (hcr & HCR_DC) {
            if (hcr & HCR_DCT) {
                memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
            } else {
                memattr = 0xff;  /* Normal, WB, RWA */
            }
        } else if (access_type == MMU_INST_FETCH) {
            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
                memattr = 0xee;  /* Normal, WT, RA, NT */
            } else {
                memattr = 0x44;  /* Normal, NC, No */
            }
            cacheattrs->shareability = 2; /* outer shareable */
        } else {
            memattr = 0x00;      /* Device, nGnRnE */
        }
        cacheattrs->attrs = memattr;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
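/*
 * The debug walk below is also the simplest example of calling
 * get_phys_addr(): probe with MMU_DATA_LOAD under the current
 * arm_mmu_idx(), supply scratch prot/page_size/fi/cacheattrs, and
 * report -1 instead of raising a fault, which is what the gdbstub and
 * monitor expect of a debug-mode translation.
 */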
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    ARMCacheAttrs cacheattrs = {};

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, &cacheattrs);

    if (ret) {
        return -1;
    }
    return phys_addr;
}

#endif
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
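/*
 * The saturation test above relies on signed overflow only being
 * possible when both operands have the same sign and the result's sign
 * differs.  For example, add16_sat(0x7000, 0x2000) computes
 * res = 0x9000: (res ^ a) & 0x8000 is set while (a ^ b) & 0x8000 is
 * clear, so the result saturates to 0x7fff.
 */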
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)

#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
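/*
 * For these unsigned parallel ops the GE bits record carry/borrow:
 * on addition, (sum >> 16) == 1 exactly when there is a carry out of
 * bit 15; on subtraction, (sum >> 16) == 0 exactly when a >= b (no
 * borrow).  The SEL instruction consumes these GE bits via
 * HELPER(sel_flags) further down.
 */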
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
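/*
 * Example: usad8(0x01020304, 0x04030201) sums the per-lane absolute
 * differences |4-1| + |3-2| + |2-3| + |1-4| and returns 8.
 */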
/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}
/*
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
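/*
 * Rationale for the inversions above: zlib's crc32() inverts the
 * accumulator on entry and the result on exit, while the ARM CRC32*
 * instructions are defined on the raw accumulator, so the helper
 * inverts on the way in and out to cancel zlib's convention.  QEMU's
 * crc32c() performs no input inversion, so only its output needs the
 * final XOR.
 */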
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     * This register is ignored if E2H+TGE are both set.
     */
    if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        int fpen = extract32(env->cp15.cpacr_el1, 20, 2);

        switch (fpen) {
        case 0:
        case 2:
            if (cur_el == 0 || cur_el == 1) {
                /* Trap to PL1, which might be EL1 or EL3 */
                if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                    return 3;
                }
                return 1;
            }
            if (cur_el == 3 && !is_a64(env)) {
                /* Secure PL1 running at EL3 */
                return 3;
            }
            break;
        case 1:
            if (cur_el == 0) {
                return 1;
            }
            break;
        case 3:
            break;
        }
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && arm_is_el2_enabled(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
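/*
 * To summarise the priority order above: the M-profile NOCP checks win
 * first, then CPACR_EL1 for EL0/EL1, then the AArch32 NSACR, then
 * CPTR_EL2 and finally CPTR_EL3; the first trap that applies determines
 * the target EL, and a return of 0 means FP is usable at cur_el.
 */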
/* Return the exception level we're running at if this is our mmu_idx */
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    if (mmu_idx & ARM_MMU_IDX_M) {
        return mmu_idx & ARM_MMU_IDX_M_PRIV;
    }

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE20_0:
        return 0;
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
        return 1;
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    default:
        g_assert_not_reached();
    }
}
#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
    ARMMMUIdx idx;
    uint64_t hcr;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    /* See ARM pseudo-function ELIsInHost.  */
    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            idx = ARMMMUIdx_E20_0;
        } else {
            idx = ARMMMUIdx_E10_0;
        }
        break;
    case 1:
        if (env->pstate & PSTATE_PAN) {
            idx = ARMMMUIdx_E10_1_PAN;
        } else {
            idx = ARMMMUIdx_E10_1;
        }
        break;
    case 2:
        /* Note that TGE does not apply at EL2.  */
        if (arm_hcr_el2_eff(env) & HCR_E2H) {
            if (env->pstate & PSTATE_PAN) {
                idx = ARMMMUIdx_E20_2_PAN;
            } else {
                idx = ARMMMUIdx_E20_2;
            }
        } else {
            idx = ARMMMUIdx_E2;
        }
        break;
    case 3:
        return ARMMMUIdx_SE3;
    default:
        g_assert_not_reached();
    }

    if (arm_is_secure_below_el3(env)) {
        idx &= ~ARM_MMU_IDX_A_NS;
    }

    return idx;
}

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}
#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                           ARMMMUIdx mmu_idx,
                                           CPUARMTBFlags flags)
{
    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
    }
    return flags;
}

static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                              ARMMMUIdx mmu_idx,
                                              CPUARMTBFlags flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        DP_TBFLAG_A32(flags, SCTLR__B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }
    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    uint32_t ccr = env->v7m.ccr[env->v7m.secure];

    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1.  */
    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_v7m_is_handler_mode(env)) {
        DP_TBFLAG_M32(flags, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        DP_TBFLAG_M32(flags, STACKCHECK, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
static CPUARMTBFlags rebuild_hflags_aprofile(CPUARMState *env)
{
    CPUARMTBFlags flags = {};

    DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env));
    return flags;
}
static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
    int el = arm_current_el(env);

    if (arm_sctlr(env, el) & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_el_is_aa64(env, 1)) {
        DP_TBFLAG_A32(flags, VFPEN, 1);
    }

    if (el < 2 && env->cp15.hstr_el2 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint64_t sctlr;
    int tbii, tbid;

    DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses.  */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    DP_TBFLAG_A64(flags, TBII, tbii);
    DP_TBFLAG_A64(flags, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);
        int zcr_len;

        /*
         * If SVE is disabled, but FP is enabled,
         * then the effective len is 0.
         */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            zcr_len = sve_zcr_len_for_el(env, el);
        }
        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
        DP_TBFLAG_A64(flags, ZCR_LEN, zcr_len);
    }

    sctlr = regime_sctlr(env, stage1);

    if (sctlr & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            DP_TBFLAG_A64(flags, BT, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            /* TODO: ARMv8.3-NV */
            DP_TBFLAG_A64(flags, UNPRIV, 1);
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
        case ARMMMUIdx_SE20_2:
        case ARMMMUIdx_SE20_2_PAN:
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked access have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            DP_TBFLAG_A64(flags, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
            }
        }
        /* And again for unprivileged accesses, if required.  */
        if (EX_TBFLAG_A64(flags, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /* Cache TCMA as well as TBI. */
        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}
void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}
static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    CPUARMTBFlags c = env->hflags;
    CPUARMTBFlags r = rebuild_hflags_internal(env);

    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
        fprintf(stderr, "TCG hflags mismatch "
                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
                c.flags, c.flags2, r.flags, r.flags2);
        abort();
    }
#endif
}
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    CPUARMTBFlags flags;

    assert_hflags_rebuild_correctly(env);
    flags = env->hflags;

    if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
        *pc = env->pc;
        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
            DP_TBFLAG_A64(flags, BTYPE, env->btype);
        }
    } else {
        *pc = env->regs[15];

        if (arm_feature(env, ARM_FEATURE_M)) {
            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
                != env->v7m.secure) {
                DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
            }

            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
                 (env->v7m.secure &&
                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
                /*
                 * ASPEN is set, but FPCA/SFPA indicate that there is no
                 * active FP context; we must create a new FP context before
                 * executing any FP insn.
                 */
                DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
            }

            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                DP_TBFLAG_M32(flags, LSPACT, 1);
            }
        } else {
            /*
             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
             */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
            } else {
                DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
                DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
            }
            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
                DP_TBFLAG_A32(flags, VFPEN, 1);
            }
        }

        DP_TBFLAG_AM32(flags, THUMB, env->thumb);
        DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
    }

    /*
     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0          x         Inactive (the TB flag for SS is always 0)
     *     1          0         Active-pending
     *     1          1         Active-not-pending
     * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
     */
    if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
        DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
    }

    *pflags = flags.flags;
    *cs_base = flags.flags2;
}
#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
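/*
 * Example of the pmask computation above: a predicate register holds
 * 16 bits per vector quadword, so each uint64_t element of p[] covers
 * four quadwords' worth of predicate bits.  For vq = 5, vq / 4 == 1
 * and pmask == 0xffff, so the first pass keeps only the low 16 bits
 * of p[1] (the fifth quadword) and all later elements are cleared.
 */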
/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);