 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
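
/*
 * Note: at the fixed 1 GHz above, one CPU cycle lasts exactly one
 * nanosecond, so the cycle<->ns conversions done by cycles_get_count()
 * and cycles_ns_per() below reduce to the identity:
 * muldiv64(ns, ARM_CPU_FREQ, NANOSECONDS_PER_SECOND) == ns.
 */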

static void switch_mode(CPUARMState *env, int mode);

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
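
/*
 * Illustrative (hypothetical) example of a regdef this check catches:
 * an entry defined with only { .writefn = some_write } -- no .fieldoffset,
 * no .readfn and no raw accessors -- has no safe raw path, so it must be
 * registered with ARM_CP_NO_RAW.
 */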

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx = (uintptr_t)key;
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;

    ri = g_hash_table_lookup(cpu->cp_regs, key);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
    uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW. */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR. */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
}
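
/*
 * For example: with HCR_EL2.FB in effect, a TLBIALL executed at EL1 is
 * treated by tlbiall_write() below as if it were the broadcast TLBIALLIS,
 * flushing all CPUs rather than only the local one.
 */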

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}
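
/*
 * ~MAKE_64BIT_MASK(0, 12) clears the low 12 bits, aligning the MVA to a
 * 4K page boundary: e.g. a written value of 0x12345678 yields a pageaddr
 * of 0x12345000.
 */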

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint64_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= R_CPACR_ASEDIS_MASK |
                    R_CPACR_D32DIS_MASK |
                    R_CPACR_CP11_MASK |
                    R_CPACR_CP10_MASK;

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= R_CPACR_ASEDIS_MASK;
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= R_CPACR_D32DIS_MASK;
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
        value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 &&
        FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return icount_enabled() == 1; /* Precise instruction counting */
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return icount_to_ns((int64_t)icount);
}
#endif

static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];
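
/*
 * supported_event_map is a dense table indexed by architectural event
 * number: if e.g. CPU_CYCLES (0x011) is supported, supported_event_map[0x11]
 * holds that event's index within pm_events[]; all other slots hold
 * UNSUPPORTED_EVENT.
 */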

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
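
/*
 * Worked example of the PMCEID split above: event numbers 0x00..0x1f land
 * in PMCEID0 and 0x20..0x3f in PMCEID1, each at bit (number & 0x1f). So
 * STALL (0x3c) sets bit 0x1c of PMCEID1, since 0x3c & 0x20 is nonzero and
 * 0x3c & 0x1f == 0x1c.
 */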

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    uint8_t hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p = filter & PMXEVTYPER_P;
    u = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m = arm_el_is_aa64(env, 1) &&
        arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
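
/*
 * The delta scheme above: while the counter runs, the guest-visible count
 * is (underlying cycle count - c15_ccnt_delta). E.g. if the counter was
 * zeroed when the underlying count was 1000, the delta is 1000, and at an
 * underlying count of 1500 the guest reads 500.
 */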

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
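
/*
 * Note on the overflow arithmetic above: -env->cp15.c15_ccnt is the
 * two's-complement distance to the 64-bit wrap (2^64 - ccnt); when PMCRLC
 * is clear, the cast to uint32_t reduces it to the distance to the 32-bit
 * wrap, matching the 31-bit overflow check in pmccntr_op_start().
 */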

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint64_t count = 0;
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;

    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}
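
/*
 * deposit64(cur_val, 0, 32, value) replaces only bits [31:0], preserving
 * the upper half of the 64-bit cycle counter: e.g.
 * deposit64(0xAAAABBBB11112222, 0, 32, 0x33334444) == 0xAAAABBBB33334444.
 */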

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}

static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}
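
/*
 * The crm/opc2 decode above recovers n from the PMEVTYPER<n> register
 * encoding, n = ((crm & 3) << 3) | (opc2 & 7): e.g. crm = 13, opc2 = 2
 * selects counter ((13 & 3) << 3) | 2 = 10.
 */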

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}

static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (ri->state == ARM_CP_STATE_AA64) {
        if (arm_feature(env, ARM_FEATURE_AARCH64) &&
            !cpu_isar_feature(aa64_aa32_el1, cpu)) {
            value |= SCR_FW | SCR_AW;      /* these two bits are RES1.  */
        }
        valid_mask &= ~SCR_NET;

        if (cpu_isar_feature(aa64_ras, cpu)) {
            valid_mask |= SCR_TERR;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= SCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= SCR_API | SCR_APK;
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            valid_mask |= SCR_EEL2;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= SCR_ATA;
        }
        if (cpu_isar_feature(aa64_scxtnum, cpu)) {
            valid_mask |= SCR_ENSCXT;
        }
        if (cpu_isar_feature(aa64_doublefault, cpu)) {
            valid_mask |= SCR_EASE | SCR_NMEA;
        }
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
        if (cpu_isar_feature(aa32_ras, cpu)) {
            valid_mask |= SCR_TERR;
        }
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * scr_write will set the RES1 bits on an AArch64-only CPU.
     * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
     */
    scr_write(env, ri, 0);
}

static CPAccessResult access_aa64_tid2(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    bool el1 = arm_current_el(env) == 1;
    uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    if (hcr_el2 & HCR_AMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
            ret |= CPSR_A;
        }
    }

    return ret;
}

static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write },
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW
},
2044 { .name
= "CSSELR", .state
= ARM_CP_STATE_BOTH
,
2045 .opc0
= 3, .crn
= 0, .crm
= 0, .opc1
= 2, .opc2
= 0,
2047 .accessfn
= access_aa64_tid2
,
2048 .writefn
= csselr_write
, .resetvalue
= 0,
2049 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.csselr_s
),
2050 offsetof(CPUARMState
, cp15
.csselr_ns
) } },
2051 /* Auxiliary ID register: this actually has an IMPDEF value but for now
2052 * just RAZ for all cores:
2054 { .name
= "AIDR", .state
= ARM_CP_STATE_BOTH
,
2055 .opc0
= 3, .opc1
= 1, .crn
= 0, .crm
= 0, .opc2
= 7,
2056 .access
= PL1_R
, .type
= ARM_CP_CONST
,
2057 .accessfn
= access_aa64_tid1
,
2059 /* Auxiliary fault status registers: these also are IMPDEF, and we
2060 * choose to RAZ/WI for all cores.
2062 { .name
= "AFSR0_EL1", .state
= ARM_CP_STATE_BOTH
,
2063 .opc0
= 3, .opc1
= 0, .crn
= 5, .crm
= 1, .opc2
= 0,
2064 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
2065 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
2066 { .name
= "AFSR1_EL1", .state
= ARM_CP_STATE_BOTH
,
2067 .opc0
= 3, .opc1
= 0, .crn
= 5, .crm
= 1, .opc2
= 1,
2068 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
2069 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
2070 /* MAIR can just read-as-written because we don't implement caches
2071 * and so don't need to care about memory attributes.
2073 { .name
= "MAIR_EL1", .state
= ARM_CP_STATE_AA64
,
2074 .opc0
= 3, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 0,
2075 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
2076 .fieldoffset
= offsetof(CPUARMState
, cp15
.mair_el
[1]),
2078 { .name
= "MAIR_EL3", .state
= ARM_CP_STATE_AA64
,
2079 .opc0
= 3, .opc1
= 6, .crn
= 10, .crm
= 2, .opc2
= 0,
2080 .access
= PL3_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.mair_el
[3]),
2082 /* For non-long-descriptor page tables these are PRRR and NMRR;
2083 * regardless they still act as reads-as-written for QEMU.
2085 /* MAIR0/1 are defined separately from their 64-bit counterpart which
2086 * allows them to assign the correct fieldoffset based on the endianness
2087 * handled in the field definitions.
2089 { .name
= "MAIR0", .state
= ARM_CP_STATE_AA32
,
2090 .cp
= 15, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 0,
2091 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
2092 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.mair0_s
),
2093 offsetof(CPUARMState
, cp15
.mair0_ns
) },
2094 .resetfn
= arm_cp_reset_ignore
},
2095 { .name
= "MAIR1", .state
= ARM_CP_STATE_AA32
,
2096 .cp
= 15, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 1,
2097 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
2098 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.mair1_s
),
2099 offsetof(CPUARMState
, cp15
.mair1_ns
) },
2100 .resetfn
= arm_cp_reset_ignore
},
2101 { .name
= "ISR_EL1", .state
= ARM_CP_STATE_BOTH
,
2102 .opc0
= 3, .opc1
= 0, .crn
= 12, .crm
= 1, .opc2
= 0,
2103 .type
= ARM_CP_NO_RAW
, .access
= PL1_R
, .readfn
= isr_read
},
2104 /* 32 bit ITLB invalidates */
2105 { .name
= "ITLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 0,
2106 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2107 .writefn
= tlbiall_write
},
2108 { .name
= "ITLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 1,
2109 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2110 .writefn
= tlbimva_write
},
2111 { .name
= "ITLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 2,
2112 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2113 .writefn
= tlbiasid_write
},
2114 /* 32 bit DTLB invalidates */
2115 { .name
= "DTLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 0,
2116 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2117 .writefn
= tlbiall_write
},
2118 { .name
= "DTLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 1,
2119 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2120 .writefn
= tlbimva_write
},
2121 { .name
= "DTLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 2,
2122 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2123 .writefn
= tlbiasid_write
},
2124 /* 32 bit TLB invalidates */
2125 { .name
= "TLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 0,
2126 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2127 .writefn
= tlbiall_write
},
2128 { .name
= "TLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 1,
2129 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2130 .writefn
= tlbimva_write
},
2131 { .name
= "TLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 2,
2132 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2133 .writefn
= tlbiasid_write
},
2134 { .name
= "TLBIMVAA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 3,
2135 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2136 .writefn
= tlbimvaa_write
},
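
/*
 * Editorial aside, not part of the original file: the AArch32 entries
 * above use offsetoflow32() so that e.g. PMCNTENSET aliases the low
 * 32 bits of the same uint64_t state that PMCNTENSET_EL0 accesses via
 * offsetof(). A compile-time check of that size assumption might look
 * like this (illustrative only):
 */
QEMU_BUILD_BUG_ON(sizeof(((CPUARMState *)0)->cp15.c9_pmcnten) !=
                  sizeof(uint64_t));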

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
};

static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
     * at all, so we don't need to check whether we're v8A.
     */
    if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TTEE)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return teecr_access(env, ri, isread);
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write, .accessfn = teecr_access },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
};
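
/*
 * Editorial sketch, not in the original sources: the gating implemented
 * by teehbr_access() above hinges on TEECR bit 0 (XED); while it is set,
 * EL0 accesses to TEEHBR trap. A hypothetical predicate for that single
 * bit, for illustration only:
 */
static inline bool teehbr_el0_disabled(uint32_t teecr)
{
    return teecr & 1; /* TEECR.XED */
}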

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R | PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R | PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);
    uint64_t hcr;
    uint32_t cntkctl;

    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            cntkctl = env->cp15.cnthctl_el2;
        } else {
            cntkctl = env->cp15.c14_cntkctl;
        }
        if (!extract32(cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }

        /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
        if (hcr & HCR_E2H) {
            if (timeridx == GTIMER_PHYS &&
                !extract32(env->cp15.cnthctl_el2, 10, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        } else {
            /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
            if (has_el2 && timeridx == GTIMER_PHYS &&
                !extract32(env->cp15.cnthctl_el2, 1, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;

    case 1:
        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
        if (has_el2 && timeridx == GTIMER_PHYS &&
            (hcr & HCR_E2H
             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /*
         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
         * EL0 if EL0[PV]TEN is zero.
         */
        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */

    case 1:
        if (has_el2 && timeridx == GTIMER_PHYS) {
            if (hcr & HCR_E2H) {
                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
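
/*
 * Editorial sketch, not in the original sources: the access helpers
 * above rely on the CNTKCTL_EL1 layout, where EL0PCTEN is bit 0,
 * EL0VCTEN is bit 1, EL0VTEN is bit 8 and EL0PTEN is bit 9. With
 * GTIMER_PHYS == 0 and GTIMER_VIRT == 1, the counter enable for a
 * timeridx is bit "timeridx" and the timer enable is bit
 * "9 - timeridx". Hypothetical helper, not used by the code:
 */
static inline bool gt_example_el0_timer_enabled(uint32_t cntkctl,
                                                int timeridx)
{
    return extract32(cntkctl, 9 - timeridx, 1);
}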

static uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
        } else {
            timer_mod(cpu->gt_timer[timeridx], nexttick);
        }
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
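
/*
 * Editorial sketch, not in the original sources: the saturation that
 * gt_recalc_timer() applies before arming the QEMUTimer. With the
 * default 62.5MHz frequency, gt_cntfrq_period_ns() is 16, so a
 * nexttick above INT64_MAX / 16 ticks is clamped to the farthest
 * representable deadline and re-armed when it expires. Hypothetical
 * helper, for illustration only:
 */
static inline int64_t gt_example_deadline_ns(uint64_t nexttick,
                                             uint64_t period_ns)
{
    if (nexttick > INT64_MAX / period_ns) {
        return INT64_MAX; /* saturate, as gt_recalc_timer() does */
    }
    return nexttick * period_ns;
}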

static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = env_archcpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_offset(CPUARMState *env)
{
    uint64_t hcr;

    switch (arm_current_el(env)) {
    case 2:
        hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_E2H) {
            return 0;
        }
        break;
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return 0;
        }
        break;
    }

    return env->cp15.cntvoff_el2;
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
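
/*
 * Editorial sketch, not in the original sources: the TVAL/CVAL
 * relationship used by gt_tval_read() and gt_tval_write() above.
 * Writing TVAL = n programs CVAL = (count - offset) + sext32(n), so
 * reading TVAL straight back returns roughly n again. Hypothetical
 * helper, for illustration only:
 */
static inline uint64_t gt_example_cval_from_tval(uint64_t count,
                                                 uint64_t offset,
                                                 uint32_t tval)
{
    return count - offset + sextract64(tval, 0, 32);
}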

static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static int gt_phys_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return GTIMER_HYP;
    default:
        return GTIMER_PHYS;
    }
}

static int gt_virt_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return GTIMER_HYPVIRT;
    default:
        return GTIMER_VIRT;
    }
}

static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
}

static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
}

static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
}
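
/*
 * Editorial note, not in the original sources: both redirection
 * helpers above answer the same question -- is the CPU currently in
 * an EL2&0 translation regime (FEAT_VHE, HCR_EL2.E2H set)? If so,
 * the nominally-EL0 CNTP and CNTV registers are served by the EL2
 * physical (GTIMER_HYP) and EL2 virtual (GTIMER_HYPVIRT) timers
 * instead. Hypothetical helper; the code above inlines this logic:
 */
static inline bool gt_example_in_e20_regime(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}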

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}

void arm_gt_hvtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYPVIRT);
}

static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
};

static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

#else

/* In user-mode most of the generic timer registers are inaccessible
 * however modern kernels (4.12+) allow access to cntvct_el0
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
};

#endif

static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}

#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                if (env->cp15.scr_el3 & SCR_EEL2) {
                    return CP_ACCESS_TRAP_UNCATEGORIZED_EL2;
                }
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}

#ifdef CONFIG_TCG
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    /*
     * ATS operations only do S1 or S1+S2 translations, so we never
     * have to deal with the ARMCacheAttrs format for S2 only.
     */
    assert(!cacheattrs.is_s2_format);

    if (ret) {
        /*
         * Some kinds of translation fault must cause exceptions rather
         * than being reported in the PAR.
         */
        int current_el = arm_current_el(env);
        int target_el;
        uint32_t syn, fsr, fsc;
        bool take_exc = false;

        if (fi.s1ptw && current_el == 1
            && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
            /*
             * Synchronous stage 2 fault on an access made as part of the
             * translation table walk for AT S1E0* or AT S1E1* insn
             * executed from NS EL1. If this is a synchronous external abort
             * and SCR_EL3.EA == 1, then we take a synchronous external abort
             * to EL3. Otherwise the fault is taken as an exception to EL2,
             * and HPFAR_EL2 holds the faulting IPA.
             */
            if (fi.type == ARMFault_SyncExternalOnWalk &&
                (env->cp15.scr_el3 & SCR_EA)) {
                target_el = 3;
            } else {
                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
                if (arm_is_secure_below_el3(env) && fi.s1ns) {
                    env->cp15.hpfar_el2 |= HPFAR_NS;
                }
                target_el = 2;
            }
            take_exc = true;
        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
            /*
             * Synchronous external aborts during a translation table walk
             * are taken as Data Abort exceptions.
             */
            if (fi.stage2) {
                if (current_el == 3) {
                    target_el = 3;
                } else {
                    target_el = 2;
                }
            } else {
                target_el = exception_target_el(env);
            }
            take_exc = true;
        }

        if (take_exc) {
            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
                fsr = arm_fi_to_lfsc(&fi);
                fsc = extract32(fsr, 0, 6);
            } else {
                fsr = arm_fi_to_sfsc(&fi);
                fsc = 0x3f;
            }
            /*
             * Report exception with ESR indicating a fault due to a
             * translation table walk for a cache maintenance instruction.
             */
            syn = syn_data_abort_no_iss(current_el == target_el, 0,
                                        fi.ea, 1, fi.s1ptw, 1, fsc);
            env->exception.vaddress = value;
            env->exception.fsr = fsr;
            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
        }
    }

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_E10_0 ||
                mmu_idx == ARMMMUIdx_E10_1 ||
                mmu_idx == ARMMMUIdx_E10_1_PAN) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
#endif /* CONFIG_TCG */
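
/*
 * Editorial sketch, not in the original sources: the successful
 * 64-bit PAR composition performed by do_ats_write() above -- LPAE
 * flag at bit 11, NS at bit 9, SH at bits [8:7], the physical address
 * above bit 12 and the MAIR-format attributes at [63:56].
 * Hypothetical helper, for illustration only:
 */
static inline uint64_t example_par64_ok(hwaddr pa, bool ns,
                                        unsigned sh, unsigned memattrs)
{
    uint64_t par64 = 1 << 11;           /* LPAE */

    par64 |= pa & ~0xfffULL;            /* PA */
    par64 |= (uint64_t)ns << 9;         /* NS */
    par64 |= (uint64_t)sh << 7;         /* SH */
    par64 |= (uint64_t)memattrs << 56;  /* ATTR */
    return par64;
}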

static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE3;
            break;
        case 2:
            g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
            /* fall through */
        case 1:
            if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
                           : ARMMMUIdx_Stage1_E1_PAN);
            } else {
                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE10_0;
            break;
        case 2:
            g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
            mmu_idx = ARMMMUIdx_Stage1_E0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_E10_1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}

static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}

static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 &&
        !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
            if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
                           : ARMMMUIdx_Stage1_E1_PAN);
            } else {
                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
            }
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_SE3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}
#endif

static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
#endif
};

/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format. */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}

static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}

static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}

static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
};

static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
};
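
/*
 * Editorial sketch, not in the original sources: extended_mpu_ap_bits()
 * spreads each 2-bit AP field of the "simple" view out to 4-bit
 * spacing and simple_mpu_ap_bits() packs them back, so the two are
 * inverses; e.g. simple 0x31 (AP0 = 1, AP2 = 3) becomes extended 0x301
 * and back again. Hypothetical round-trip check, for illustration only:
 */
static inline bool example_mpu_ap_roundtrip(void)
{
    return simple_mpu_ap_bits(extended_mpu_ap_bits(0x31)) == 0x31;
}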

static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
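
/*
 * Editorial worked example, not in the original sources: with
 * TTBCR.N == 2 the computation above yields
 *   tcr->mask      == ~(0xffffffffu >> 2) == 0xc0000000
 *   tcr->base_mask == ~(0x3fffu >> 2)     == 0xfffff000
 * so TTBR0 translates the low 1GB using a 4KB-aligned first-level
 * table, while N == 0 gives base_mask 0xffffc000 (a 16KB table),
 * matching the reset value installed by vmsa_ttbcr_reset() below.
 */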
3704 static void vmsa_ttbcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3707 ARMCPU
*cpu
= env_archcpu(env
);
3708 TCR
*tcr
= raw_ptr(env
, ri
);
3710 if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
3711 /* With LPAE the TTBCR could result in a change of ASID
3712 * via the TTBCR.A1 bit, so do a TLB flush.
3714 tlb_flush(CPU(cpu
));
3716 /* Preserve the high half of TCR_EL1, set via TTBCR2. */
3717 value
= deposit64(tcr
->raw_tcr
, 0, 32, value
);
3718 vmsa_ttbcr_raw_write(env
, ri
, value
);
3721 static void vmsa_ttbcr_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3723 TCR
*tcr
= raw_ptr(env
, ri
);
3725 /* Reset both the TCR as well as the masks corresponding to the bank of
3726 * the TCR being reset.
3730 tcr
->base_mask
= 0xffffc000u
;
3733 static void vmsa_tcr_el12_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3736 ARMCPU
*cpu
= env_archcpu(env
);
3737 TCR
*tcr
= raw_ptr(env
, ri
);
3739 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
3740 tlb_flush(CPU(cpu
));
3741 tcr
->raw_tcr
= value
;
3744 static void vmsa_ttbr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3747 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
3748 if (cpreg_field_is_64bit(ri
) &&
3749 extract64(raw_read(env
, ri
) ^ value
, 48, 16) != 0) {
3750 ARMCPU
*cpu
= env_archcpu(env
);
3751 tlb_flush(CPU(cpu
));
3753 raw_write(env
, ri
, value
);
static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * If we are running with E2&0 regime, then an ASID is active.
     * Flush if that might be changing.  Note we're not checking
     * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
     * holds the active ASID, only checking the field that might.
     */
    if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
        (arm_hcr_el2_eff(env) & HCR_E2H)) {
        uint16_t mask = ARMMMUIdxBit_E20_2 |
                        ARMMMUIdxBit_E20_2_PAN |
                        ARMMMUIdxBit_E20_0;

        if (arm_is_secure_below_el3(env)) {
            mask >>= ARM_MMU_IDX_A_NS;
        }

        tlb_flush_by_mmuidx(env_cpu(env), mask);
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    /*
     * A change in VMID to the stage2 page table (Stage2) invalidates
     * the combined stage 1&2 tlbs (EL10_1 and EL10_0).
     */
    if (raw_read(env, ri) != value) {
        uint16_t mask = ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0;

        if (arm_is_secure_below_el3(env)) {
            mask >>= ARM_MMU_IDX_A_NS;
        }

        tlb_flush_by_mmuidx(cs, mask);
        raw_write(env, ri, value);
    }
}
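/*
 * Note that QEMU's softmmu TLB entries are not tagged with the VMID, so
 * the write above conservatively flushes the combined stage 1&2 mmu
 * indexes on any change to VTTBR, not just a change of the VMID field
 * in the top bits of the register.
 */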
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
};
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_tcr_el12_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. */
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]),
                             offsetof(CPUARMState, cp15.tcr_el[1])} },
};

/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .accessfn = access_tvm_trvm,
    .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = {
        offsetofhigh32(CPUARMState, cp15.tcr_el[3].raw_tcr),
        offsetofhigh32(CPUARMState, cp15.tcr_el[1].raw_tcr),
    },
};
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
};
static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);

    if (arm_is_el2_enabled(env) && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}
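/*
 * Example encodings produced above: an SMP v7MP core reads MPIDR with
 * bit 31 (the "MP affinity format" RAO bit) set on top of the affinity
 * fields taken from cpu->mp_affinity, while a uniprocessor core that
 * still implements the MP extensions (such as a UP Cortex-R5)
 * additionally sets bit 30, the U bit.
 */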
static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);

    if (arm_is_el2_enabled(env) && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}
static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_PAN;
}

static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
}

static const ARMCPRegInfo pan_reginfo = {
    .name = "PAN", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_pan_read, .writefn = aa64_pan_write
};

static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_UAO;
}

static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
}

static const ARMCPRegInfo uao_reginfo = {
    .name = "UAO", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_uao_read, .writefn = aa64_uao_write
};

static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_DIT;
}

static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
}

static const ARMCPRegInfo dit_reginfo = {
    .name = "DIT", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
    .type = ARM_CP_NO_RAW, .access = PL0_RW,
    .readfn = aa64_dit_read, .writefn = aa64_dit_write
};

static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SSBS;
}

static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
}

static const ARMCPRegInfo ssbs_reginfo = {
    .name = "SSBS", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
    .type = ARM_CP_NO_RAW, .access = PL0_RW,
    .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
};
static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
                                              const ARMCPRegInfo *ri,
                                              bool isread)
{
    /* Cache invalidate/clean to Point of Coherency or Persistence... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set.  */
        if (arm_hcr_el2_eff(env) & HCR_TPCP) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}
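/*
 * E.g. DC CVAC executed at EL0 UNDEFs while SCTLR_EL1.UCI is clear; once
 * UCI is set it can still be trapped to EL2 by HCR_EL2.TPCP, because the
 * EL0 case above deliberately falls through into the EL1 check.
 */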
static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
                                              const ARMCPRegInfo *ri,
                                              bool isread)
{
    /* Cache invalidate/clean to Point of Unification... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set.  */
        if (arm_hcr_el2_eff(env) & HCR_TPU) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static int vae1_tlbmask(CPUARMState *env)
{
    uint64_t hcr = arm_hcr_el2_eff(env);
    uint16_t mask;

    if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
        mask = ARMMMUIdxBit_E20_2 |
               ARMMMUIdxBit_E20_2_PAN |
               ARMMMUIdxBit_E20_0;
    } else {
        mask = ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0;
    }

    if (arm_is_secure_below_el3(env)) {
        mask >>= ARM_MMU_IDX_A_NS;
    }

    return mask;
}
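/*
 * For instance, with HCR_EL2.{E2H,TGE} == '11' a TLBI VMALLE1 targets the
 * EL2&0 regime, so the mask above selects the E20_* mmu indexes; otherwise
 * it selects the EL1&0 (E10_*) indexes, shifted down to their Secure
 * equivalents when executing in Secure state below EL3.
 */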
/* Return 56 if TBI is enabled, 64 otherwise. */
static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
                              uint64_t addr)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
    int select = extract64(addr, 55, 1);

    return (tbi >> select) & 1 ? 56 : 64;
}
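/*
 * E.g. when TBI is enabled for the half of the VA space selected by
 * addr[55], only bits [55:0] of an invalidation address are significant,
 * so the 56 returned here lets the TLB flush ignore the tag byte.
 */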
static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
{
    uint64_t hcr = arm_hcr_el2_eff(env);
    ARMMMUIdx mmu_idx;

    /* Only the regime of the mmu_idx below is significant. */
    if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
        mmu_idx = ARMMMUIdx_E20_0;
    } else {
        mmu_idx = ARMMMUIdx_E10_0;
    }

    if (arm_is_secure_below_el3(env)) {
        mmu_idx &= ~ARM_MMU_IDX_A_NS;
    }

    return tlbbits_for_regime(env, mmu_idx, addr);
}
static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
    } else {
        tlb_flush_by_mmuidx(cs, mask);
    }
}
static int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE10_1 |
               ARMMMUIdxBit_SE10_1_PAN |
               ARMMMUIdxBit_SE10_0;
    } else {
        return ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0;
    }
}

static int e2_tlbmask(CPUARMState *env)
{
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE20_0 |
               ARMMMUIdxBit_SE20_2 |
               ARMMMUIdxBit_SE20_2_PAN |
               ARMMMUIdxBit_SE2;
    } else {
        return ARMMMUIdxBit_E20_0 |
               ARMMMUIdxBit_E20_2 |
               ARMMMUIdxBit_E20_2_PAN |
               ARMMMUIdxBit_E2;
    }
}
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
}
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);
    int bits = vae1_tlbbits(env, pageaddr);

    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
}
*env
, const ARMCPRegInfo
*ri
,
4457 /* Invalidate by VA, EL1&0 (AArch64 version).
4458 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
4459 * since we don't support flush-for-specific-ASID-only or
4460 * flush-last-level-only.
4462 CPUState
*cs
= env_cpu(env
);
4463 int mask
= vae1_tlbmask(env
);
4464 uint64_t pageaddr
= sextract64(value
<< 12, 0, 56);
4465 int bits
= vae1_tlbbits(env
, pageaddr
);
4467 if (tlb_force_broadcast(env
)) {
4468 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs
, pageaddr
, mask
, bits
);
4470 tlb_flush_page_bits_by_mmuidx(cs
, pageaddr
, mask
, bits
);
4474 static void tlbi_aa64_vae2is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4477 CPUState
*cs
= env_cpu(env
);
4478 uint64_t pageaddr
= sextract64(value
<< 12, 0, 56);
4479 bool secure
= arm_is_secure_below_el3(env
);
4480 int mask
= secure
? ARMMMUIdxBit_SE2
: ARMMMUIdxBit_E2
;
4481 int bits
= tlbbits_for_regime(env
, secure
? ARMMMUIdx_SE2
: ARMMMUIdx_E2
,
4484 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs
, pageaddr
, mask
, bits
);
4487 static void tlbi_aa64_vae3is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4490 CPUState
*cs
= env_cpu(env
);
4491 uint64_t pageaddr
= sextract64(value
<< 12, 0, 56);
4492 int bits
= tlbbits_for_regime(env
, ARMMMUIdx_SE3
, pageaddr
);
4494 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs
, pageaddr
,
4495 ARMMMUIdxBit_SE3
, bits
);
#ifdef TARGET_AARCH64
typedef struct {
    uint64_t base;
    uint64_t length;
} TLBIRange;

static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
                                     uint64_t value)
{
    unsigned int page_size_granule, page_shift, num, scale, exponent;
    /* Extract one bit to represent the va selector in use. */
    uint64_t select = sextract64(value, 36, 1);
    ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true);
    TLBIRange ret = { };

    page_size_granule = extract64(value, 46, 2);

    /* The granule encoded in value must match the granule in use. */
    if (page_size_granule != (param.using64k ? 3 : param.using16k ? 2 : 1)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
                      page_size_granule);
        return ret;
    }

    page_shift = (page_size_granule - 1) * 2 + 12;
    num = extract64(value, 39, 5);
    scale = extract64(value, 44, 2);
    exponent = (5 * scale) + 1;

    ret.length = (num + 1) << (exponent + page_shift);

    if (param.select) {
        ret.base = sextract64(value, 0, 37);
    } else {
        ret.base = extract64(value, 0, 37);
    }
    if (param.ds) {
        /*
         * With DS=1, BaseADDR is always shifted 16 so that it is able
         * to address all 52 va bits.  The input address is perforce
         * aligned on a 64k boundary regardless of translation granule.
         */
        page_shift = 16;
    }
    ret.base <<= page_shift;

    return ret;
}
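/*
 * Decode example: a TLBI RVAE1 value with TG = 0b01 (4K granule),
 * SCALE = 0 and NUM = 0 gives page_shift = 12 and exponent = 1, i.e.
 * ret.length = (0 + 1) << (1 + 12) = 8K -- the smallest expressible
 * range of two 4K pages starting at BaseADDR << 12.
 */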
static void do_rvae_write(CPUARMState *env, uint64_t value,
                          int idxmap, bool synced)
{
    ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
    TLBIRange range;
    int bits;

    range = tlbi_aa64_get_range(env, one_idx, value);
    bits = tlbbits_for_regime(env, one_idx, range.base);

    if (synced) {
        tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
                                                  range.base,
                                                  range.length,
                                                  idxmap,
                                                  bits);
    } else {
        tlb_flush_range_by_mmuidx(env_cpu(env), range.base,
                                  range.length, idxmap, bits);
    }
}
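/*
 * The 'synced' flag distinguishes the Inner Shareable forms (and writes
 * forced to broadcast) from purely local invalidations: the former must
 * take effect on every CPU before the TLBI can be considered complete,
 * hence the *_all_cpus_synced variant above.
 */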
static void tlbi_aa64_rvae1_write(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * Invalidate by VA range, EL1&0.
     * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */

    do_rvae_write(env, value, vae1_tlbmask(env),
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae1is_write(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * Invalidate by VA range, Inner/Outer Shareable EL1&0.
     * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
     * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
     * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
     * shareable specific flushes.
     */

    do_rvae_write(env, value, vae1_tlbmask(env), true);
}

static int vae2_tlbmask(CPUARMState *env)
{
    return (arm_is_secure_below_el3(env)
            ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
}

static void tlbi_aa64_rvae2_write(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * Invalidate by VA range, EL2.
     * Currently handles all of RVAE2 and RVALE2,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */

    do_rvae_write(env, value, vae2_tlbmask(env),
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae2is_write(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * Invalidate by VA range, Inner/Outer Shareable, EL2.
     * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
     * since we don't support flush-for-specific-ASID-only,
     * flush-last-level-only or inner/outer shareable specific flushes.
     */

    do_rvae_write(env, value, vae2_tlbmask(env), true);
}

static void tlbi_aa64_rvae3_write(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * Invalidate by VA range, EL3.
     * Currently handles all of RVAE3 and RVALE3,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */

    do_rvae_write(env, value, ARMMMUIdxBit_SE3,
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae3is_write(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * Invalidate by VA range, EL3, Inner/Outer Shareable.
     * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
     * since we don't support flush-for-specific-ASID-only,
     * flush-last-level-only or inner/outer specific flushes.
     */

    do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
}
#endif
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TDZ) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TDZ) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
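/*
 * DCZID_EL0 layout recap: bits [3:0] hold log2 of the zeroing block size
 * in words, and bit 4 (DZP) reads as 1 while DC ZVA is prohibited. E.g.
 * with cpu->dcz_blocksize == 4 and ZVA permitted this returns 0x4, i.e.
 * a 16-word (64-byte) block.
 */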
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    /* ??? Lots of these bits are not implemented.  */

    if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
        if (ri->opc1 == 6) { /* SCTLR_EL3 */
            value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
        } else {
            value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
                       SCTLR_ATA0 | SCTLR_ATA);
        }
    }

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    raw_write(env, ri, value);

    /* This may enable/disable the MMU, so do a TLB flush.  */
    tlb_flush(CPU(cpu));

    if (ri->type & ARM_CP_SUPPRESS_TB_END) {
        /*
         * Normally we would always end the TB on an SCTLR write; see the
         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
         * is special.  Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
         * of hflags from the translator, so do it here.
         */
        arm_rebuild_hflags(env);
    }
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
      .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL2_W },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
};
static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
    } else {
        valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        if (cpu_isar_feature(aa64_vh, cpu)) {
            valid_mask |= HCR_E2H;
        }
        if (cpu_isar_feature(aa64_ras, cpu)) {
            valid_mask |= HCR_TERR | HCR_TEA;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= HCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= HCR_API | HCR_APK;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
        }
        if (cpu_isar_feature(aa64_scxtnum, cpu)) {
            valid_mask |= HCR_ENSCXT;
        }
        if (cpu_isar_feature(aa64_fwb, cpu)) {
            valid_mask |= HCR_FWB;
        }
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /*
     * These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     * HCR_DCT enables tagging on (disabled) stage1 translation
     * HCR_FWB changes the interpretation of stage2 descriptor bits
     */
    if ((env->cp15.hcr_el2 ^ value) &
        (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
    arm_cpu_update_vserr(cpu);
}
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    do_hcr_write(env, value, 0);
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
}
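/*
 * An AArch32 hypervisor sees HCR_EL2 as the register pair HCR/HCR2, so
 * each 32-bit write above is merged into the full 64-bit value with
 * deposit64, and the untouched half is passed into do_hcr_write() via
 * the valid_mask argument so that it is preserved as-is.
 */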
/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (!arm_is_el2_enabled(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        return 0;
    }

    /*
     * For a cpu that supports both aarch64 and aarch32, we can set bits
     * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
     * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
     */
    if (!arm_el_is_aa64(env, 2)) {
        uint64_t aa32_valid;

        /*
         * These bits are up-to-date as of ARMv8.6.
         * For HCR, it's easiest to list just the 2 bits that are invalid.
         * For HCR2, list those that are valid.
         */
        aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
        aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
        ret &= aa32_valid;
    }

    if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.6.  */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
                     HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
                     HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}
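/*
 * For example, with HCR_EL2.TGE set and E2H clear, the value returned
 * above has FMO/IMO/AMO forced to 1 (physical interrupts target EL2)
 * and the EL1-trapping and virtual-interrupt bits masked out, all
 * without the stored HCR_EL2 value itself changing.
 */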
static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    uint64_t valid_mask = 0;

    /* No features adding bits to HCRX are implemented. */

    /* Clear RES0 bits.  */
    env->cp15.hcrx_el2 = value & valid_mask;
}

static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_HXEN)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo hcrx_el2_reginfo = {
    .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
    .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
    .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
};
/* Return the effective value of HCRX_EL2.  */
uint64_t arm_hcrx_el2_eff(CPUARMState *env)
{
    /*
     * The bits in this register behave as 0 for all purposes other than
     * direct reads of the register if:
     *   - EL2 is not enabled in the current security state,
     *   - SCR_EL3.HXEn is 0.
     */
    if (!arm_is_el2_enabled(env)
        || (arm_feature(env, ARM_FEATURE_EL3)
            && !(env->cp15.scr_el3 & SCR_HXEN))) {
        return 0;
    }
    return env->cp15.hcrx_el2;
}
static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
        value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
    }
    env->cp15.cptr_el[2] = value;
}

static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
    }
    return value;
}
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
      /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
};
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
};
static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
    { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = sel2_access,
      .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
    { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
      .access = PL2_RW, .accessfn = sel2_access,
      .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
};
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3 or EL2.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetfn = scr_reset, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
};
#ifndef CONFIG_USER_ONLY
/* Test if system register redirection is to occur in the current state.  */
static bool redirect_for_e2h(CPUARMState *env)
{
    return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
}
static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPReadFn *readfn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register.  */
        ri = ri->opaque;
        readfn = ri->readfn;
    } else {
        readfn = ri->orig_readfn;
    }
    if (readfn == NULL) {
        readfn = raw_read;
    }
    return readfn(env, ri);
}
static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    CPWriteFn *writefn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register.  */
        ri = ri->opaque;
        writefn = ri->writefn;
    } else {
        writefn = ri->orig_writefn;
    }
    if (writefn == NULL) {
        writefn = raw_write;
    }
    writefn(env, ri, value);
}
static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
{
    struct E2HAlias {
        uint32_t src_key, dst_key, new_key;
        const char *src_name, *dst_name, *new_name;
        bool (*feature)(const ARMISARegisters *id);
    };

#define K(op0, op1, crn, crm, op2) \
    ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)

    static const struct E2HAlias aliases[] = {
        { K(3, 0,  1, 0, 0), K(3, 4,  1, 0, 0), K(3, 5, 1, 0, 0),
          "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
        { K(3, 0,  1, 0, 2), K(3, 4,  1, 1, 2), K(3, 5, 1, 0, 2),
          "CPACR", "CPTR_EL2", "CPACR_EL12" },
        { K(3, 0,  2, 0, 0), K(3, 4,  2, 0, 0), K(3, 5, 2, 0, 0),
          "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
        { K(3, 0,  2, 0, 1), K(3, 4,  2, 0, 1), K(3, 5, 2, 0, 1),
          "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
        { K(3, 0,  2, 0, 2), K(3, 4,  2, 0, 2), K(3, 5, 2, 0, 2),
          "TCR_EL1", "TCR_EL2", "TCR_EL12" },
        { K(3, 0,  4, 0, 0), K(3, 4,  4, 0, 0), K(3, 5, 4, 0, 0),
          "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
        { K(3, 0,  4, 0, 1), K(3, 4,  4, 0, 1), K(3, 5, 4, 0, 1),
          "ELR_EL1", "ELR_EL2", "ELR_EL12" },
        { K(3, 0,  5, 1, 0), K(3, 4,  5, 1, 0), K(3, 5, 5, 1, 0),
          "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
        { K(3, 0,  5, 1, 1), K(3, 4,  5, 1, 1), K(3, 5, 5, 1, 1),
          "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
        { K(3, 0,  5, 2, 0), K(3, 4,  5, 2, 0), K(3, 5, 5, 2, 0),
          "ESR_EL1", "ESR_EL2", "ESR_EL12" },
        { K(3, 0,  6, 0, 0), K(3, 4,  6, 0, 0), K(3, 5, 6, 0, 0),
          "FAR_EL1", "FAR_EL2", "FAR_EL12" },
        { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
          "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
        { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
          "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
        { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
          "VBAR", "VBAR_EL2", "VBAR_EL12" },
        { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
          "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
        { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
          "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },

        /*
         * Note that redirection of ZCR is mentioned in the description
         * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
         * not in the summary table.
         */
        { K(3, 0,  1, 2, 0), K(3, 4,  1, 2, 0), K(3, 5, 1, 2, 0),
          "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },

        { K(3, 0,  5, 6, 0), K(3, 4,  5, 6, 0), K(3, 5, 5, 6, 0),
          "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },

        { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
          "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
          isar_feature_aa64_scxtnum },

        /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
        /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
    };
#undef K

    size_t i;

    for (i = 0; i < ARRAY_SIZE(aliases); i++) {
        const struct E2HAlias *a = &aliases[i];
        ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
        bool ok;

        if (a->feature && !a->feature(&cpu->isar)) {
            continue;
        }

        src_reg = g_hash_table_lookup(cpu->cp_regs,
                                      (gpointer)(uintptr_t)a->src_key);
        dst_reg = g_hash_table_lookup(cpu->cp_regs,
                                      (gpointer)(uintptr_t)a->dst_key);
        g_assert(src_reg != NULL);
        g_assert(dst_reg != NULL);

        /* Cross-compare names to detect typos in the keys.  */
        g_assert(strcmp(src_reg->name, a->src_name) == 0);
        g_assert(strcmp(dst_reg->name, a->dst_name) == 0);

        /* None of the core system registers use opaque; we will.  */
        g_assert(src_reg->opaque == NULL);

        /* Create alias before redirection so we dup the right data.  */
        new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));

        new_reg->name = a->new_name;
        new_reg->type |= ARM_CP_ALIAS;
        /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place.  */
        new_reg->access &= PL2_RW | PL3_RW;

        ok = g_hash_table_insert(cpu->cp_regs,
                                 (gpointer)(uintptr_t)a->new_key, new_reg);
        g_assert(ok);

        src_reg->opaque = dst_reg;
        src_reg->orig_readfn = src_reg->readfn ?: raw_read;
        src_reg->orig_writefn = src_reg->writefn ?: raw_write;
        if (!src_reg->raw_readfn) {
            src_reg->raw_readfn = raw_read;
        }
        if (!src_reg->raw_writefn) {
            src_reg->raw_writefn = raw_write;
        }
        src_reg->readfn = el2_e2h_read;
        src_reg->writefn = el2_e2h_write;
    }
}
#endif
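
/*
 * Illustrative sketch of the redirection above (not upstream code): once
 * define_arm_vh_e2h_redirects_aliases() has run, an access to an _EL1
 * register goes through el2_e2h_read()/el2_e2h_write(). At EL2 with
 * HCR_EL2.E2H == 1 the access is redirected to the EL2 reginfo saved in
 * ri->opaque; otherwise the original EL1 accessor (or raw_read/raw_write)
 * is used. From a guest's point of view:
 *
 *     MRS x0, SCTLR_EL1    // at EL2 with E2H=1: reads cp15.sctlr_el[2]
 *     MRS x0, SCTLR_EL12   // the new alias: always the EL1 register
 */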
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TID2) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TID2) {
            return CP_ACCESS_TRAP_EL2;
        }
    }

    if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}
static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
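
/*
 * Usage note (illustrative only): AArch32 software sets the OS lock by
 * writing the architected key value, AArch64 by writing bit 0; either way
 * the result is reflected in OSLSR_EL1.OSLK (bit 1):
 *
 *     MCR p14, 0, r0, c1, c0, 4    // r0 == 0xC5ACCE55 -> OSLK = 1
 *     MSR OSLAR_EL1, x0            // x0 bit 0         -> OSLK = x0 & 1
 */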
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /*
     * MDCCSR_EL0[30:29] map to EDSCR[30:29].  Simply RAZ as the external
     * Debug Communication Channel is not implemented.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2].  Map all bits as
     * it is unlikely a guest will care.
     * We don't implement the configurable EL0 access.
     */
    { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32,
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
};
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
};
/*
 * Check for traps to RAS registers, which are controlled
 * by HCR_EL2.TERR and SCR_EL3.TERR.
 */
static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
        return env->cp15.vdisr_el2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
        return 0; /* RAZ/WI */
    }
    return env->cp15.disr_el1;
}
static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
        env->cp15.vdisr_el2 = val;
        return;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
        return; /* RAZ/WI */
    }
    env->cp15.disr_el1 = val;
}
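
/*
 * Worked example (a sketch, not upstream code): at EL1 with HCR_EL2.AMO set
 * and SCR_EL3.EA clear, DISR_EL1 accesses are transparently redirected to
 * the virtual register:
 *
 *     disr_write(env, ri, 0x123);           // lands in cp15.vdisr_el2
 *     assert(disr_read(env, ri) == 0x123);  // read returns VDISR_EL2
 *     assert(env->cp15.disr_el1 == 0);      // real DISR_EL1 untouched
 *
 * (assuming cp15.disr_el1 started out as 0).
 */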
/*
 * Minimal RAS implementation with no Error Records.
 * Which means that all of the Error Record registers:
 *   ERXADDR_EL1
 *   ERXCTLR_EL1
 *   ERXFR_EL1
 *   ERXMISC0_EL1
 *   ERXMISC1_EL1
 *   ERXMISC2_EL1
 *   ERXMISC3_EL1
 *   ERXPFGCDN_EL1  (RASv1p1)
 *   ERXPFGCTL_EL1  (RASv1p1)
 *   ERXPFGF_EL1    (RASv1p1)
 *   ERXSTATUS_EL1
 * and
 *   ERRSELR_EL1
 * may generate UNDEFINED, which is the effect we get by not
 * listing them at all.
 */
static const ARMCPRegInfo minimal_ras_reginfo[] = {
    { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
      .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
    { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
      .access = PL1_R, .accessfn = access_terr,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
    { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
};
/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);

    if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
        case 1:
            if (el != 0) {
                break;
            }
            /* fall through */
        case 0:
        case 2:
            /* route_to_el2 */
            return hcr_el2 & HCR_TGE ? 2 : 1;
        }

        /* Check CPACR.FPEN.  */
        switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN)) {
        case 1:
            if (el != 0) {
                break;
            }
            /* fall through */
        case 0:
        case 2:
            return 0;
        }
    }

    /*
     * CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE).
     */
    if (el <= 2) {
        if (hcr_el2 & HCR_E2H) {
            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
            case 1:
                if (el != 0 || !(hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 2;
            }

            switch (FIELD_EX32(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
            case 1:
                if (el == 2 || !(hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 0;
            }
        } else if (arm_is_el2_enabled(env)) {
            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
                return 2;
            }
            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
                return 0;
            }
        }
    }

    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
        return 3;
    }
#endif
    return 0;
}
uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
{
    uint32_t end_len;

    start_len = MIN(start_len, ARM_MAX_VQ - 1);
    end_len = start_len;

    if (!test_bit(start_len, cpu->sve_vq_map)) {
        end_len = find_last_bit(cpu->sve_vq_map, start_len);
        assert(end_len < start_len);
    }
    return end_len;
}
/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }

    return aarch64_sve_zcr_get_valid_len(cpu, zcr_len);
}
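
/*
 * Worked example (illustrative only): on a CPU with sve_max_vq == 16, if
 * ZCR_EL1 == 5 and ZCR_EL2/ZCR_EL3 hold 0xf (or those ELs are absent),
 * then for el == 1:
 *
 *     zcr_len = MIN(15, 5) = 5    // i.e. vq == 6, 768-bit vectors
 *
 * subject to aarch64_sve_zcr_get_valid_len() rounding down to the nearest
 * vector length actually present in cpu->sve_vq_map.
 */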
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI.  */
    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
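
/*
 * Illustrative sketch (not upstream code): shrinking the vector length
 * zeroes the now-inaccessible high bits of the Z/P registers. For example,
 * at EL1 with no tighter EL2/EL3 constraint:
 *
 *     MSR ZCR_EL1, xzr    // request vq == 1 (128-bit vectors)
 *
 * makes new_len < old_len, so aarch64_sve_narrow_vq(env, 1) clears the
 * vector state above bit 127. Growing the length again later does not
 * bring the old contents back.
 */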
static const ARMCPRegInfo zcr_reginfo[] = {
    { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_SVE,
      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
      .writefn = zcr_write, .raw_writefn = raw_write },
    { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_SVE,
      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
      .writefn = zcr_write, .raw_writefn = raw_write },
    { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_SVE,
      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
      .writefn = zcr_write, .raw_writefn = raw_write },
};
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!FIELD_EX64(wcr, DBGWCR, E)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (FIELD_EX64(wcr, DBGWCR, LSC)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = FIELD_EX64(wcr, DBGWCR, MASK);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = FIELD_EX64(wcr, DBGWCR, BAS);
        int basstart;

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
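
/*
 * Worked example (illustrative only): with MASK == 0, WVR 8-aligned and
 * BAS == 0b00110000, the contiguous run of 1s starts at bit 4 and is
 * 2 bits long, so:
 *
 *     basstart = ctz32(0x30) = 4;
 *     len      = cto32(0x30 >> 4) = 2;
 *     wvr     += 4;    // watch the two bytes at WVR+4 and WVR+5
 */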
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /*
     * Bits [1:0] are RES0.
     *
     * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA)
     * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if
     * they contain the value written.  It is CONSTRAINED UNPREDICTABLE
     * whether the RESS bits are ignored when comparing an address.
     *
     * Therefore we are allowed to compare the entire register, which lets
     * us avoid considering whether or not FEAT_LVA is actually enabled.
     */
    value &= ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /*
         * Bits [1:0] are RES0.
         *
         * It is IMPLEMENTATION DEFINED whether bits [63:49]
         * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
         * of the VA field ([48] or [52] for FEAT_LVA), or whether the
         * value is read as written.  It is CONSTRAINED UNPREDICTABLE
         * whether the RESS bits are ignored when comparing an address.
         * Therefore we are allowed to compare the entire register, which
         * lets us avoid considering whether FEAT_LVA is actually enabled.
         *
         * The BAS field is used to allow setting breakpoints on 16-bit
         * wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = bvr & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
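
/*
 * Worked example (illustrative only): the deposits above force BAS into one
 * of the architecturally valid patterns. A guest write with only BAS[0]
 * set (bit 5, BAS == 0b0001) is stored as 0b0011, and one with only BAS[2]
 * set (bit 7, BAS == 0b0100) becomes 0b1100 -- matching the
 * 0b0011/0b1100/0b1111 cases that hw_breakpoint_update() handles.
 */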
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;

    /*
     * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot
     * use AArch32.  Given that bit 15 is RES1, if the value is 0 then
     * the register must not exist for this cpu.
     */
    if (cpu->isar.dbgdidr != 0) {
        ARMCPRegInfo dbgdidr = {
            .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0,
            .opc1 = 0, .opc2 = 0,
            .access = PL0_R, .accessfn = access_tda,
            .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
        };
        define_one_arm_cp_reg(cpu, &dbgdidr);
    }

    brps = arm_num_brps(cpu);
    wrps = arm_num_wrps(cpu);
    ctx_cmps = arm_num_ctx_cmps(cpu);

    assert(ctx_cmps <= brps);

    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps; i++) {
        char *dbgbvr_el1_name = g_strdup_printf("DBGBVR%d_EL1", i);
        char *dbgbcr_el1_name = g_strdup_printf("DBGBCR%d_EL1", i);
        ARMCPRegInfo dbgregs[] = {
            { .name = dbgbvr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = dbgbcr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
        };
        define_arm_cp_regs(cpu, dbgregs);
        g_free(dbgbvr_el1_name);
        g_free(dbgbcr_el1_name);
    }

    for (i = 0; i < wrps; i++) {
        char *dbgwvr_el1_name = g_strdup_printf("DBGWVR%d_EL1", i);
        char *dbgwcr_el1_name = g_strdup_printf("DBGWCR%d_EL1", i);
        ARMCPRegInfo dbgregs[] = {
            { .name = dbgwvr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = dbgwcr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
        };
        define_arm_cp_regs(cpu, dbgregs);
        g_free(dbgwvr_el1_name);
        g_free(dbgwcr_el1_name);
    }
}
static void define_pmu_regs(ARMCPU *cpu)
{
    /*
     * v7 performance monitor control register: same implementor
     * field as main ID register, and we implement four counters in
     * addition to the cycle count register.
     */
    unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
    ARMCPRegInfo pmcr = {
        .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
        .access = PL0_RW,
        .type = ARM_CP_IO | ARM_CP_ALIAS,
        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
        .accessfn = pmreg_access, .writefn = pmcr_write,
        .raw_writefn = raw_write,
    };
    ARMCPRegInfo pmcr64 = {
        .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
        .access = PL0_RW, .accessfn = pmreg_access,
        .type = ARM_CP_IO,
        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
        .resetvalue = cpu->isar.reset_pmcr_el0,
        .writefn = pmcr_write, .raw_writefn = raw_write,
    };

    define_one_arm_cp_reg(cpu, &pmcr);
    define_one_arm_cp_reg(cpu, &pmcr64);
    for (i = 0; i < pmcrn; i++) {
        char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
        char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
        char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
        char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
        ARMCPRegInfo pmev_regs[] = {
            { .name = pmevcntr_name, .cp = 15, .crn = 14,
              .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
              .accessfn = pmreg_access_xevcntr },
            { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
              .type = ARM_CP_IO,
              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
              .raw_readfn = pmevcntr_rawread,
              .raw_writefn = pmevcntr_rawwrite },
            { .name = pmevtyper_name, .cp = 15, .crn = 14,
              .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
              .accessfn = pmreg_access },
            { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
              .type = ARM_CP_IO,
              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
              .raw_writefn = pmevtyper_rawwrite },
        };
        define_arm_cp_regs(cpu, pmev_regs);
        g_free(pmevcntr_name);
        g_free(pmevcntr_el0_name);
        g_free(pmevtyper_name);
        g_free(pmevtyper_el0_name);
    }
    if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (cpu_isar_feature(any_pmu_8_4, cpu)) {
        static const ARMCPRegInfo v84_pmmir = {
            .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
            .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
            .resetvalue = 0
        };
        define_one_arm_cp_reg(cpu, &v84_pmmir);
    }
}
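
/*
 * Worked example (illustrative only): the expressions above pack the event
 * counter index into crm/opc2. For counter i == 11:
 *
 *     .crm  = 8 | (3 & (11 >> 3)) = 9;    // PMEVCNTR11 -> crm 9
 *     .opc2 = 11 & 7              = 3;
 *
 * which matches the architected PMEVCNTR<n>/PMEVTYPER<n> encoding scheme
 * (crm 8..11 for the counters, crm 12..15 for the type registers).
 */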
/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr1 = cpu->isar.id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

#ifndef CONFIG_USER_ONLY
static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
#endif
/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state exclusion has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env,
                                    const ARMCPRegInfo *ri, bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode.  */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env, ri, isread);
}
/*
 * A trivial implementation of ARMv8.1-LOR leaves all of these
 * registers fixed at 0, which indicates that there are zero
 * supported Limited Ordering regions.
 */
static const ARMCPRegInfo lor_reginfo[] = {
    { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
      .access = PL1_R, .accessfn = access_lor_ns,
      .type = ARM_CP_CONST, .resetvalue = 0 },
};
#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_is_el2_enabled(env) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
};
6865 static const ARMCPRegInfo tlbirange_reginfo
[] = {
6866 { .name
= "TLBI_RVAE1IS", .state
= ARM_CP_STATE_AA64
,
6867 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 2, .opc2
= 1,
6868 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
6869 .writefn
= tlbi_aa64_rvae1is_write
},
6870 { .name
= "TLBI_RVAAE1IS", .state
= ARM_CP_STATE_AA64
,
6871 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 2, .opc2
= 3,
6872 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
6873 .writefn
= tlbi_aa64_rvae1is_write
},
6874 { .name
= "TLBI_RVALE1IS", .state
= ARM_CP_STATE_AA64
,
6875 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 2, .opc2
          = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2_write },
    { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2_write },
    { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3_write },
    { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3_write },
};
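
/*
 * Outer-shareable (..OS) TLB maintenance operations (FEAT_TLBIOS).
 * QEMU does not model shareability domains, so these reuse the same
 * write functions as the corresponding inner-shareable (..IS) ops.
 */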
static const ARMCPRegInfo tlbios_reginfo[] = {
    { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
};
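
/*
 * RNDR/RNDRRS readfn: on success the architecture requires NZCV = 0000;
 * a failure of the host random source is reported to the guest via the
 * architected failure indication, NZCV = 0100.
 */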
static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000. */
    env->NF = env->CF = env->VF = 0, env->ZF = 1;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest.  There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      ri->name, error_get_pretty(err));
        error_free(err);

        env->ZF = 0; /* NZCV = 0100 */
        return 0;
    }
    return ret;
}
/* We do not support re-seeding, so the two registers operate the same. */
static const ARMCPRegInfo rndr_reginfo[] = {
    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
      .access = PL0_R, .readfn = rndr_readfn },
    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
      .access = PL0_R, .readfn = rndr_readfn },
};
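
/*
 * DC CVAP/CVADP: clean data cache by VA to the point of persistence.
 * Emulated by locating the backing host memory for the cache line and
 * asking the memory API to write it back, so guest data reaches any
 * persistent-memory backend.
 */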
#ifndef CONFIG_USER_ONLY
static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
                           uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /* CTR_EL0 System register -> DminLine, bits [19:16] */
    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
    uint64_t vaddr_in = (uint64_t) value;
    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
    void *haddr;
    int mem_idx = cpu_mmu_index(env, false);

    /* This won't be crossing page boundaries */
    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
    if (haddr) {
        ram_addr_t offset;
        MemoryRegion *mr;

        /* RCU lock is already being held */
        mr = memory_region_from_host(haddr, &offset);

        if (mr) {
            memory_region_writeback(mr, offset, dline_size);
        }
    }
}
static const ARMCPRegInfo dcpop_reg[] = {
    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
};

static const ARMCPRegInfo dcpodp_reg[] = {
    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
};
#endif /* CONFIG_USER_ONLY */
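
/*
 * Access checks for the MTE system registers: HCR_EL2.TID5 traps
 * GMID_EL1 reads to EL2, while HCR_EL2.ATA and SCR_EL3.ATA gate
 * access to the allocation-tag registers from lower ELs.
 */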
static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_ATA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_TCO;
}

static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
}
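
/*
 * System registers for full FEAT_MTE: tag fault status, tag seed and
 * exclude state, plus the tag-aware cache maintenance operations,
 * which are NOPs here since QEMU's TCG does not model caches.
 */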
static const ARMCPRegInfo mte_reginfo[] = {
    { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
    { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
    { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
    { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
    { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
    { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
    { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
      .access = PL1_R, .accessfn = access_aa64_tid5,
      .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
    { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL1_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL1_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
};
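
/* When MTE is only partially implemented, expose TCO as constant zero. */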
static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_CONST, .access = PL0_RW, },
};
static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
    { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
      .access = PL0_W, .type = ARM_CP_DC_GVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_DC_GZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
};
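
/*
 * SCXTNUM_ELx is gated at several levels: SCTLR_ELx.TSCXT traps EL0
 * accesses, and the EnSCXT bits in HCR_EL2 and SCR_EL3 must be set
 * for lower ELs to use the registers at all.
 */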
static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    uint64_t hcr = arm_hcr_el2_eff(env);
    int el = arm_current_el(env);

    if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
        if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
            if (hcr & HCR_TGE) {
                return CP_ACCESS_TRAP_EL2;
            }
            return CP_ACCESS_TRAP;
        }
    } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static const ARMCPRegInfo scxtnum_reginfo[] = {
    { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL0_RW, .accessfn = access_scxtnum,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
    { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL1_RW, .accessfn = access_scxtnum,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
    { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL2_RW, .accessfn = access_scxtnum,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
    { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
};
#endif /* TARGET_AARCH64 */
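
/*
 * CFP/DVP/CPP RCTX prediction-invalidation ops: NOPs under TCG, but
 * the SCTLR_ELx.EnRCTX and HCR_EL2.NV trap checks still apply.
 */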
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}
static const ARMCPRegInfo predinv_reginfo[] = {
    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /*
     * Note the AArch32 opcodes have a different OPC1.
     */
    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
};
static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read the high 32 bits of the current CCSIDR */
    return extract64(ccsidr_read(env, ri), 32, 32);
}

static const ARMCPRegInfo ccsidr2_reginfo[] = {
    { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
};
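
/*
 * Trap helpers for the ID register space: HCR_EL2.TID3 (and TID0 for
 * the Jazelle ID) redirect lower-EL reads to EL2, and HSTR.TJDBX
 * covers the v7-only JOSCR/JMCR registers.
 */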
static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid3(env, ri, isread);
    }

    return CP_ACCESS_OK;
}
static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_joscr_jmcr(CPUARMState *env,
                                        const ARMCPRegInfo *ri, bool isread)
{
    /*
     * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
     * in v7A, not in v8A.
     */
    if (!arm_feature(env, ARM_FEATURE_V8) &&
        arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TJDBX)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
static const ARMCPRegInfo jazelle_regs[] = {
    { .name = "JIDR",
      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_R, .accessfn = access_jazelle,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JOSCR",
      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
      .accessfn = access_joscr_jmcr,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JMCR",
      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
      .accessfn = access_joscr_jmcr,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
};
static const ARMCPRegInfo contextidr_el2 = {
    .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
    .access = PL2_RW,
    .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
};
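
/*
 * Registers for FEAT_VHE (HCR_EL2.E2H): TTBR1_EL2, the EL2 virtual
 * timer, and the CNT*_EL02 aliases that let an E2H host manipulate
 * the EL0/EL1 timers from EL2.
 */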
static const ARMCPRegInfo vhe_reginfo[] = {
    { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
#ifndef CONFIG_USER_ONLY
    { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
      .fieldoffset =
        offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hv_timer_reset,
      .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
    { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO, .access = PL2_RW,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
      .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
    { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
    { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
    { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
#endif
};
#ifndef CONFIG_USER_ONLY
static const ARMCPRegInfo ats1e1_reginfo[] = {
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
};

static const ARMCPRegInfo ats1cp_reginfo[] = {
    { .name = "ATS1CPRP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
    { .name = "ATS1CPWP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
};
#endif
/*
 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
 * is non-zero, which is never for ARMv7, optionally in ARMv8
 * and mandatorily for ARMv8.2 and up.
 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
 * implementation is RAZ/WI we can ignore this detail, as we
 * do for ACTLR.
 */
static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
    { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_tacr,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
};
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .accessfn = access_aa32_tid3,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar6 },
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST,
            .accessfn = access_aa64_tid2,
            .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
        define_pmu_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /*
             * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
             * emulation because we don't know the right value for the
             * GIC field until after we define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R,
#ifdef CONFIG_USER_ONLY
              .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr0
#else
              .type = ARM_CP_NO_RAW,
              .accessfn = access_aa64_tid3,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore
#endif
            },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64pfr1 },
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64zfr0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr2 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_pfr2 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
        };
#ifdef CONFIG_USER_ONLY
        static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = 0x000f000f00ff0000,
              .fixed_bits    = 0x0000000000000011 },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = 0x00000000000000f0 },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64ZFR0_EL1" },
            { .name = "ID_AA64MMFR0_EL1",
              .fixed_bits    = 0x00000000ff000000 },
            { .name = "ID_AA64MMFR1_EL1" },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits    = 0x0000000000000006 },
            { .name = "ID_AA64DFR1_EL1" },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64AFR*",
              .is_glob = true },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = 0x00fffffff0fffff0 },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = 0x000000f0ffffffff },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true },
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .access = PL1_R,
                .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    /*
     * Register the base EL2 cpregs.
     * Pre v8, these registers are implemented only as part of the
     * Virtualization Extensions (EL2 present).  Beginning with v8,
     * if EL2 is missing but EL3 is enabled, mostly these become
     * RES0 from EL3, with some specific exceptions.
     */
    if (arm_feature(env, ARM_FEATURE_EL2)
        || (arm_feature(env, ARM_FEATURE_EL3)
            && arm_feature(env, ARM_FEATURE_V8))) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr,
              .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .type = ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def,
              .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .resetvalue = vmpidr_def,
              .type = ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
        };
        /*
         * The only field of MDCR_EL2 that has a defined architectural reset
         * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
         */
        ARMCPRegInfo mdcr_el2 = {
            .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
            .access = PL2_RW, .resetvalue = pmu_num_counters(env),
            .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
        };
        define_one_arm_cp_reg(cpu, &mdcr_el2);
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .access = PL2_R,
                .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    }
    /* Register the base EL3 cpregs. */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .access = PL3_R,
              .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
            },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-AA32HPD.  */
        if (cpu_isar_feature(aa32_hpd, cpu)) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    if (cpu_isar_feature(aa32_jazelle, cpu)) {
        define_arm_cp_regs(cpu, jazelle_regs);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R,
              .accessfn = access_aa64_tid1,
              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0 },
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
              .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
              .name = "MPUIR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmsav7_dregion << 8
        };
        static const ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
#ifdef CONFIG_USER_ONLY
        static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1",
              .exported_bits = 0x00000000ffffffff },
            { .name = "REVIDR_EL1" },
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            size_t i;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
                id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
            }
            for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
                id_cp_reginfo[i].access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
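
    /*
     * MPIDR is computed at read time via mpidr_read(); for user-only
     * builds bit 31 is forced to 1 via the fixed_bits mask below.
     */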
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
        };
#ifdef CONFIG_USER_ONLY
        static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
              .fixed_bits = 0x0000000080000000 },
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_tacr,
              .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (cpu_isar_feature(aa32_ac2, cpu)) {
            define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
        }
    }
, ARM_FEATURE_CBAR
)) {
8328 * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
8329 * There are two flavours:
8330 * (1) older 32-bit only cores have a simple 32-bit CBAR
8331 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
8332 * 32-bit register visible to AArch32 at a different encoding
8333 * to the "flavour 1" register and with the bits rearranged to
8334 * be able to squash a 64-bit address into the 32-bit view.
8335 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
8336 * in future if we support AArch32-only configs of some of the
8337 * AArch64 cores we might need to add a specific feature flag
8338 * to indicate cores with "flavour 2" CBAR.
8340 if (arm_feature(env
, ARM_FEATURE_AARCH64
)) {
8341 /* 32 bit view is [31:18] 0...0 [43:32]. */
8342 uint32_t cbar32
= (extract64(cpu
->reset_cbar
, 18, 14) << 18)
8343 | extract64(cpu
->reset_cbar
, 32, 12);
8344 ARMCPRegInfo cbar_reginfo
[] = {
8346 .type
= ARM_CP_CONST
,
8347 .cp
= 15, .crn
= 15, .crm
= 3, .opc1
= 1, .opc2
= 0,
8348 .access
= PL1_R
, .resetvalue
= cbar32
},
8349 { .name
= "CBAR_EL1", .state
= ARM_CP_STATE_AA64
,
8350 .type
= ARM_CP_CONST
,
8351 .opc0
= 3, .opc1
= 1, .crn
= 15, .crm
= 3, .opc2
= 0,
8352 .access
= PL1_R
, .resetvalue
= cpu
->reset_cbar
},
8354 /* We don't implement a r/w 64 bit CBAR currently */
8355 assert(arm_feature(env
, ARM_FEATURE_CBAR_RO
));
8356 define_arm_cp_regs(cpu
, cbar_reginfo
);
8358 ARMCPRegInfo cbar
= {
8360 .cp
= 15, .crn
= 15, .crm
= 0, .opc1
= 4, .opc2
= 0,
8361 .access
= PL1_R
|PL3_W
, .resetvalue
= cpu
->reset_cbar
,
8362 .fieldoffset
= offsetof(CPUARMState
,
8363 cp15
.c15_config_base_address
)
8365 if (arm_feature(env
, ARM_FEATURE_CBAR_RO
)) {
8366 cbar
.access
= PL1_R
;
8367 cbar
.fieldoffset
= 0;
8368 cbar
.type
= ARM_CP_CONST
;
8370 define_one_arm_cp_reg(cpu
, &cbar
);
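    /*
     * Editorial worked example for the "flavour 2" 32-bit view above:
     * with a reset_cbar of 0x8_4000_0000, extract64(.., 18, 14) << 18
     * keeps address bits [31:18] (0x40000000) and extract64(.., 32, 12)
     * supplies bits [43:32] (0x8), giving cbar32 = 0x40000008.
     */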
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        static const ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW, .accessfn = access_tvm_trvm,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /*
             * Normally we would always end the TB on an SCTLR write, but
             * Linux arch/arm/mach-pxa/sleep.S expects two instructions
             * following an MMU enable to execute from cache. Imitate this
             * behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        define_arm_cp_regs(cpu, lor_reginfo);
    }
    if (cpu_isar_feature(aa64_pan, cpu)) {
        define_one_arm_cp_reg(cpu, &pan_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1e1_reginfo);
    }
    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1cp_reginfo);
    }
#endif
    if (cpu_isar_feature(aa64_uao, cpu)) {
        define_one_arm_cp_reg(cpu, &uao_reginfo);
    }
    if (cpu_isar_feature(aa64_dit, cpu)) {
        define_one_arm_cp_reg(cpu, &dit_reginfo);
    }
    if (cpu_isar_feature(aa64_ssbs, cpu)) {
        define_one_arm_cp_reg(cpu, &ssbs_reginfo);
    }
    if (cpu_isar_feature(any_ras, cpu)) {
        define_arm_cp_regs(cpu, minimal_ras_reginfo);
    }

    if (cpu_isar_feature(aa64_vh, cpu) ||
        cpu_isar_feature(aa64_debugv8p2, cpu)) {
        define_one_arm_cp_reg(cpu, &contextidr_el2);
    }
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_cp_regs(cpu, vhe_reginfo);
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_arm_cp_regs(cpu, zcr_reginfo);
    }

    if (cpu_isar_feature(aa64_hcx, cpu)) {
        define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
    if (cpu_isar_feature(aa64_rndr, cpu)) {
        define_arm_cp_regs(cpu, rndr_reginfo);
    }
    if (cpu_isar_feature(aa64_tlbirange, cpu)) {
        define_arm_cp_regs(cpu, tlbirange_reginfo);
    }
    if (cpu_isar_feature(aa64_tlbios, cpu)) {
        define_arm_cp_regs(cpu, tlbios_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    /* Data Cache clean instructions up to PoP */
    if (cpu_isar_feature(aa64_dcpop, cpu)) {
        define_one_arm_cp_reg(cpu, dcpop_reg);

        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
            define_one_arm_cp_reg(cpu, dcpodp_reg);
        }
    }
#endif /*CONFIG_USER_ONLY*/

    /*
     * If full MTE is enabled, add all of the system registers.
     * If only "instructions available at EL0" are enabled,
     * then define only a RAZ/WI version of PSTATE.TCO.
     */
    if (cpu_isar_feature(aa64_mte, cpu)) {
        define_arm_cp_regs(cpu, mte_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
        define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    }

    if (cpu_isar_feature(aa64_scxtnum, cpu)) {
        define_arm_cp_regs(cpu, scxtnum_reginfo);
    }
#endif

    if (cpu_isar_feature(any_predinv, cpu)) {
        define_arm_cp_regs(cpu, predinv_reginfo);
    }

    if (cpu_isar_feature(any_ccidx, cpu)) {
        define_arm_cp_regs(cpu, ccsidr2_reginfo);
    }

#ifndef CONFIG_USER_ONLY
    /*
     * Register redirections and aliases must be done last,
     * after the registers from the other extensions have been defined.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_vh_e2h_redirects_aliases(cpu);
    }
#endif
}
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}
static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    qemu_printf("  %s\n", name);
    g_free(name);
}

void arm_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    qemu_printf("Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, NULL);
    g_slist_free(list);
}
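/*
 * Editorial note: with a TCG build the output of arm_cpu_list() looks
 * roughly like the following (the exact CPU set depends on the configured
 * target and build options):
 *
 *   Available CPUs:
 *     arm1026
 *     arm1136
 *     ...
 *     cortex-a15
 *     cortex-a53
 *     max
 */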
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    QAPI_LIST_PREPEND(*cpu_list, info);
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
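/*
 * Editorial note: this backs the QMP command "query-cpu-definitions".
 * A session looks roughly like:
 *
 *   -> { "execute": "query-cpu-definitions" }
 *   <- { "return": [ { "name": "cortex-a15",
 *                      "typename": "cortex-a15-arm-cpu" }, ... ] }
 */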
/*
 * Private utility function for define_one_arm_cp_reg_with_opaque():
 * add a single reginfo struct to the hash table.
 */
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, CPState state,
                                   CPSecureState secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    CPUARMState *env = &cpu->env;
    uint32_t key;
    ARMCPRegInfo *r2;
    bool is64 = r->type & ARM_CP_64BIT;
    bool ns = secstate & ARM_CP_SECSTATE_NS;
    int cp = r->cp;
    size_t name_len;
    bool make_const;

    switch (state) {
    case ARM_CP_STATE_AA32:
        /* We assume it is a cp15 register if the .cp field is left unset. */
        if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
            cp = 15;
        }
        key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
        break;
    case ARM_CP_STATE_AA64:
        /*
         * To allow abbreviation of ARMCPRegInfo definitions, we treat
         * cp == 0 as equivalent to the value for "standard guest-visible
         * sysreg". STATE_BOTH definitions are also always "standard sysreg"
         * in their AArch64 view (the .cp value may be non-zero for the
         * benefit of the AArch32 view).
         */
        if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            cp = CP_REG_ARM64_SYSREG_CP;
        }
        key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
        break;
    default:
        g_assert_not_reached();
    }

    /* Overriding of an existing definition must be explicitly requested. */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
        if (oldreg) {
            assert(oldreg->type & ARM_CP_OVERRIDE);
        }
    }

    /*
     * Eliminate registers that are not present because the EL is missing.
     * Doing this here makes it easier to put all registers for a given
     * feature into the same ARMCPRegInfo array and define them all at once.
     */
    make_const = false;
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        /*
         * An EL2 register without EL2 but with EL3 is (usually) RES0.
         * See rule RJFFP in section D1.1.3 of DDI0487H.a.
         */
        int min_el = ctz32(r->access) / 2;
        if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
            if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
                return;
            }
            make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
        }
    } else {
        CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
                                 ? PL2_RW : PL1_RW);
        if ((r->access & max_el) == 0) {
            return;
        }
    }

    /* Combine cpreg and name into one allocation. */
    name_len = strlen(name) + 1;
    r2 = g_malloc(sizeof(*r2) + name_len);
    *r2 = *r;
    r2->name = memcpy(r2 + 1, name, name_len);

    /*
     * Update fields to match the instantiation, overwriting wildcards
     * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
     */
    r2->cp = cp;
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    r2->state = state;
    r2->secure = secstate;
    if (opaque) {
        r2->opaque = opaque;
    }

    if (make_const) {
        /* This should not have been a very special register to begin with. */
        int old_special = r2->type & ARM_CP_SPECIAL_MASK;
        assert(old_special == 0 || old_special == ARM_CP_NOP);
        /*
         * Set the special function to CONST, retaining the other flags.
         * This is important for e.g. ARM_CP_SVE so that we still
         * take the SVE trap if CPTR_EL3.EZ == 0.
         */
        r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
        /*
         * Usually, these registers become RES0, but there are a few
         * special cases like VPIDR_EL2 which have a constant non-zero
         * value with writes ignored.
         */
        if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
            r2->resetvalue = 0;
        }
        /*
         * ARM_CP_CONST has precedence, so removing the callbacks and
         * offsets are not strictly necessary, but it is potentially
         * less confusing to debug later.
         */
        r2->readfn = NULL;
        r2->writefn = NULL;
        r2->raw_readfn = NULL;
        r2->raw_writefn = NULL;
        r2->resetfn = NULL;
        r2->fieldoffset = 0;
        r2->bank_fieldoffsets[0] = 0;
        r2->bank_fieldoffsets[1] = 0;
    } else {
        bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];

        if (isbanked) {
            /*
             * Register is banked (using both entries in array).
             * Overwriting fieldoffset as the array is only used to define
             * banked registers but later only fieldoffset is used.
             */
            r2->fieldoffset = r->bank_fieldoffsets[ns];
        }
        if (state == ARM_CP_STATE_AA32) {
            if (isbanked) {
                /*
                 * If the register is banked then we don't need to migrate or
                 * reset the 32-bit instance in certain cases:
                 *
                 * 1) If the register has both 32-bit and 64-bit instances
                 *    then we can count on the 64-bit instance taking care
                 *    of the non-secure bank.
                 * 2) If ARMv8 is enabled then we can count on a 64-bit
                 *    version taking care of the secure bank. This requires
                 *    that separate 32 and 64-bit definitions are provided.
                 */
                if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                    (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
                    r2->type |= ARM_CP_ALIAS;
                }
            } else if ((secstate != r->secure) && !ns) {
                /*
                 * The register is not banked so we only want to allow
                 * migration of the non-secure instance.
                 */
                r2->type |= ARM_CP_ALIAS;
            }

            if (HOST_BIG_ENDIAN &&
                r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
        }
    }

    /*
     * By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if (r2->type & ARM_CP_SPECIAL_MASK) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /*
     * Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
}
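/*
 * Editorial note: a concrete consequence of the rules above is that a
 * definition with e.g. .crm = CP_ANY expands into 16 hash table entries
 * (crm = 0..15); only the crm == 0 instance is migrated, while the others
 * are tagged ARM_CP_ALIAS | ARM_CP_NO_GDB by the wildcard check.
 */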
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /*
     * Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are fewer than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    CPState state;

    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /*
     * This API is only for Arm's system coprocessors (14 and 15) or
     * (M-profile or v7A-and-earlier only) for implementation defined
     * coprocessors in the range 0..7. Our decode assumes this, since
     * 8..13 can be used for other insns including VFP and Neon. See
     * valid_cp() in translate.c. Assert here that we haven't tried
     * to use an invalid coprocessor number.
     */
    switch (r->state) {
    case ARM_CP_STATE_BOTH:
        /* 0 has a special meaning, but otherwise the same rules as AA32. */
        if (r->cp == 0) {
            break;
        }
        /* fall through */
    case ARM_CP_STATE_AA32:
        if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
            !arm_feature(&cpu->env, ARM_FEATURE_M)) {
            assert(r->cp >= 14 && r->cp <= 15);
        } else {
            assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
        }
        break;
    case ARM_CP_STATE_AA64:
        assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
        break;
    default:
        g_assert_not_reached();
    }
    /*
     * The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        CPAccessRights mask;
        switch (r->opc1) {
        case 0:
            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
            mask = PL0U_R | PL1_RW;
            break;
        case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
        case 5:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            g_assert_not_reached();
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /*
     * Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }

    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /*
                         * Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        case ARM_CP_SECSTATE_BOTH:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        default:
                            g_assert_not_reached();
                        }
                    } else {
                        /*
                         * AArch64 registers get mapped to non-secure
                         * instance of the register.
                         */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}
/* Define a whole list of registers */
void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
                                        void *opaque, size_t len)
{
    size_t i;
    for (i = 0; i < len; ++i) {
        define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
    }
}
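/*
 * Editorial usage sketch (hypothetical, not part of the original file):
 * callers typically group related registers into one array and define
 * them in a single call, e.g.
 *
 *     static const ARMCPRegInfo demo_reginfo[] = {
 *         { .name = "DEMOREG0", .cp = 15, .crn = 15, .crm = 0,
 *           .opc1 = 0, .opc2 = 0, .access = PL1_RW,
 *           .type = ARM_CP_CONST, .resetvalue = 0 },
 *         { .name = "DEMOREG1", .cp = 15, .crn = 15, .crm = 0,
 *           .opc1 = 0, .opc2 = 1, .access = PL1_RW,
 *           .type = ARM_CP_CONST, .resetvalue = 0 },
 *     };
 *     define_arm_cp_regs(cpu, demo_reginfo);
 *
 * "DEMOREG0"/"DEMOREG1" are invented names; the define_arm_cp_regs()
 * convenience wrapper used throughout this file supplies NULL and
 * ARRAY_SIZE(regs) for the opaque and len arguments.
 */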
/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
                                 const ARMCPRegUserSpaceInfo *mods,
                                 size_t mods_len)
{
    for (size_t mi = 0; mi < mods_len; ++mi) {
        const ARMCPRegUserSpaceInfo *m = mods + mi;
        GPatternSpec *pat = NULL;

        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (size_t ri = 0; ri < regs_len; ++ri) {
            ARMCPRegInfo *r = regs + ri;

            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}
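/*
 * Editorial example (hypothetical): with .is_glob set, the mod name is a
 * GLib pattern, so a single entry such as
 *
 *     { .name = "ID_AA64*", .is_glob = true },
 *
 * would make every register whose name matches read as constant zero from
 * user space; exported_bits/fixed_bits are only applied on exact-name
 * matches, as the loop above shows.
 */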
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
}
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /*
     * Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /*
         * Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /*
         * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;
    bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
        (mask & (CPSR_M | CPSR_E | CPSR_IL));

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q) {
        env->QF = ((val & CPSR_Q) != 0);
    }
    if (mask & CPSR_T) {
        env->thumb = ((val & CPSR_T) != 0);
    }
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /*
     * In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /*
             * Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /*
             * Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /*
             * Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /*
             * Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /*
             * Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
    if (rebuild_hflags) {
        arm_rebuild_hflags(env);
    }
}
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}
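/*
 * Editorial worked example: sxtb16(0x008000ff) sign-extends the low byte
 * of each halfword: 0xff -> 0xffff and 0x80 -> 0xff80, so the result is
 * 0xff80ffff.
 */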
static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
{
    /*
     * Take a division-by-zero exception if necessary; otherwise return
     * to get the usual non-trapping division behaviour (result of 0)
     */
    if (arm_feature(env, ARM_FEATURE_M)
        && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
        raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
    }
}
uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}
int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
{
    if (den == 0) {
        handle_possible_div0_trap(env, GETPC());
        return 0;
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;
    }
    return num / den;
}

uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
{
    if (den == 0) {
        handle_possible_div0_trap(env, GETPC());
        return 0;
    }
    return num / den;
}
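/*
 * Editorial note: the INT_MIN / -1 check in sdiv above avoids undefined
 * behaviour in C; the Arm SDIV instruction defines the quotient of
 * 0x80000000 / -1 to be 0x80000000 (the result is truncated to 32 bits),
 * and division by zero yields 0 unless the v7-M DIV_0_TRP trap fires.
 */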
uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
#ifdef CONFIG_USER_ONLY

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = env_archcpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode) {
        return;
    }

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}
/*
 * Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria. Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken". The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contains a target of EL1. This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
};
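/*
 * Editorial worked example: a physical IRQ taken from non-secure EL0 on a
 * system with a 64-bit EL3, SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and
 * HCR_EL2.{IMO,TGE} = 0 indexes target_el_table[1][0][1][0][0][0], the
 * "1 0 1 0" row above, giving a target of EL1.
 */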
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /*
         * Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    }

    /*
     * For these purposes, TGE and AMO/IMO/FMO both force the
     * interrupt to EL2. Fold TGE into the bit extracted above.
     */
    hcr |= (hcr_el2 & HCR_TGE) != 0;

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}
void arm_log_exception(CPUState *cs)
{
    int idx = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
            [EXCP_LSERR] = "v8M LSERR UsageFault",
            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
            [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
            [EXCP_VSERR] = "Virtual SERR",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
                      idx, exc, cs->cpu_index);
    }
}
/*
 * Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set. This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /*
     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /*
     * Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] =
                env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
     * mode, then we can copy from r8-r14. Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
/*
 * Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set. This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /*
     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /*
     * Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14. Otherwise, we copy the x register to the banked r13 and r14
     * registers.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /*
         * HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] =
                env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
     * mode, then we can copy to r8-r14. Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    int new_el;

    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);

    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->pstate &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;

    /* This must be after mode switching. */
    new_el = arm_current_el(env);

    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
            env->uncached_cpsr |= CPSR_SSBS;
        } else {
            env->uncached_cpsr &= ~CPSR_SSBS;
        }
    }

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /* CPSR.PAN is normally preserved unless...  */
        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
            switch (new_el) {
            case 3:
                if (!arm_is_secure_below_el3(env)) {
                    /* ... the target is EL3, from non-secure state.  */
                    env->uncached_cpsr &= ~CPSR_PAN;
                    break;
                }
                /* ... the target is EL3, from secure state ... */
                /* fall through */
            case 1:
                /* ... the target is EL1 and SCTLR.SPAN is 0.  */
                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
                    env->uncached_cpsr |= CPSR_PAN;
                }
                break;
            }
        }
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
    arm_rebuild_hflags(env);
}
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x08;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_VSERR:
        {
            /*
             * Note that this is reported as a data abort, but the DFAR
             * has an UNKNOWN value. Construct the SError syndrome from
             * AET and ExT fields.
             */
            ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = arm_fi_to_lfsc(&fi);
            } else {
                env->exception.fsr = arm_fi_to_sfsc(&fi);
            }
            env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
            A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
            qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x\n",
                          env->exception.fsr);

            new_mode = ARM_CPU_MODE_ABT;
            addr = 0x10;
            mask = CPSR_A | CPSR_I;
            offset = 8;
        }
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /*
         * ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
{
    /*
     * Return the register number of the AArch64 view of the AArch32
     * register @aarch32_reg. The CPUARMState CPSR is assumed to still
     * be that of the AArch32 mode the exception came from.
     */
    int mode = env->uncached_cpsr & CPSR_M;

    switch (aarch32_reg) {
    case 0 ... 7:
        return aarch32_reg;
    case 8 ... 12:
        return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
    case 13:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
            return 13;
        case ARM_CPU_MODE_HYP:
            return 15;
        case ARM_CPU_MODE_IRQ:
            return 17;
        case ARM_CPU_MODE_SVC:
            return 19;
        case ARM_CPU_MODE_ABT:
            return 21;
        case ARM_CPU_MODE_UND:
            return 23;
        case ARM_CPU_MODE_FIQ:
            return 29;
        default:
            g_assert_not_reached();
        }
    case 14:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
        case ARM_CPU_MODE_HYP:
            return 14;
        case ARM_CPU_MODE_IRQ:
            return 16;
        case ARM_CPU_MODE_SVC:
            return 18;
        case ARM_CPU_MODE_ABT:
            return 20;
        case ARM_CPU_MODE_UND:
            return 22;
        case ARM_CPU_MODE_FIQ:
            return 30;
        default:
            g_assert_not_reached();
        }
    case 15:
        return 31;
    default:
        g_assert_not_reached();
    }
}
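/*
 * Editorial example: for an exception taken from AArch32 SVC mode,
 * aarch32_reg 13 maps to x19 and aarch32_reg 14 to x18, matching the
 * xregs[] layout established by aarch64_sync_32_to_64() above.
 */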
static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
{
    uint32_t ret = cpsr_read(env);

    /* Move DIT to the correct location for SPSR_ELx */
    if (ret & CPSR_DIT) {
        ret &= ~CPSR_DIT;
        ret |= PSTATE_DIT;
    }
    /* Merge PSTATE.SS into SPSR_ELx */
    ret |= env->pstate & PSTATE_SS;

    return ret;
}
static bool syndrome_is_sync_extabt(uint32_t syndrome)
{
    /* Return true if this syndrome value is a synchronous external abort */
    switch (syn_get_ec(syndrome)) {
    case EC_INSNABORT:
    case EC_INSNABORT_SAME_EL:
    case EC_DATAABORT:
    case EC_DATAABORT_SAME_EL:
        /* Look at fault status code for all the synchronous ext abort cases */
        switch (syndrome & 0x3f) {
        case 0x10:
        case 0x13:
        case 0x14:
        case 0x15:
        case 0x16:
        case 0x17:
            return true;
        default:
            return false;
        }
    default:
        return false;
    }
}
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int old_mode;
    unsigned int cur_el = arm_current_el(env);
    int rt;

    /*
     * Note that new_el can never be 0. If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /*
         * Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;
        uint64_t hcr;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
                is_aa64 = (hcr & HCR_RW) != 0;
                break;
            }
            /* fall through */
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /*
         * FEAT_DoubleFault allows synchronous external aborts taken to EL3
         * to be taken to the SError vector entrypoint.
         */
        if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
            syndrome_is_sync_extabt(env->exception.syndrome)) {
            addr += 0x180;
        }
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        switch (syn_get_ec(env->exception.syndrome)) {
        case EC_ADVSIMDFPACCESSTRAP:
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
            break;
        case EC_CP14RTTRAP:
        case EC_CP15RTTRAP:
        case EC_CP14DTTRAP:
            /*
             * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
             * the raw register field from the insn; when taking this to
             * AArch64 we must convert it to the AArch64 view of the register
             * number. Notice that we read a 4-bit AArch32 register number and
             * write back a 5-bit AArch64 one.
             */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            break;
        case EC_CP15RRTTRAP:
        case EC_CP14RRTTRAP:
            /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            rt = extract32(env->exception.syndrome, 10, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                10, 5, rt);
            break;
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_VSERR:
        addr += 0x180;
        /* Construct the SError syndrome from IDS and ISS fields. */
        env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        old_mode = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        old_mode = cpsr_read_for_spsr_elx(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;

    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    if (cpu_isar_feature(aa64_pan, cpu)) {
        /* The value of PSTATE.PAN is normally preserved, except when ... */
        new_mode |= old_mode & PSTATE_PAN;
        switch (new_el) {
        case 2:
            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
                != (HCR_E2H | HCR_TGE)) {
                break;
            }
            /* fall through */
        case 1:
            /* ... the target is EL1 ... */
            /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
                new_mode |= PSTATE_PAN;
            }
            break;
        }
    }
    if (cpu_isar_feature(aa64_mte, cpu)) {
        new_mode |= PSTATE_TCO;
    }

    if (cpu_isar_feature(aa64_ssbs, cpu)) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
            new_mode |= PSTATE_SSBS;
        } else {
            new_mode &= ~PSTATE_SSBS;
        }
    }

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = true;
    aarch64_restore_sp(env, new_el);
    helper_rebuild_hflags_a64(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
/*
 * Do semihosting call and set the appropriate return value. All the
 * permission and validity checks have been done at translate time.
 *
 * We only see semihosting exceptions in TCG, as they are not trapped
 * to the hypervisor in KVM.
 */
#ifdef CONFIG_TCG
static void handle_semihosting(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_common_semihosting(cs);
        env->pc += 4;
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_common_semihosting(cs);
        env->regs[15] += env->thumb ? 2 : 4;
    }
}
#endif
/*
 * Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 *
 * Note: this is used for both TCG (as the do_interrupt tcg op),
 *       and KVM to re-inject guest debug exceptions, and to
 *       inject a Synchronous-External-Abort.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
#ifdef CONFIG_TCG
    if (cs->exception_index == EXCP_SEMIHOST) {
        handle_semihosting(cs);
        return;
    }
#endif

    /*
     * Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */
uint64_t arm_sctlr(CPUARMState *env, int el)
{
    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
    if (el == 0) {
        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
        el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
             ? 2 : 1;
    }
    return env->cp15.sctlr_el[el];
}
10434 /* Return true if the specified stage of address translation is disabled */
10435 bool regime_translation_disabled(CPUARMState
*env
, ARMMMUIdx mmu_idx
)
10439 if (arm_feature(env
, ARM_FEATURE_M
)) {
10440 switch (env
->v7m
.mpu_ctrl
[regime_is_secure(env
, mmu_idx
)] &
10441 (R_V7M_MPU_CTRL_ENABLE_MASK
| R_V7M_MPU_CTRL_HFNMIENA_MASK
)) {
10442 case R_V7M_MPU_CTRL_ENABLE_MASK
:
10443 /* Enabled, but not for HardFault and NMI */
10444 return mmu_idx
& ARM_MMU_IDX_M_NEGPRI
;
10445 case R_V7M_MPU_CTRL_ENABLE_MASK
| R_V7M_MPU_CTRL_HFNMIENA_MASK
:
10446 /* Enabled for all cases */
10450 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
10451 * we warned about that in armv7m_nvic.c when the guest set it.
10457 hcr_el2
= arm_hcr_el2_eff(env
);
10459 if (mmu_idx
== ARMMMUIdx_Stage2
|| mmu_idx
== ARMMMUIdx_Stage2_S
) {
10460 /* HCR.DC means HCR.VM behaves as 1 */
10461 return (hcr_el2
& (HCR_DC
| HCR_VM
)) == 0;
10464 if (hcr_el2
& HCR_TGE
) {
10465 /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
10466 if (!regime_is_secure(env
, mmu_idx
) && regime_el(env
, mmu_idx
) == 1) {
10471 if ((hcr_el2
& HCR_DC
) && arm_mmu_idx_is_stage1_of_2(mmu_idx
)) {
10472 /* HCR.DC means SCTLR_EL1.M behaves as 0 */
10476 return (regime_sctlr(env
, mmu_idx
) & SCTLR_M
) == 0;
static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        return env->cp15.vsttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
        return ARMMMUIdx_Stage1_SE0;
    case ARMMMUIdx_SE10_1:
        return ARMMMUIdx_Stage1_SE1;
    case ARMMMUIdx_SE10_1_PAN:
        return ARMMMUIdx_Stage1_SE1_PAN;
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}
#endif /* !CONFIG_USER_ONLY */

/* Return true if the translation regime is using LPAE format page tables */
bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);
    return regime_using_lpae_format(env, mmu_idx);
}

#ifndef CONFIG_USER_ONLY
bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved. */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

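/*
 * For reference, the simple AP[2:1] encoding decoded above:
 *   0b00 -> privileged RW, unprivileged no access
 *   0b01 -> RW at any privilege level
 *   0b10 -> privileged RO, unprivileged no access
 *   0b11 -> RO at any privilege level
 */
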
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        switch (xn) {
        case 0:
            prot |= PAGE_EXEC;
            break;
        case 1:
            if (s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        case 2:
            break;
        case 3:
            if (!s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        if (!extract32(xn, 1, 1)) {
            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
                prot |= PAGE_EXEC;
            }
        }
    }
    return prot;
}

/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_Stage2);
    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            /* PAN forbids data accesses but doesn't affect insn fetch */
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}

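/*
 * Illustrative note on the final WXN clause above: with SCTLR.WXN set,
 * a page that is writable at the privilege level being checked (prot_rw
 * includes PAGE_WRITE) never gains PAGE_EXEC, regardless of its XN/PXN
 * descriptor bits -- i.e. "writable implies never-executable".
 */
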
bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                              uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}

static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
{
    /*
     * For an S1 page table walk, the stage 1 attributes are always
     * some form of "this is Normal memory". The combined S1+S2
     * attributes are therefore only Device if stage 2 specifies Device.
     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
     * ie when cacheattrs.attrs bits [3:2] are 0b00.
     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
     * when cacheattrs.attrs bit [2] is 0.
     */
    assert(cacheattrs.is_s2_format);
    if (arm_hcr_el2_eff(env) & HCR_FWB) {
        return (cacheattrs.attrs & 0x4) == 0;
    } else {
        return (cacheattrs.attrs & 0xc) == 0;
    }
}

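/*
 * Example of the encodings tested above (FWB == 0 case): an S2 attrs
 * field of 0b0001 (Device-nGnRE) has bits [3:2] == 0b00 and so is
 * Device, whereas 0b1111 (Normal Write-Back) has bits [3:2] == 0b11
 * and is not.
 */
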
/* Translate a S1 pagetable walk through S2 if needed. */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, bool *is_secure,
                               ARMMMUFaultInfo *fi)
{
    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
                                          : ARMMMUIdx_Stage2;
        ARMCacheAttrs cacheattrs = {};
        MemTxAttrs txattrs = {};

        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
                                 &cacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !*is_secure;
            return ~0;
        }
        if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
            ptw_attrs_are_device(env, cacheattrs)) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !*is_secure;
            return ~0;
        }

        if (arm_is_secure_below_el3(env)) {
            /* Check if page table walk is to secure or non-secure PA space. */
            if (*is_secure) {
                *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
            } else {
                *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
            }
        } else {
            assert(!*is_secure);
        }

        addr = s2pa;
    }
    return addr;
}

/* All loads done in the course of a page table walk go through here. */
uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                     ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                     ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride, int outputsize)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /*
     * Negative levels are usually not allowed...
     * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
     * begins with level -1. Note that previous feature tests will have
     * eliminated this combination if it is not enabled.
     */
    if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        switch (stride) {
        case 13: /* 64KB Pages. */
            if (level == 0 || (level == 1 && outputsize <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages. */
            if (level == 0 || (level == 1 && outputsize <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages. */
            if (level == 0 && outputsize <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks. */
        if (inputsize > outputsize &&
            (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that. */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}

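/*
 * Worked example for the startsizecheck above: a 4KB granule
 * (stride == 9, grainsize == 12) with inputsize == 40 and level == 1
 * gives 40 - (2 * 9 + 12) == 10, which lies within [1, stride + 4],
 * so a VTCR_EL2.SL0 selecting level 1 is accepted for a 40-bit IPA
 * space.
 */
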
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}

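/*
 * Worked example: s2attrs == 0b1111 (Normal, Write-Back inner and outer)
 * gives hiattr == loattr == 3, both hints become 3 (RW allocate), and
 * the result is 0xff, i.e. Normal Write-Back RW-allocate in MAIR format.
 */
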
#endif /* !CONFIG_USER_ONLY */

/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
    [0] = 32,
    [1] = 36,
    [2] = 40,
    [3] = 42,
    [4] = 44,
    [5] = 48,
    [6] = 52,
};

/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
unsigned int arm_pamax(ARMCPU *cpu)
{
    unsigned int parange =
        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

    /*
     * id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error.
     */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 37, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBI bit so we always have 2 bits. */
        return extract32(tcr, 20, 1) * 3;
    }
}

int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 51, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBID bit so we always have 2 bits. */
        return extract32(tcr, 29, 1) * 3;
    }
}

static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 57, 2);
    } else {
        /* Replicate the single TCMA bit so we always have 2 bits. */
        return extract32(tcr, 30, 1) * 3;
    }
}

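/*
 * Example of the replication above: for a single-range regime (EL2/EL3),
 * TCR_ELx.TBI is a single bit, so extract32(tcr, 20, 1) * 3 yields 0b00
 * or 0b11; callers can then index by "select" exactly as they do for the
 * two-bit TBI0/TBI1 field of a two-range regime.
 */
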
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    bool epd, hpd, using16k, using64k, tsz_oob, ds;
    int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
    ARMCPU *cpu = env_archcpu(env);

    if (!regime_has_2_ranges(mmu_idx)) {
        select = 0;
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
            /* VTCR_EL2 */
            hpd = false;
        } else {
            hpd = extract32(tcr, 24, 1);
        }
        epd = false;
        sh = extract32(tcr, 12, 2);
        ps = extract32(tcr, 16, 3);
        ds = extract64(tcr, 32, 1);
    } else {
        /*
         * Bit 55 is always between the two regions, and is canonical for
         * determining if address tagging is enabled.
         */
        select = extract64(va, 55, 1);
        if (!select) {
            tsz = extract32(tcr, 0, 6);
            epd = extract32(tcr, 7, 1);
            sh = extract32(tcr, 12, 2);
            using64k = extract32(tcr, 14, 1);
            using16k = extract32(tcr, 15, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            int tg = extract32(tcr, 30, 2);
            using16k = tg == 1;
            using64k = tg == 3;
            tsz = extract32(tcr, 16, 6);
            epd = extract32(tcr, 23, 1);
            sh = extract32(tcr, 28, 2);
            hpd = extract64(tcr, 42, 1);
        }
        ps = extract64(tcr, 32, 3);
        ds = extract64(tcr, 59, 1);
    }

    if (cpu_isar_feature(aa64_st, cpu)) {
        max_tsz = 48 - using64k;
    } else {
        max_tsz = 39;
    }

    /*
     * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
     * adjust the effective value of DS, as documented.
     */
    min_tsz = 16;
    if (using64k) {
        if (cpu_isar_feature(aa64_lva, cpu)) {
            min_tsz = 12;
        }
        ds = false;
    } else if (ds) {
        switch (mmu_idx) {
        case ARMMMUIdx_Stage2:
        case ARMMMUIdx_Stage2_S:
            if (using16k) {
                ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
            } else {
                ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
            }
            break;
        default:
            if (using16k) {
                ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
            } else {
                ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
            }
            break;
        }
        if (ds) {
            min_tsz = 12;
        }
    }

    if (tsz > max_tsz) {
        tsz = max_tsz;
        tsz_oob = true;
    } else if (tsz < min_tsz) {
        tsz = min_tsz;
        tsz_oob = true;
    } else {
        tsz_oob = false;
    }

    /* Present TBI as a composite with TBID. */
    tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
    if (!data) {
        tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
    }
    tbi = (tbi >> select) & 1;

    return (ARMVAParameters) {
        .tsz = tsz,
        .ps = ps,
        .sh = sh,
        .select = select,
        .tbi = tbi,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
        .tsz_oob = tsz_oob,
        .ds = ds,
    };
}

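/*
 * Illustrative decode of the above: a TCR_EL1 with T0SZ == 25 and
 * TG0 == 0 (4KB granule), looking up a VA with bit 55 clear, yields
 * select == 0, tsz == 25 and using16k == using64k == false, i.e. a
 * 39-bit (64 - 25) bottom VA range -- a common Linux configuration.
 */
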
#ifndef CONFIG_USER_ONLY
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later. */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well. */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}

/**
 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a long-format
 * DFSR/IFSR fault register, with the following caveats:
 *  * the WnR bit is never set (the caller must do this).
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @mmu_idx: MMU index indicating required translation regime
 * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table
 *             walk), must be true if this is stage 2 of a stage 1+2 walk for an
 *             EL0 access. If @mmu_idx is anything else, @s1_is_el0 is ignored.
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size_ptr: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
                        bool s1_is_el0,
                        hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                        target_ulong *page_size_ptr,
                        ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize, outputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO: This code does not support shareability levels. */
    if (aarch64) {
        int ps;

        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;

        /*
         * If TxSZ is programmed to a value larger than the maximum,
         * or smaller than the effective minimum, it is IMPLEMENTATION
         * DEFINED whether we behave as if the field were programmed
         * within bounds, or if a level 0 Translation fault is generated.
         *
         * With FEAT_LVA, fault on less than minimum becomes required,
         * so our choice is to always raise the fault.
         */
        if (param.tsz_oob) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }

        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;

        /*
         * Bound PS by PARANGE to find the effective output address size.
         * ID_AA64MMFR0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
        ps = MIN(ps, param.ps);
        assert(ps < ARRAY_SIZE(pamax_map));
        outputsize = pamax_map[ps];
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
        inputsize = addrsize - param.tsz;
        outputsize = 40;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t sl2 = extract64(tcr->raw_tcr, 33, 1);
        uint32_t startlevel;
        bool ok;

        /* SL2 is RES0 unless DS=1 & 4kb granule. */
        if (param.ds && stride == 9 && sl2) {
            if (sl0 != 0) {
                level = 0;
                fault_type = ARMFault_Translation;
                goto do_fault;
            }
            startlevel = -1;
        } else if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;

            if (cpu_isar_feature(aa64_st, cpu)) {
                startlevel &= 3;
            }
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride, outputsize);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
    indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);

    /*
     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
     *
     * Otherwise, if the base address is out of range, raise AddressSizeFault.
     * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
     * but we've just cleared the bits above 47, so simplify the test.
     */
    if (outputsize > 48) {
        descaddr |= extract64(ttbr, 2, 4) << 48;
    } else if (descaddr >> outputsize) {
        level = 0;
        fault_type = ARMFault_AddressSize;
        goto do_fault;
    }

    /*
     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
     * and also to mask out CnP (bit 0) which could validly be non-zero.
     */
    descaddr &= ~indexmask;

    /*
     * For AArch32, the address field in the descriptor goes up to bit 39
     * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
     * or an AddressSize fault is raised.  So for v8 we extract those SBZ
     * bits as part of the address, which will be checked via outputsize.
     * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
     * the highest bits of a 52-bit output are placed elsewhere.
     */
    if (param.ds) {
        descaddrmask = MAKE_64BIT_MASK(0, 50);
    } else if (arm_feature(env, ARM_FEATURE_V8)) {
        descaddrmask = MAKE_64BIT_MASK(0, 48);
    } else {
        descaddrmask = MAKE_64BIT_MASK(0, 40);
    }
    descaddrmask &= ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }

        descaddr = descriptor & descaddrmask;

        /*
         * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
         * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
         * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
         * raise AddressSizeFault.
         */
        if (outputsize > 48) {
            if (param.ds) {
                descaddr |= extract64(descriptor, 8, 2) << 50;
            } else {
                descaddr |= extract64(descriptor, 12, 4) << 48;
            }
        } else if (descaddr >> outputsize) {
            fault_type = ARMFault_AddressSize;
            goto do_fault;
        }

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /*
         * Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies. Note that although
         * descaddrmask masks enough of the low bits of the descriptor
         * to give a correct page or table address, the address field
         * in a block descriptor is smaller; so we need to explicitly
         * clear the lower bits here before ORing in the low vaddr bits.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr &= ~(page_size - 1);
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable. */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        ns = mmu_idx == ARMMMUIdx_Stage2;
        xn = extract32(attrs, 11, 2);
        *prot = get_S2prot(env, ap, xn, s1_is_el0);
    } else {
        ns = extract32(attrs, 3, 1);
        xn = extract32(attrs, 12, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        arm_tlb_bti_gp(txattrs) = true;
    }

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        cacheattrs->is_s2_format = true;
        cacheattrs->attrs = extract32(attrs, 0, 4);
    } else {
        /* Index into MAIR registers for cache attributes */
        uint8_t attrindx = extract32(attrs, 0, 3);
        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
        assert(attrindx <= 7);
        cacheattrs->is_s2_format = false;
        cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
    }

    /*
     * For FEAT_LPA2 and effective DS, the SH field in the attributes
     * was re-purposed for output address bits.  The SH attribute in
     * that case comes from TCR_ELx, which we extracted earlier.
     */
    if (param.ds) {
        cacheattrs->shareability = param.sh;
    } else {
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
                               mmu_idx == ARMMMUIdx_Stage2_S);
    fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
    return true;
}

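/*
 * Worked example of the walk arithmetic above, for a 4KB granule
 * (stride == 9) and inputsize == 48: level = 4 - (48 - 4) / 9 == 0,
 * indexmask covers the low 12 bits of (address >> 36), and each loop
 * iteration consumes 9 further bits of the VA until only the 12-bit
 * page offset remains at level 3.
 */
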
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}

static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}

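/*
 * Example of the bit tests above: extract32(address, 20, 12) == 0xe00
 * accepts exactly 0xe0000000..0xe00fffff (the PPB), while
 * extract32(address, 29, 3) == 0x7 accepts the whole
 * 0xe0000000..0xffffffff System region.
 */
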
bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}

static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
        bool pxn = false;

        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
            pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
        }

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn && !(pxn && !is_user)) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}

bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *txattrs,
                          int *prot, target_ulong *page_size,
                          ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}

bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, int *prot,
                          ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}

/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}

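/*
 * Worked example: s1 == 0xf (Write-Back, RW-allocate) combined with
 * s2 == 0xa (Write-Through, no hints) takes the "stage 2 write-through"
 * branch above and returns (2 << 2) | 3 == 0xb: Write-Through, with the
 * allocation hints that stage 1 supplied.
 */
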
/*
 * Combine the memory type and cacheability attributes of
 * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
 * combined attributes in MAIR_EL1 format.
 */
static uint8_t combined_attrs_nofwb(CPUARMState *env,
                                    ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;

    s2_mair_attrs = convert_stage2_attrs(env, s2.attrs);

    s1lo = extract32(s1.attrs, 0, 4);
    s2lo = extract32(s2_mair_attrs, 0, 4);
    s1hi = extract32(s1.attrs, 4, 4);
    s2hi = extract32(s2_mair_attrs, 4, 4);

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret_attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret_attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret_attrs = 8;  /* nGRE */
        } else {
            ret_attrs = 0xc; /* GRE */
        }
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);
    }
    return ret_attrs;
}

static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
{
    /*
     * Given the 4 bits specifying the outer or inner cacheability
     * in MAIR format, return a value specifying Normal Write-Back,
     * with the allocation and transient hints taken from the input
     * if the input specified some kind of cacheable attribute.
     */
    if (attr == 0 || attr == 4) {
        /*
         * 0 == an UNPREDICTABLE encoding
         * 4 == Non-cacheable
         * Either way, force Write-Back RW allocate non-transient
         */
        return 0xf;
    }
    /* Change WriteThrough to WriteBack, keep allocation and transient hints */
    return attr | 4;
}

/*
 * Combine the memory type and cacheability attributes of
 * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
 * combined attributes in MAIR_EL1 format.
 */
static uint8_t combined_attrs_fwb(CPUARMState *env,
                                  ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    switch (s2.attrs) {
    case 7:
        /* Use stage 1 attributes */
        return s1.attrs;
    case 6:
        /*
         * Force Normal Write-Back. Note that if S1 is Normal cacheable
         * then we take the allocation hints from it; otherwise it is
         * RW allocate, non-transient.
         */
        if ((s1.attrs & 0xf0) == 0) {
            /* S1 is Device */
            return 0xff;
        }
        /* Need to check the Inner and Outer nibbles separately */
        return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
               force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
    case 5:
        /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
        if ((s1.attrs & 0xf0) == 0) {
            return s1.attrs;
        }
        return 0x44;
    case 0 ... 3:
        /* Force Device, of subtype specified by S2 */
        return s2.attrs << 2;
    default:
        /*
         * RESERVED values (including RES0 descriptor bit [5] being nonzero);
         * arbitrarily force Device.
         */
        return 0;
    }
}

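/*
 * Note on the FWB encodings handled above: with HCR_EL2.FWB == 1 the S2
 * attrs field is not the legacy 4-bit format; 0b0111 means "use stage 1
 * attributes", 0b0110 forces Normal Write-Back, 0b0101 means Normal
 * Non-cacheable unless S1 is Device, and 0b00xx forces the Device
 * subtype given by the low two bits.
 */
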
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @env:     CPUARMState
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
                                 ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    ARMCacheAttrs ret;
    bool tagged = false;

    assert(s2.is_s2_format && !s1.is_s2_format);
    ret.is_s2_format = false;

    if (s1.attrs == 0xf0) {
        tagged = true;
        s1.attrs = 0xff;
    }

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (arm_hcr_el2_eff(env) & HCR_FWB) {
        ret.attrs = combined_attrs_fwb(env, s1, s2);
    } else {
        ret.attrs = combined_attrs_nofwb(env, s1, s2);
    }

    /*
     * Any location for which the resultant memory type is any
     * type of Device memory is always treated as Outer Shareable.
     * Any location for which the resultant memory type is Normal
     * Inner Non-cacheable, Outer Non-cacheable is always treated
     * as Outer Shareable.
     * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
     */
    if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
        ret.shareability = 2;
    }

    /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
    if (tagged && ret.attrs == 0xff) {
        ret.attrs = 0xf0;
    }

    return ret;
}

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    ARMCacheAttrs cacheattrs = {};

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, &cacheattrs);

    if (ret) {
        return -1;
    }
    return phys_addr;
}

#endif /* !CONFIG_USER_ONLY */

12575 /* Note that signed overflow is undefined in C. The following routines are
12576 careful to use unsigned types where modulo arithmetic is required.
12577 Failure to do so _will_ break on newer gcc. */
/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}
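
/*
 * The test above detects signed overflow without invoking C's undefined
 * behaviour: overflow can only happen when both operands have the same
 * sign ((a ^ b) top bit clear) and the result's sign differs from a's
 * ((res ^ a) top bit set).  Worked example:
 *
 *   add16_sat(0x7000, 0x7000)
 *     res = 0xe000                  modulo-2^16 sum
 *     (res ^ a) & 0x8000  -> set    result sign differs from a
 *     (a ^ b) & 0x8000    -> clear  operands agree in sign
 *     => saturate to 0x7fff
 */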
/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}
/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}
/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
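
/*
 * Worked example for the GE bits above:
 *
 *   UADD16: 0xffff + 0x0001 = 0x10000, so (sum >> 16) == 1 and the two
 *   GE bits for that halfword are set (there was an unsigned carry out).
 *   USUB16: 0x0003 - 0x0005 = 0xfffffffe as uint32_t, so (sum >> 16) is
 *   nonzero and the GE bits stay clear (a borrow occurred), whereas
 *   0x0005 - 0x0003 leaves (sum >> 16) == 0 and sets them.
 */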
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}
/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}
/*
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}
uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
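
/*
 * Convention note: the ARM CRC32/CRC32C instructions perform no bit
 * inversion, but zlib's crc32() XORs the accumulator and result with
 * 0xffffffff internally, and crc32c() inverts its output.  The XORs in
 * the helpers above exist purely to cancel those library conventions,
 * e.g. a CRC32W of one word becomes crc32(acc ^ ~0, buf, 4) ^ ~0 with
 * buf holding the little-endian word.
 */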
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    uint64_t hcr_el2;

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    hcr_el2 = arm_hcr_el2_eff(env);

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     * This register is ignored if E2H+TGE are both set.
     */
    if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);

        switch (fpen) {
        case 0:
        case 2:
            if (cur_el == 0 || cur_el == 1) {
                /* Trap to PL1, which might be EL1 or EL3 */
                if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                    return 3;
                }
                return 1;
            }
            if (cur_el == 3 && !is_a64(env)) {
                /* Secure PL1 running at EL3 */
                return 3;
            }
            break;
        case 1:
            if (cur_el == 0) {
                return 1;
            }
            break;
        case 3:
            break;
        }
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /*
     * CPTR_EL2 is present in v7VE or v8, and changes format
     * with HCR_EL2.E2H (regardless of TGE).
     */
    if (cur_el <= 2) {
        if (hcr_el2 & HCR_E2H) {
            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
            case 1:
                if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 2;
            }
        } else if (arm_is_el2_enabled(env)) {
            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
                return 2;
            }
        }
    }

    /* CPTR_EL3 : present in v8 */
    if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
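
/*
 * Two common cases, for concreteness: an AArch64 EL0 access with
 * CPACR_EL1.FPEN == 0 falls into the first switch and returns 1 (trap
 * to EL1); with HCR_EL2.{E2H,TGE} == {1,1} the CPACR check is skipped
 * entirely and CPTR_EL2.FPEN == 0 makes the function return 2 instead.
 */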
/* Return the exception level we're running at if this is our mmu_idx */
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    if (mmu_idx & ARM_MMU_IDX_M) {
        return mmu_idx & ARM_MMU_IDX_M_PRIV;
    }

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE20_0:
        return 0;
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
        return 1;
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    default:
        g_assert_not_reached();
    }
}
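
/*
 * E.g. ARMMMUIdx_E20_0 and ARMMMUIdx_SE10_0 both map to EL0, while
 * ARMMMUIdx_E10_1_PAN maps to EL1; for M-profile indexes the privilege
 * level is carried directly in the ARM_MMU_IDX_M_PRIV field.
 */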
#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
    ARMMMUIdx idx;
    uint64_t hcr;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    /* See ARM pseudo-function ELIsInHost.  */
    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            idx = ARMMMUIdx_E20_0;
        } else {
            idx = ARMMMUIdx_E10_0;
        }
        break;
    case 1:
        if (env->pstate & PSTATE_PAN) {
            idx = ARMMMUIdx_E10_1_PAN;
        } else {
            idx = ARMMMUIdx_E10_1;
        }
        break;
    case 2:
        /* Note that TGE does not apply at EL2.  */
        if (arm_hcr_el2_eff(env) & HCR_E2H) {
            if (env->pstate & PSTATE_PAN) {
                idx = ARMMMUIdx_E20_2_PAN;
            } else {
                idx = ARMMMUIdx_E20_2;
            }
        } else {
            idx = ARMMMUIdx_E2;
        }
        break;
    case 3:
        return ARMMMUIdx_SE3;
    default:
        g_assert_not_reached();
    }

    if (arm_is_secure_below_el3(env)) {
        idx &= ~ARM_MMU_IDX_A_NS;
    }

    return idx;
}
ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}
#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                           ARMMMUIdx mmu_idx,
                                           CPUARMTBFlags flags)
{
    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
    }
    return flags;
}
static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                              ARMMMUIdx mmu_idx,
                                              CPUARMTBFlags flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        DP_TBFLAG_A32(flags, SCTLR__B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }
    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    uint32_t ccr = env->v7m.ccr[env->v7m.secure];

    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1.  */
    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_v7m_is_handler_mode(env)) {
        DP_TBFLAG_M32(flags, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        DP_TBFLAG_M32(flags, STACKCHECK, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
static CPUARMTBFlags rebuild_hflags_aprofile(CPUARMState *env)
{
    CPUARMTBFlags flags = {};

    DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env));
    return flags;
}
static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
    int el = arm_current_el(env);

    if (arm_sctlr(env, el) & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_el_is_aa64(env, 1)) {
        DP_TBFLAG_A32(flags, VFPEN, 1);
    }

    if (el < 2 && env->cp15.hstr_el2 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
    }

    if (env->uncached_cpsr & CPSR_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint64_t sctlr;
    int tbii, tbid;

    DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses.  */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    DP_TBFLAG_A64(flags, TBII, tbii);
    DP_TBFLAG_A64(flags, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);
        int zcr_len;

        /*
         * If SVE is disabled, but FP is enabled,
         * then the effective len is 0.
         */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            zcr_len = sve_zcr_len_for_el(env, el);
        }
        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
        DP_TBFLAG_A64(flags, ZCR_LEN, zcr_len);
    }

    sctlr = regime_sctlr(env, stage1);

    if (sctlr & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            DP_TBFLAG_A64(flags, BT, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            /* TODO: ARMv8.3-NV */
            DP_TBFLAG_A64(flags, UNPRIV, 1);
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
        case ARMMMUIdx_SE20_2:
        case ARMMMUIdx_SE20_2_PAN:
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    if (env->pstate & PSTATE_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked access have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            DP_TBFLAG_A64(flags, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
            }
        }
        /* And again for unprivileged accesses, if required.  */
        if (EX_TBFLAG_A64(flags, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /* Cache TCMA as well as TBI.  */
        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}
void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}
/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}
void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}
/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}
void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}
void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}
static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    CPUARMTBFlags c = env->hflags;
    CPUARMTBFlags r = rebuild_hflags_internal(env);

    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
        fprintf(stderr, "TCG hflags mismatch "
                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
                c.flags, c.flags2, r.flags, r.flags2);
        abort();
    }
#endif
}
static bool mve_no_pred(CPUARMState *env)
{
    /*
     * Return true if there is definitely no predication of MVE
     * instructions by VPR or LTPSIZE. (Returning false even if there
     * isn't any predication is OK; generated code will just be
     * a little worse.)
     * If the CPU does not implement MVE then this TB flag is always 0.
     *
     * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
     * logic in gen_update_fp_context() needs to be updated to match.
     *
     * We do not include the effect of the ECI bits here -- they are
     * tracked in other TB flags. This simplifies the logic for
     * "when did we emit code that changes the MVE_NO_PRED TB flag
     * and thus need to end the TB?".
     */
    if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
        return false;
    }
    if (env->v7m.vpr) {
        return false;
    }
    if (env->v7m.ltpsize < 4) {
        return false;
    }
    return true;
}
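
/*
 * E.g. on an MVE CPU immediately after lazy FP context creation, VPR is
 * 0 and FPDSCR.LTPSIZE is 4, so this returns true, the MVE_NO_PRED TB
 * flag is set, and the translator can emit cheaper unpredicated code.
 */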
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    CPUARMTBFlags flags;

    assert_hflags_rebuild_correctly(env);
    flags = env->hflags;

    if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
        *pc = env->pc;
        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
            DP_TBFLAG_A64(flags, BTYPE, env->btype);
        }
    } else {
        *pc = env->regs[15];

        if (arm_feature(env, ARM_FEATURE_M)) {
            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
                != env->v7m.secure) {
                DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
            }

            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
                 (env->v7m.secure &&
                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
                /*
                 * ASPEN is set, but FPCA/SFPA indicate that there is no
                 * active FP context; we must create a new FP context before
                 * executing any FP insn.
                 */
                DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
            }

            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                DP_TBFLAG_M32(flags, LSPACT, 1);
            }

            if (mve_no_pred(env)) {
                DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
            }
        } else {
            /*
             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
             */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
            } else {
                DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
                DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
            }
            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
                DP_TBFLAG_A32(flags, VFPEN, 1);
            }
        }

        DP_TBFLAG_AM32(flags, THUMB, env->thumb);
        DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
    }

    /*
     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
     */
    if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
        DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
    }

    *pflags = flags.flags;
    *cs_base = flags.flags2;
}
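
/*
 * The packing contract here: flags.flags becomes the TB's 32-bit flags
 * word and flags.flags2 its cs_base, so both must be recomputable from
 * CPU state alone.  For the singlestep machine above, SS_ACTIVE == 1
 * with PSTATE.SS == 1 is Active-not-pending and yields PSTATE__SS == 1
 * in the TB flags.
 */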
#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
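
/*
 * pmask example: with vq == 2 (256-bit vectors) and ARM_MAX_VQ == 16,
 * the loop starts at j == 0 with pmask == ~(-1ULL << 32), keeping the
 * 32 valid predicate bits in p[0]; pmask is then zeroed so p[1]..p[3]
 * are cleared entirely on the remaining iterations.
 */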
/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);