 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "exec/cpu_ldst.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
#endif

static void switch_mode(CPUARMState *env, int mode);
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        stq_le_p(buf, q[0]);
        stq_le_p(buf + 8, q[1]);
        return 16;
    }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}
static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}
static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
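
/*
 * Illustrative sketch, not part of the original file: a regdef whose only
 * accessor is a side-effecting readfn (no fieldoffset, no raw or write
 * accessors) fails every test above, so raw_accessors_invalid() returns
 * true and the definition must carry ARM_CP_NO_RAW. The names "EXAMPLE"
 * and example_readfn below are hypothetical.
 */
#if 0 /* illustration only */
static const ARMCPRegInfo example_no_raw_reginfo = {
    .name = "EXAMPLE", .cp = 15, .crn = 9, .crm = 0, .opc1 = 7, .opc2 = 0,
    .access = PL1_R, .type = ARM_CP_NO_RAW, /* keep it off the raw list */
    .readfn = example_readfn,   /* side effects: unsafe for raw migration use */
};
#endif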
bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
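
/*
 * Usage sketch (editorial, not in the original file): during migration the
 * two helpers above are used as a pair, roughly
 *
 *     write_cpustate_to_list(cpu, false);  // env -> cpreg_values[]
 *     ...the (index, value) arrays travel in the migration stream...
 *     write_list_to_cpustate(cpu);         // cpreg_values[] -> env
 *
 * Each returns false if any register could not be synced, which callers
 * treat as a failure of the whole operation.
 */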
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
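
/*
 * Editorial note (not in the original file): init_cpreg_list() uses the
 * two-pass sizing idiom: count_cpreg() sizes the arrays, then
 * add_cpreg_to_list() fills them. Because both passes apply the same
 * NO_RAW/ALIAS filter, the trailing assert() can insist the fill pass
 * produced exactly the number of entries the counting pass allocated.
 */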
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR_EL2.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
        return;
    }

    tlb_flush(cs);
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
        return;
    }

    tlb_flush_page(cs, value);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
        return;
    }

    tlb_flush(cs);
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
        return;
    }

    tlb_flush_page(cs, value);
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0 |
                        ARMMMUIdxBit_Stage2);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0 |
                                        ARMMMUIdxBit_Stage2);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
}
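
/*
 * Worked example (editorial, not in the original file): for TLBIIPAS2 the
 * register value holds IPA[39:12] in its low bits, so
 *
 *     pageaddr = sextract64(value << 12, 0, 40);
 *
 * with value = 0x0123456 gives pageaddr = 0x123456000, i.e. the IPA shifted
 * back into a 40-bit, page-aligned address.
 */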
static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_Stage2);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        uint32_t mask = 0;

        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }

    return value;
}
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) } },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
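
/*
 * Worked example (editorial, not in the original file): with PMCR.N == 4
 * event counters, pmu_counter_mask() yields
 *
 *     (1 << 31) | ((1 << 4) - 1) == 0x8000000f
 *
 * i.e. the cycle counter bit plus one bit per implemented event counter.
 */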
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif
static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x11
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];
/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them.
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
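
/*
 * Worked example (editorial, not in the original file): event 0x011
 * (CPU_CYCLES) has bit 5 clear, so it lands in PMCEID0 with
 * event_mask == 1ULL << (0x11 & 0x1f), i.e. bit 17; an event number with
 * bit 5 set, such as 0x020, would instead set bit 0 of PMCEID1.
 */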
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            (env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
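
/*
 * Worked example (editorial, not in the original file) of the delta scheme
 * used by pmccntr_op_start()/pmccntr_op_finish(): while the counter is
 * enabled,
 *
 *     guest_count = underlying_cycles - c15_ccnt_delta
 *
 * so if the underlying clock reads 1000 when the guest zeroes PMCCNTR, the
 * delta becomes 1000, and an underlying reading of 1600 later shows the
 * guest a count of 600. finish() re-derives the delta so the counter value
 * stays frozen while it is disabled.
 */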
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint64_t count = 0;
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;

    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}
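
/*
 * Editorial note (not in the original file): the overflow test used above
 * (and again in pmswinc_write() below),
 *
 *     old & ~new & INT32_MIN
 *
 * is non-zero exactly when bit 31 goes from 1 to 0, i.e. when the 32-bit
 * counter wrapped. For example old = 0xffffffff, new = 0x00000000 triggers
 * it, while old = 0x7fffffff, new = 0x80000000 does not.
 */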
static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}
void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}
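
/*
 * Usage sketch (editorial, not in the original file): any code that needs
 * the architecturally-visible counter state brackets its work with the pair
 * above, as the register writefns below do:
 *
 *     pmu_op_start(env);      // materialise guest-visible counts
 *     ...read or modify PMCCNTR/PMEVCNTR<n> state...
 *     pmu_op_finish(env);     // re-arm deltas and the overflow timer
 */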
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}
static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}
static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}
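
/*
 * Worked example (editorial, not in the original file): the PMEVTYPER<n>
 * and PMEVCNTR<n> encodings pack the counter index into crm/opc2, decoded
 * above as ((crm & 3) << 3) | (opc2 & 7). PMEVTYPER11 (crm = 13, opc2 = 3)
 * gives ((13 & 3) << 3) | 3 == 11.
 */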
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}
static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state.  */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
        valid_mask &= ~SCR_NET;
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= SCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= SCR_API | SCR_APK;
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}
static CPAccessResult access_aa64_tid2(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint64_t ret = 0;
    bool allow_virt = (arm_current_el(env) == 1 &&
                       (!arm_is_secure_below_el3(env) ||
                        (env->cp15.scr_el3 & SCR_EEL2)));

    if (allow_virt && (hcr_el2 & HCR_IMO)) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (allow_virt && (hcr_el2 & HCR_FMO)) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW,
      .accessfn = access_aa64_tid2,
      .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST,
      .accessfn = access_aa64_tid1,
      .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);
    uint64_t hcr;
    uint32_t cntkctl;

    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            cntkctl = env->cp15.cnthctl_el2;
        } else {
            cntkctl = env->cp15.c14_cntkctl;
        }
        if (!extract32(cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }

        /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
        if (hcr & HCR_E2H) {
            if (timeridx == GTIMER_PHYS &&
                !extract32(env->cp15.cnthctl_el2, 10, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        } else {
            /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
            if (arm_feature(env, ARM_FEATURE_EL2) &&
                timeridx == GTIMER_PHYS && !secure &&
                !extract32(env->cp15.cnthctl_el2, 1, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;

    case 1:
        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
        if (arm_feature(env, ARM_FEATURE_EL2) &&
            timeridx == GTIMER_PHYS && !secure &&
            (hcr & HCR_E2H
             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}
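/*
 * Illustrative sketch of the gating above (not part of the original
 * logic): the low bits of CNTKCTL gate EL0 access to the counters, and
 * the GTIMER_PHYS/GTIMER_VIRT indices are assumed to line up with the
 * EL0PCTEN/EL0VCTEN bit positions, which is what the
 * extract32(..., timeridx, 1) relies on. For example, an EL1 kernel
 * that wants EL0 to read CNTVCT but not CNTPCT would in effect do:
 *
 *     env->cp15.c14_cntkctl |= 1 << GTIMER_VIRT;     // set EL0VCTEN
 *     env->cp15.c14_cntkctl &= ~(1 << GTIMER_PHYS);  // clear EL0PCTEN
 *
 * after which gt_counter_access(env, GTIMER_PHYS, true) at EL0 returns
 * CP_ACCESS_TRAP while the GTIMER_VIRT case returns CP_ACCESS_OK
 * (assuming no EL2 traps apply).
 */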
static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /*
         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
         * EL0 if EL0[PV]TEN is zero.
         */
        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */

    case 1:
        if (arm_feature(env, ARM_FEATURE_EL2) &&
            timeridx == GTIMER_PHYS && !secure) {
            if (hcr & HCR_E2H) {
                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
        } else {
            timer_mod(cpu->gt_timer[timeridx], nexttick);
        }
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
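/*
 * Worked example for gt_recalc_timer() (values are illustrative): with
 * cval = 1000, cntvoff_el2 = 0 and a current count of 600,
 * "count - offset >= gt->cval" is false, so ISTATUS is 0 and the
 * QEMUTimer is programmed for the tick at which ISTATUS flips
 * (nexttick = 1000). When that deadline fires, the timer callback
 * re-runs this function: ISTATUS becomes 1, the IRQ line is raised if
 * IMASK is clear, and the timer is pushed out to UINT64_MAX since the
 * next status change would only be on counter roll-over. The unsigned
 * 64-bit comparison also behaves correctly once the count has wrapped
 * past cval.
 */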
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = env_archcpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_offset(CPUARMState *env)
{
    uint64_t hcr;

    switch (arm_current_el(env)) {
    case 2:
        hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_E2H) {
            return 0;
        }
        break;
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return 0;
        }
        break;
    }

    return env->cp15.cntvoff_el2;
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
}
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}
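/*
 * Note on the TVAL arithmetic above (example values are illustrative):
 * TVAL is the signed 32-bit downcounter view, TVAL = CVAL - CNT. If
 * the (offset-adjusted) count is 500 and software writes TVAL = 100,
 * we store cval = 500 + 100 = 600; reading TVAL back just before
 * expiry yields 600 - 599 = 1. The sextract64(value, 0, 32) is what
 * makes a write of a "negative" TVAL (e.g. 0xffffffff, i.e. -1) land
 * the deadline in the past, so the timer fires immediately.
 */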
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
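/*
 * CNT[PV]_CTL bit layout assumed above: bit 0 = ENABLE, bit 1 = IMASK,
 * bit 2 = ISTATUS (read-only, maintained by gt_recalc_timer). The
 * deposit64(oldval, 0, 2, value) therefore lets the guest write only
 * ENABLE and IMASK while preserving ISTATUS. Illustration: a guest
 * write of 0x3 (enable + mask) while ISTATUS is pending (oldval 0x5)
 * leaves ctl = 0x7 but drives the IRQ line low, because IMASK is now
 * set.
 */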
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static int gt_phys_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return GTIMER_HYP;
    default:
        return GTIMER_PHYS;
    }
}

static int gt_virt_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return GTIMER_HYPVIRT;
    default:
        return GTIMER_VIRT;
    }
}
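/*
 * Redirection summary (a sketch of the mapping the two helpers above
 * implement): when we are executing in an EL2&0 translation regime
 * (mmu_idx E20_0/E20_2/E20_2_PAN, i.e. HCR_EL2.E2H in effect), the
 * EL0-named accessors are really talking to the EL2 timers:
 *
 *     CNTP_CTL_EL0, CNTP_CVAL_EL0, ...  ->  GTIMER_HYP      (physical)
 *     CNTV_CTL_EL0, CNTV_CVAL_EL0, ...  ->  GTIMER_HYPVIRT  (virtual)
 *
 * otherwise they map to GTIMER_PHYS/GTIMER_VIRT as usual.
 */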
static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}
static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
}

static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
}

static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
}
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}

void arm_gt_hvtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYPVIRT);
}

static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
}
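/*
 * Example of the frequency/period relationship assumed throughout this
 * file (values illustrative): gt_cntfrq_period_ns(cpu) is the tick
 * period derived from cpu->gt_cntfrq_hz, so with the default 62.5 MHz
 * timer (NANOSECONDS_PER_SECOND / GTIMER_SCALE) one counter tick is
 * 16 ns, and gt_get_countervalue() divides the ns-resolution
 * QEMU_CLOCK_VIRTUAL by that period to get the architected count.
 */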
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

#else

/* In user-mode most of the generic timer registers are inaccessible
 * however modern kernels (4.12+) allow access to cntvct_el0
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};

#endif
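/*
 * In the CONFIG_USER_ONLY variant above, the counter is thus just
 * scaled host clock time. For instance (illustrative): a guest
 * "mrs x0, cntvct_el0" lands in that gt_virt_cnt_read() and returns
 * cpu_get_clock() divided by a 16 ns tick period for the default
 * 62.5 MHz cntfrq, matching the CNTFRQ_EL0 constant exposed to the
 * guest so that its time conversions come out right.
 */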
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}

#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (ret) {
        /*
         * Some kinds of translation fault must cause exceptions rather
         * than being reported in the PAR.
         */
        int current_el = arm_current_el(env);
        int target_el;
        uint32_t syn, fsr, fsc;
        bool take_exc = false;

        if (fi.s1ptw && current_el == 1 && !arm_is_secure(env)
            && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
            /*
             * Synchronous stage 2 fault on an access made as part of the
             * translation table walk for AT S1E0* or AT S1E1* insn
             * executed from NS EL1. If this is a synchronous external abort
             * and SCR_EL3.EA == 1, then we take a synchronous external abort
             * to EL3. Otherwise the fault is taken as an exception to EL2,
             * and HPFAR_EL2 holds the faulting IPA.
             */
            if (fi.type == ARMFault_SyncExternalOnWalk &&
                (env->cp15.scr_el3 & SCR_EA)) {
                target_el = 3;
            } else {
                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
                target_el = 2;
            }
            take_exc = true;
        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
            /*
             * Synchronous external aborts during a translation table walk
             * are taken as Data Abort exceptions.
             */
            if (fi.stage2) {
                if (current_el == 3) {
                    target_el = 3;
                } else {
                    target_el = 2;
                }
            } else {
                target_el = exception_target_el(env);
            }
            take_exc = true;
        }

        if (take_exc) {
            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
                fsr = arm_fi_to_lfsc(&fi);
                fsc = extract32(fsr, 0, 6);
            } else {
                fsr = arm_fi_to_sfsc(&fi);
                fsc = 0x3f;
            }
            /*
             * Report exception with ESR indicating a fault due to a
             * translation table walk for a cache maintenance instruction.
             */
            syn = syn_data_abort_no_iss(current_el == target_el,
                                        fi.ea, 1, fi.s1ptw, 1, fsc);
            env->exception.vaddress = value;
            env->exception.fsr = fsr;
            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
        }
    }

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_E10_0 ||
                mmu_idx == ARMMMUIdx_E10_1 ||
                mmu_idx == ARMMMUIdx_E10_1_PAN) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
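/*
 * Example of the 64-bit PAR assembled above for a successful walk
 * (numbers are illustrative): a translation ending at PA 0x40001000,
 * non-secure, with normal write-back attributes (0xff) and inner
 * shareable (SH = 3) yields
 *
 *     par64 = 0x40001000        // PA, low 12 bits masked off
 *           | (1 << 11)         // LPAE-format PAR
 *           | (1 << 9)          // NS
 *           | (3ULL << 7)       // SH
 *           | (0xffULL << 56);  // MAIR-style attributes
 *
 * On failure, bit 0 (F) is set and bits [6:1] carry the long-format
 * fault status code instead.
 */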
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE3;
            break;
        case 2:
            g_assert(!secure);  /* TODO: ARMv8.4-SecEL2 */
            /* fall through */
        case 1:
            if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
                mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN
                           : ARMMMUIdx_Stage1_E1_PAN);
            } else {
                mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE10_0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_Stage1_E0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_E10_1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
            if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
                mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN
                           : ARMMMUIdx_Stage1_E1_PAN);
            } else {
                mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
            }
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_SE3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format. */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}

static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
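/*
 * Worked example for the two conversions above (values illustrative):
 * the extended format gives each region a 4-bit AP slot of which only
 * the low 2 bits are significant, while the simple format packs 2-bit
 * fields back to back. With region 0 = AP 0b01 and region 1 = AP 0b10:
 *
 *     extended_mpu_ap_bits(0x9) == 0x21   // 0b1001 -> 0b0010_0001
 *     simple_mpu_ap_bits(0x21)  == 0x9    // and back again
 *
 * so a write through the simple view round-trips through the extended
 * storage without losing any per-region permissions.
 */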
static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}

static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}
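/*
 * Worked example of the short-descriptor mask computation above
 * (illustrative): with TTBCR.N = 2 (maskshift == 2),
 *
 *     tcr->mask      = ~(0xffffffffu >> 2) = 0xc0000000
 *     tcr->base_mask = ~(0x3fffu >> 2)     = 0xfffff000
 *
 * i.e. VAs with either of the top two bits set are translated via
 * TTBR1, and the TTBR0 table base is aligned to (16KB >> 2) = 4KB.
 */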
static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = env_archcpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * If we are running with E2&0 regime, then an ASID is active.
     * Flush if that might be changing.  Note we're not checking
     * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
     * holds the active ASID, only checking the field that might.
     */
    if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
        (arm_hcr_el2_eff(env) & HCR_E2H)) {
        tlb_flush_by_mmuidx(env_cpu(env),
                            ARMMMUIdxBit_E20_2 |
                            ARMMMUIdxBit_E20_2_PAN |
                            ARMMMUIdxBit_E20_0);
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    /*
     * A change in VMID to the stage2 page table (Stage2) invalidates
     * the combined stage 1&2 tlbs (EL10_1 and EL10_0).
     */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_E10_1 |
                            ARMMMUIdxBit_E10_1_PAN |
                            ARMMMUIdxBit_E10_0 |
                            ARMMMUIdxBit_Stage2);
        raw_write(env, ri, value);
    }
}
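/*
 * Note on vttbr_write() above (a sketch of the intent, not new
 * behaviour): VTTBR_EL2 carries the stage-2 VMID in its top bits, so a
 * plain raw_read() != value comparison is a sufficient trigger here.
 * Any write that could have changed the VMID (or the table base) drops
 * the combined stage 1+2 TLB entries for the EL1&0 regime together
 * with the stage-2 entries themselves.
 */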

static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el12_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};

/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
                           offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
};

static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}

static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};

static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}
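
/*
 * For reference (MPIDR_EL1 layout in the architecture manual):
 * mp_affinity packs Aff0 in bits [7:0], Aff1 in [15:8], Aff2 in [23:16]
 * and, for AArch64, Aff3 in [39:32]; bit 31 is RES1 when the MP
 * extensions are implemented, which is what the (1U << 31) above sets.
 */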

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};

static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_PAN;
}

static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
}

static const ARMCPRegInfo pan_reginfo = {
    .name = "PAN", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_pan_read, .writefn = aa64_pan_write
};

static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_UAO;
}

static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
}

static const ARMCPRegInfo uao_reginfo = {
    .name = "UAO", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_uao_read, .writefn = aa64_uao_write
};

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance
 * instructions
 * Page D4-1736 (DDI0487A.b)
 */

static int vae1_tlbmask(CPUARMState *env)
{
    /* Since we exclude secure first, we may read HCR_EL2 directly. */
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE10_1 |
               ARMMMUIdxBit_SE10_1_PAN |
               ARMMMUIdxBit_SE10_0;
    } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE))
               == (HCR_E2H | HCR_TGE)) {
        return ARMMMUIdxBit_E20_2 |
               ARMMMUIdxBit_E20_2_PAN |
               ARMMMUIdxBit_E20_0;
    } else {
        return ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0;
    }
}
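
/*
 * The three cases above correspond to the three EL1&0-style translation
 * regimes QEMU models: Secure EL1&0, EL2&0 (when HCR_EL2.{E2H,TGE} is
 * {1,1} the "host" regime hosts EL0), and plain Non-secure EL1&0.
 * Each regime has normal and PAN variants of its mmu indexes, all of
 * which must be flushed together for a VAE1-style invalidation.
 */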

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
    } else {
        tlb_flush_by_mmuidx(cs, mask);
    }
}

static int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE10_1 |
               ARMMMUIdxBit_SE10_1_PAN |
               ARMMMUIdxBit_SE10_0;
    } else if (arm_feature(env, ARM_FEATURE_EL2)) {
        return ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0 |
               ARMMMUIdxBit_Stage2;
    } else {
        return ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0;
    }
}

static int e2_tlbmask(CPUARMState *env)
{
    /* TODO: ARMv8.4-SecEL2 */
    return ARMMMUIdxBit_E20_0 |
           ARMMMUIdxBit_E20_2 |
           ARMMMUIdxBit_E20_2_PAN |
           ARMMMUIdxBit_E2;
}
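
/*
 * Each ARMMMUIdxBit_* constant is a one-bit mask naming one of QEMU's
 * per-mmu-index softmmu TLBs; the masks assembled by the *_tlbmask()
 * helpers are handed straight to tlb_flush_by_mmuidx() and to the
 * page-level and all-cpus-synced variants used by the TLBI write
 * functions below.
 */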

static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
}

static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (tlb_force_broadcast(env)) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
    }
}
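
/*
 * On the address decoding used by the TLBI-by-VA handlers above: the
 * register argument carries VA[55:12] in its low bits, so
 * sextract64(value << 12, 0, 56) rebuilds the page-aligned virtual
 * address, sign-extending from bit 55 to recover the canonical
 * upper-half addresses used with TTBR1.
 */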

static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_Stage2);
}

static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TDZ) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TDZ) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}

static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented.  */
    /* This may enable/disable the MMU, so do a TLB flush.  */
    tlb_flush(CPU(cpu));

    if (ri->type & ARM_CP_SUPPRESS_TB_END) {
        /*
         * Normally we would always end the TB on an SCTLR write; see the
         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
         * is special.  Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
         * of hflags from the translator, so do it here.
         */
        arm_rebuild_hflags(env);
    }
}

static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}

static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};

/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_CONST,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /* Begin with bits defined in base ARMv8.0.  */
    uint64_t valid_mask = MAKE_64BIT_MASK(0, 34);

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }
    if (cpu_isar_feature(aa64_vh, cpu)) {
        valid_mask |= HCR_E2H;
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= HCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= HCR_API | HCR_APK;
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    hcr_write(env, NULL, value);
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    hcr_write(env, NULL, value);
}
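
/*
 * An AArch32 hypervisor therefore sees HCR_EL2 as the HCR/HCR2 pair:
 * each 32-bit write is merged into the 64-bit backing field with
 * deposit64() above and then run through the same hcr_write() masking
 * and side effects as a 64-bit MSR of HCR_EL2 would be.
 */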

/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (arm_is_secure_below_el3(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this condition is
         * arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        return 0;
    } else if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.4.  */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}
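
/*
 * Callers are expected to use arm_hcr_el2_eff() rather than reading
 * cp15.hcr_el2 directly whenever the TGE/E2H overrides matter; e.g.
 * vmsa_tcr_ttbr_el2_write() above tests arm_hcr_el2_eff(env) & HCR_E2H
 * to decide whether an E2&0 ASID might be changing.
 */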

static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0x3 << 10);
        value |= env->cp15.cptr_el[2] & (0x3 << 10);
    }
    env->cp15.cptr_el[2] = value;
}

static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= 0x3 << 10;
    }
    return value;
}

static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
      /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
5353 #ifndef CONFIG_USER_ONLY
5354 /* Unlike the other EL2-related AT operations, these must
5355 * UNDEF from EL3 if EL2 is not implemented, which is why we
5356 * define them here rather than with the rest of the AT ops.
5358 { .name
= "AT_S1E2R", .state
= ARM_CP_STATE_AA64
,
5359 .opc0
= 1, .opc1
= 4, .crn
= 7, .crm
= 8, .opc2
= 0,
5360 .access
= PL2_W
, .accessfn
= at_s1e2_access
,
5361 .type
= ARM_CP_NO_RAW
| ARM_CP_RAISES_EXC
, .writefn
= ats_write64
},
5362 { .name
= "AT_S1E2W", .state
= ARM_CP_STATE_AA64
,
5363 .opc0
= 1, .opc1
= 4, .crn
= 7, .crm
= 8, .opc2
= 1,
5364 .access
= PL2_W
, .accessfn
= at_s1e2_access
,
5365 .type
= ARM_CP_NO_RAW
| ARM_CP_RAISES_EXC
, .writefn
= ats_write64
},
5366 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
5367 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
5368 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
5369 * to behave as if SCR.NS was 1.
5371 { .name
= "ATS1HR", .cp
= 15, .opc1
= 4, .crn
= 7, .crm
= 8, .opc2
= 0,
5373 .writefn
= ats1h_write
, .type
= ARM_CP_NO_RAW
| ARM_CP_RAISES_EXC
},
5374 { .name
= "ATS1HW", .cp
= 15, .opc1
= 4, .crn
= 7, .crm
= 8, .opc2
= 1,
5376 .writefn
= ats1h_write
, .type
= ARM_CP_NO_RAW
| ARM_CP_RAISES_EXC
},
5377 { .name
= "CNTHCTL_EL2", .state
= ARM_CP_STATE_BOTH
,
5378 .opc0
= 3, .opc1
= 4, .crn
= 14, .crm
= 1, .opc2
= 0,
5379 /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
5380 * reset values as IMPDEF. We choose to reset to 3 to comply with
5381 * both ARMv7 and ARMv8.
5383 .access
= PL2_RW
, .resetvalue
= 3,
5384 .fieldoffset
= offsetof(CPUARMState
, cp15
.cnthctl_el2
) },
5385 { .name
= "CNTVOFF_EL2", .state
= ARM_CP_STATE_AA64
,
5386 .opc0
= 3, .opc1
= 4, .crn
= 14, .crm
= 0, .opc2
= 3,
5387 .access
= PL2_RW
, .type
= ARM_CP_IO
, .resetvalue
= 0,
5388 .writefn
= gt_cntvoff_write
,
5389 .fieldoffset
= offsetof(CPUARMState
, cp15
.cntvoff_el2
) },
5390 { .name
= "CNTVOFF", .cp
= 15, .opc1
= 4, .crm
= 14,
5391 .access
= PL2_RW
, .type
= ARM_CP_64BIT
| ARM_CP_ALIAS
| ARM_CP_IO
,
5392 .writefn
= gt_cntvoff_write
,
5393 .fieldoffset
= offsetof(CPUARMState
, cp15
.cntvoff_el2
) },
5394 { .name
= "CNTHP_CVAL_EL2", .state
= ARM_CP_STATE_AA64
,
5395 .opc0
= 3, .opc1
= 4, .crn
= 14, .crm
= 2, .opc2
= 2,
5396 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_HYP
].cval
),
5397 .type
= ARM_CP_IO
, .access
= PL2_RW
,
5398 .writefn
= gt_hyp_cval_write
, .raw_writefn
= raw_write
},
5399 { .name
= "CNTHP_CVAL", .cp
= 15, .opc1
= 6, .crm
= 14,
5400 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_HYP
].cval
),
5401 .access
= PL2_RW
, .type
= ARM_CP_64BIT
| ARM_CP_IO
,
5402 .writefn
= gt_hyp_cval_write
, .raw_writefn
= raw_write
},
5403 { .name
= "CNTHP_TVAL_EL2", .state
= ARM_CP_STATE_BOTH
,
5404 .opc0
= 3, .opc1
= 4, .crn
= 14, .crm
= 2, .opc2
= 0,
5405 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL2_RW
,
5406 .resetfn
= gt_hyp_timer_reset
,
5407 .readfn
= gt_hyp_tval_read
, .writefn
= gt_hyp_tval_write
},
5408 { .name
= "CNTHP_CTL_EL2", .state
= ARM_CP_STATE_BOTH
,
5410 .opc0
= 3, .opc1
= 4, .crn
= 14, .crm
= 2, .opc2
= 1,
5412 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_HYP
].ctl
),
5414 .writefn
= gt_hyp_ctl_write
, .raw_writefn
= raw_write
},
5416 /* The only field of MDCR_EL2 that has a defined architectural reset value
5417 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
5418 * don't implement any PMU event counters, so using zero as a reset
5419 * value for MDCR_EL2 is okay
5421 { .name
= "MDCR_EL2", .state
= ARM_CP_STATE_BOTH
,
5422 .opc0
= 3, .opc1
= 4, .crn
= 1, .crm
= 1, .opc2
= 1,
5423 .access
= PL2_RW
, .resetvalue
= 0,
5424 .fieldoffset
= offsetof(CPUARMState
, cp15
.mdcr_el2
), },
5425 { .name
= "HPFAR", .state
= ARM_CP_STATE_AA32
,
5426 .cp
= 15, .opc1
= 4, .crn
= 6, .crm
= 0, .opc2
= 4,
5427 .access
= PL2_RW
, .accessfn
= access_el3_aa32ns
,
5428 .fieldoffset
= offsetof(CPUARMState
, cp15
.hpfar_el2
) },
5429 { .name
= "HPFAR_EL2", .state
= ARM_CP_STATE_AA64
,
5430 .opc0
= 3, .opc1
= 4, .crn
= 6, .crm
= 0, .opc2
= 4,
5432 .fieldoffset
= offsetof(CPUARMState
, cp15
.hpfar_el2
) },
5433 { .name
= "HSTR_EL2", .state
= ARM_CP_STATE_BOTH
,
5434 .cp
= 15, .opc0
= 3, .opc1
= 4, .crn
= 1, .crm
= 1, .opc2
= 3,
5436 .fieldoffset
= offsetof(CPUARMState
, cp15
.hstr_el2
) },
5440 static const ARMCPRegInfo el2_v8_cp_reginfo
[] = {
5441 { .name
= "HCR2", .state
= ARM_CP_STATE_AA32
,
5442 .type
= ARM_CP_ALIAS
| ARM_CP_IO
,
5443 .cp
= 15, .opc1
= 4, .crn
= 1, .crm
= 1, .opc2
= 4,
5445 .fieldoffset
= offsetofhigh32(CPUARMState
, cp15
.hcr_el2
),
5446 .writefn
= hcr_writehigh
},
5450 static CPAccessResult
nsacr_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5453 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
5454 * At Secure EL1 it traps to EL3.
5456 if (arm_current_el(env
) == 3) {
5457 return CP_ACCESS_OK
;
5459 if (arm_is_secure_below_el3(env
)) {
5460 return CP_ACCESS_TRAP_EL3
;
5462 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
5464 return CP_ACCESS_OK
;
5466 return CP_ACCESS_TRAP_UNCATEGORIZED
;
5469 static const ARMCPRegInfo el3_cp_reginfo
[] = {
5470 { .name
= "SCR_EL3", .state
= ARM_CP_STATE_AA64
,
5471 .opc0
= 3, .opc1
= 6, .crn
= 1, .crm
= 1, .opc2
= 0,
5472 .access
= PL3_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.scr_el3
),
5473 .resetvalue
= 0, .writefn
= scr_write
},
5474 { .name
= "SCR", .type
= ARM_CP_ALIAS
| ARM_CP_NEWEL
,
5475 .cp
= 15, .opc1
= 0, .crn
= 1, .crm
= 1, .opc2
= 0,
5476 .access
= PL1_RW
, .accessfn
= access_trap_aa32s_el1
,
5477 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.scr_el3
),
5478 .writefn
= scr_write
},
5479 { .name
= "SDER32_EL3", .state
= ARM_CP_STATE_AA64
,
5480 .opc0
= 3, .opc1
= 6, .crn
= 1, .crm
= 1, .opc2
= 1,
5481 .access
= PL3_RW
, .resetvalue
= 0,
5482 .fieldoffset
= offsetof(CPUARMState
, cp15
.sder
) },
5484 .cp
= 15, .opc1
= 0, .crn
= 1, .crm
= 1, .opc2
= 1,
5485 .access
= PL3_RW
, .resetvalue
= 0,
5486 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.sder
) },
5487 { .name
= "MVBAR", .cp
= 15, .opc1
= 0, .crn
= 12, .crm
= 0, .opc2
= 1,
5488 .access
= PL1_RW
, .accessfn
= access_trap_aa32s_el1
,
5489 .writefn
= vbar_write
, .resetvalue
= 0,
5490 .fieldoffset
= offsetof(CPUARMState
, cp15
.mvbar
) },
5491 { .name
= "TTBR0_EL3", .state
= ARM_CP_STATE_AA64
,
5492 .opc0
= 3, .opc1
= 6, .crn
= 2, .crm
= 0, .opc2
= 0,
5493 .access
= PL3_RW
, .resetvalue
= 0,
5494 .fieldoffset
= offsetof(CPUARMState
, cp15
.ttbr0_el
[3]) },
5495 { .name
= "TCR_EL3", .state
= ARM_CP_STATE_AA64
,
5496 .opc0
= 3, .opc1
= 6, .crn
= 2, .crm
= 0, .opc2
= 2,
5498 /* no .writefn needed as this can't cause an ASID change;
5499 * we must provide a .raw_writefn and .resetfn because we handle
5500 * reset and migration for the AArch32 TTBCR(S), which might be
5501 * using mask and base_mask.
5503 .resetfn
= vmsa_ttbcr_reset
, .raw_writefn
= vmsa_ttbcr_raw_write
,
5504 .fieldoffset
= offsetof(CPUARMState
, cp15
.tcr_el
[3]) },
5505 { .name
= "ELR_EL3", .state
= ARM_CP_STATE_AA64
,
5506 .type
= ARM_CP_ALIAS
,
5507 .opc0
= 3, .opc1
= 6, .crn
= 4, .crm
= 0, .opc2
= 1,
5509 .fieldoffset
= offsetof(CPUARMState
, elr_el
[3]) },
5510 { .name
= "ESR_EL3", .state
= ARM_CP_STATE_AA64
,
5511 .opc0
= 3, .opc1
= 6, .crn
= 5, .crm
= 2, .opc2
= 0,
5512 .access
= PL3_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.esr_el
[3]) },
5513 { .name
= "FAR_EL3", .state
= ARM_CP_STATE_AA64
,
5514 .opc0
= 3, .opc1
= 6, .crn
= 6, .crm
= 0, .opc2
= 0,
5515 .access
= PL3_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.far_el
[3]) },
5516 { .name
= "SPSR_EL3", .state
= ARM_CP_STATE_AA64
,
5517 .type
= ARM_CP_ALIAS
,
5518 .opc0
= 3, .opc1
= 6, .crn
= 4, .crm
= 0, .opc2
= 0,
5520 .fieldoffset
= offsetof(CPUARMState
, banked_spsr
[BANK_MON
]) },
5521 { .name
= "VBAR_EL3", .state
= ARM_CP_STATE_AA64
,
5522 .opc0
= 3, .opc1
= 6, .crn
= 12, .crm
= 0, .opc2
= 0,
5523 .access
= PL3_RW
, .writefn
= vbar_write
,
5524 .fieldoffset
= offsetof(CPUARMState
, cp15
.vbar_el
[3]),
5526 { .name
= "CPTR_EL3", .state
= ARM_CP_STATE_AA64
,
5527 .opc0
= 3, .opc1
= 6, .crn
= 1, .crm
= 1, .opc2
= 2,
5528 .access
= PL3_RW
, .accessfn
= cptr_access
, .resetvalue
= 0,
5529 .fieldoffset
= offsetof(CPUARMState
, cp15
.cptr_el
[3]) },
5530 { .name
= "TPIDR_EL3", .state
= ARM_CP_STATE_AA64
,
5531 .opc0
= 3, .opc1
= 6, .crn
= 13, .crm
= 0, .opc2
= 2,
5532 .access
= PL3_RW
, .resetvalue
= 0,
5533 .fieldoffset
= offsetof(CPUARMState
, cp15
.tpidr_el
[3]) },
5534 { .name
= "AMAIR_EL3", .state
= ARM_CP_STATE_AA64
,
5535 .opc0
= 3, .opc1
= 6, .crn
= 10, .crm
= 3, .opc2
= 0,
5536 .access
= PL3_RW
, .type
= ARM_CP_CONST
,
5538 { .name
= "AFSR0_EL3", .state
= ARM_CP_STATE_BOTH
,
5539 .opc0
= 3, .opc1
= 6, .crn
= 5, .crm
= 1, .opc2
= 0,
5540 .access
= PL3_RW
, .type
= ARM_CP_CONST
,
5542 { .name
= "AFSR1_EL3", .state
= ARM_CP_STATE_BOTH
,
5543 .opc0
= 3, .opc1
= 6, .crn
= 5, .crm
= 1, .opc2
= 1,
5544 .access
= PL3_RW
, .type
= ARM_CP_CONST
,
5546 { .name
= "TLBI_ALLE3IS", .state
= ARM_CP_STATE_AA64
,
5547 .opc0
= 1, .opc1
= 6, .crn
= 8, .crm
= 3, .opc2
= 0,
5548 .access
= PL3_W
, .type
= ARM_CP_NO_RAW
,
5549 .writefn
= tlbi_aa64_alle3is_write
},
5550 { .name
= "TLBI_VAE3IS", .state
= ARM_CP_STATE_AA64
,
5551 .opc0
= 1, .opc1
= 6, .crn
= 8, .crm
= 3, .opc2
= 1,
5552 .access
= PL3_W
, .type
= ARM_CP_NO_RAW
,
5553 .writefn
= tlbi_aa64_vae3is_write
},
5554 { .name
= "TLBI_VALE3IS", .state
= ARM_CP_STATE_AA64
,
5555 .opc0
= 1, .opc1
= 6, .crn
= 8, .crm
= 3, .opc2
= 5,
5556 .access
= PL3_W
, .type
= ARM_CP_NO_RAW
,
5557 .writefn
= tlbi_aa64_vae3is_write
},
5558 { .name
= "TLBI_ALLE3", .state
= ARM_CP_STATE_AA64
,
5559 .opc0
= 1, .opc1
= 6, .crn
= 8, .crm
= 7, .opc2
= 0,
5560 .access
= PL3_W
, .type
= ARM_CP_NO_RAW
,
5561 .writefn
= tlbi_aa64_alle3_write
},
5562 { .name
= "TLBI_VAE3", .state
= ARM_CP_STATE_AA64
,
5563 .opc0
= 1, .opc1
= 6, .crn
= 8, .crm
= 7, .opc2
= 1,
5564 .access
= PL3_W
, .type
= ARM_CP_NO_RAW
,
5565 .writefn
= tlbi_aa64_vae3_write
},
5566 { .name
= "TLBI_VALE3", .state
= ARM_CP_STATE_AA64
,
5567 .opc0
= 1, .opc1
= 6, .crn
= 8, .crm
= 7, .opc2
= 5,
5568 .access
= PL3_W
, .type
= ARM_CP_NO_RAW
,
5569 .writefn
= tlbi_aa64_vae3_write
},
5573 #ifndef CONFIG_USER_ONLY
5574 /* Test if system register redirection is to occur in the current state. */
5575 static bool redirect_for_e2h(CPUARMState
*env
)
5577 return arm_current_el(env
) == 2 && (arm_hcr_el2_eff(env
) & HCR_E2H
);
5580 static uint64_t el2_e2h_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
5584 if (redirect_for_e2h(env
)) {
5585 /* Switch to the saved EL2 version of the register. */
5587 readfn
= ri
->readfn
;
5589 readfn
= ri
->orig_readfn
;
5591 if (readfn
== NULL
) {
5594 return readfn(env
, ri
);
5597 static void el2_e2h_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5602 if (redirect_for_e2h(env
)) {
5603 /* Switch to the saved EL2 version of the register. */
5605 writefn
= ri
->writefn
;
5607 writefn
= ri
->orig_writefn
;
5609 if (writefn
== NULL
) {
5610 writefn
= raw_write
;
5612 writefn(env
, ri
, value
);
5615 static void define_arm_vh_e2h_redirects_aliases(ARMCPU
*cpu
)
5618 uint32_t src_key
, dst_key
, new_key
;
5619 const char *src_name
, *dst_name
, *new_name
;
5620 bool (*feature
)(const ARMISARegisters
*id
);
5623 #define K(op0, op1, crn, crm, op2) \
5624 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
5626 static const struct E2HAlias aliases
[] = {
5627 { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0),
5628 "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
5629 { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2),
5630 "CPACR", "CPTR_EL2", "CPACR_EL12" },
5631 { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0),
5632 "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
5633 { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1),
5634 "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
5635 { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2),
5636 "TCR_EL1", "TCR_EL2", "TCR_EL12" },
5637 { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0),
5638 "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
5639 { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1),
5640 "ELR_EL1", "ELR_EL2", "ELR_EL12" },
5641 { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0),
5642 "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
5643 { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1),
5644 "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
5645 { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0),
5646 "ESR_EL1", "ESR_EL2", "ESR_EL12" },
5647 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0),
5648 "FAR_EL1", "FAR_EL2", "FAR_EL12" },
5649 { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
5650 "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
5651 { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
5652 "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
5653 { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
5654 "VBAR", "VBAR_EL2", "VBAR_EL12" },
5655 { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
5656 "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
5657 { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
5658 "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
5661 * Note that redirection of ZCR is mentioned in the description
5662 * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
5663 * not in the summary table.
5665 { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
5666 "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve
},
5668 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
5669 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
5675 for (i
= 0; i
< ARRAY_SIZE(aliases
); i
++) {
5676 const struct E2HAlias
*a
= &aliases
[i
];
5677 ARMCPRegInfo
*src_reg
, *dst_reg
;
5679 if (a
->feature
&& !a
->feature(&cpu
->isar
)) {
5683 src_reg
= g_hash_table_lookup(cpu
->cp_regs
, &a
->src_key
);
5684 dst_reg
= g_hash_table_lookup(cpu
->cp_regs
, &a
->dst_key
);
5685 g_assert(src_reg
!= NULL
);
5686 g_assert(dst_reg
!= NULL
);
5688 /* Cross-compare names to detect typos in the keys. */
5689 g_assert(strcmp(src_reg
->name
, a
->src_name
) == 0);
5690 g_assert(strcmp(dst_reg
->name
, a
->dst_name
) == 0);
5692 /* None of the core system registers use opaque; we will. */
5693 g_assert(src_reg
->opaque
== NULL
);
5695 /* Create alias before redirection so we dup the right data. */
5697 ARMCPRegInfo
*new_reg
= g_memdup(src_reg
, sizeof(ARMCPRegInfo
));
5698 uint32_t *new_key
= g_memdup(&a
->new_key
, sizeof(uint32_t));
5701 new_reg
->name
= a
->new_name
;
5702 new_reg
->type
|= ARM_CP_ALIAS
;
5703 /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
5704 new_reg
->access
&= PL2_RW
| PL3_RW
;
5706 ok
= g_hash_table_insert(cpu
->cp_regs
, new_key
, new_reg
);
5710 src_reg
->opaque
= dst_reg
;
5711 src_reg
->orig_readfn
= src_reg
->readfn
?: raw_read
;
5712 src_reg
->orig_writefn
= src_reg
->writefn
?: raw_write
;
5713 if (!src_reg
->raw_readfn
) {
5714 src_reg
->raw_readfn
= raw_read
;
5716 if (!src_reg
->raw_writefn
) {
5717 src_reg
->raw_writefn
= raw_write
;
5719 src_reg
->readfn
= el2_e2h_read
;
5720 src_reg
->writefn
= el2_e2h_write
;
5725 static CPAccessResult
ctr_el0_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5728 int cur_el
= arm_current_el(env
);
5731 uint64_t hcr
= arm_hcr_el2_eff(env
);
5734 if ((hcr
& (HCR_E2H
| HCR_TGE
)) == (HCR_E2H
| HCR_TGE
)) {
5735 if (!(env
->cp15
.sctlr_el
[2] & SCTLR_UCT
)) {
5736 return CP_ACCESS_TRAP_EL2
;
5739 if (!(env
->cp15
.sctlr_el
[1] & SCTLR_UCT
)) {
5740 return CP_ACCESS_TRAP
;
5742 if (hcr
& HCR_TID2
) {
5743 return CP_ACCESS_TRAP_EL2
;
5746 } else if (hcr
& HCR_TID2
) {
5747 return CP_ACCESS_TRAP_EL2
;
5751 if (arm_current_el(env
) < 2 && arm_hcr_el2_eff(env
) & HCR_TID2
) {
5752 return CP_ACCESS_TRAP_EL2
;
5755 return CP_ACCESS_OK
;
5758 static void oslar_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5761 /* Writes to OSLAR_EL1 may update the OS lock status, which can be
5762 * read via a bit in OSLSR_EL1.
5766 if (ri
->state
== ARM_CP_STATE_AA32
) {
5767 oslock
= (value
== 0xC5ACCE55);
5772 env
->cp15
.oslsr_el1
= deposit32(env
->cp15
.oslsr_el1
, 1, 1, oslock
);
5775 static const ARMCPRegInfo debug_cp_reginfo
[] = {
5776 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
5777 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
5778 * unlike DBGDRAR it is never accessible from EL0.
5779 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
5782 { .name
= "DBGDRAR", .cp
= 14, .crn
= 1, .crm
= 0, .opc1
= 0, .opc2
= 0,
5783 .access
= PL0_R
, .accessfn
= access_tdra
,
5784 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
5785 { .name
= "MDRAR_EL1", .state
= ARM_CP_STATE_AA64
,
5786 .opc0
= 2, .opc1
= 0, .crn
= 1, .crm
= 0, .opc2
= 0,
5787 .access
= PL1_R
, .accessfn
= access_tdra
,
5788 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
5789 { .name
= "DBGDSAR", .cp
= 14, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 0,
5790 .access
= PL0_R
, .accessfn
= access_tdra
,
5791 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
5792 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
5793 { .name
= "MDSCR_EL1", .state
= ARM_CP_STATE_BOTH
,
5794 .cp
= 14, .opc0
= 2, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 2,
5795 .access
= PL1_RW
, .accessfn
= access_tda
,
5796 .fieldoffset
= offsetof(CPUARMState
, cp15
.mdscr_el1
),
5798 /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
5799 * We don't implement the configurable EL0 access.
5801 { .name
= "MDCCSR_EL0", .state
= ARM_CP_STATE_BOTH
,
5802 .cp
= 14, .opc0
= 2, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 0,
5803 .type
= ARM_CP_ALIAS
,
5804 .access
= PL1_R
, .accessfn
= access_tda
,
5805 .fieldoffset
= offsetof(CPUARMState
, cp15
.mdscr_el1
), },
5806 { .name
= "OSLAR_EL1", .state
= ARM_CP_STATE_BOTH
,
5807 .cp
= 14, .opc0
= 2, .opc1
= 0, .crn
= 1, .crm
= 0, .opc2
= 4,
5808 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
5809 .accessfn
= access_tdosa
,
5810 .writefn
= oslar_write
},
5811 { .name
= "OSLSR_EL1", .state
= ARM_CP_STATE_BOTH
,
5812 .cp
= 14, .opc0
= 2, .opc1
= 0, .crn
= 1, .crm
= 1, .opc2
= 4,
5813 .access
= PL1_R
, .resetvalue
= 10,
5814 .accessfn
= access_tdosa
,
5815 .fieldoffset
= offsetof(CPUARMState
, cp15
.oslsr_el1
) },
5816 /* Dummy OSDLR_EL1: 32-bit Linux will read this */
5817 { .name
= "OSDLR_EL1", .state
= ARM_CP_STATE_BOTH
,
5818 .cp
= 14, .opc0
= 2, .opc1
= 0, .crn
= 1, .crm
= 3, .opc2
= 4,
5819 .access
= PL1_RW
, .accessfn
= access_tdosa
,
5820 .type
= ARM_CP_NOP
},
5821 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
5822 * implement vector catch debug events yet.
5825 .cp
= 14, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 0,
5826 .access
= PL1_RW
, .accessfn
= access_tda
,
5827 .type
= ARM_CP_NOP
},
5828 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
5829 * to save and restore a 32-bit guest's DBGVCR)
5831 { .name
= "DBGVCR32_EL2", .state
= ARM_CP_STATE_AA64
,
5832 .opc0
= 2, .opc1
= 4, .crn
= 0, .crm
= 7, .opc2
= 0,
5833 .access
= PL2_RW
, .accessfn
= access_tda
,
5834 .type
= ARM_CP_NOP
},
5835 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
5836 * Channel but Linux may try to access this register. The 32-bit
5837 * alias is DBGDCCINT.
5839 { .name
= "MDCCINT_EL1", .state
= ARM_CP_STATE_BOTH
,
5840 .cp
= 14, .opc0
= 2, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 0,
5841 .access
= PL1_RW
, .accessfn
= access_tda
,
5842 .type
= ARM_CP_NOP
},
5846 static const ARMCPRegInfo debug_lpae_cp_reginfo
[] = {
5847 /* 64 bit access versions of the (dummy) debug registers */
5848 { .name
= "DBGDRAR", .cp
= 14, .crm
= 1, .opc1
= 0,
5849 .access
= PL0_R
, .type
= ARM_CP_CONST
|ARM_CP_64BIT
, .resetvalue
= 0 },
5850 { .name
= "DBGDSAR", .cp
= 14, .crm
= 2, .opc1
= 0,
5851 .access
= PL0_R
, .type
= ARM_CP_CONST
|ARM_CP_64BIT
, .resetvalue
= 0 },
5855 /* Return the exception level to which exceptions should be taken
5856 * via SVEAccessTrap. If an exception should be routed through
5857 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
5858 * take care of raising that exception.
5859 * C.f. the ARM pseudocode function CheckSVEEnabled.
5861 int sve_exception_el(CPUARMState
*env
, int el
)
5863 #ifndef CONFIG_USER_ONLY
5864 uint64_t hcr_el2
= arm_hcr_el2_eff(env
);
5866 if (el
<= 1 && (hcr_el2
& (HCR_E2H
| HCR_TGE
)) != (HCR_E2H
| HCR_TGE
)) {
5867 bool disabled
= false;
5869 /* The CPACR.ZEN controls traps to EL1:
5870 * 0, 2 : trap EL0 and EL1 accesses
5871 * 1 : trap only EL0 accesses
5872 * 3 : trap no accesses
5874 if (!extract32(env
->cp15
.cpacr_el1
, 16, 1)) {
5876 } else if (!extract32(env
->cp15
.cpacr_el1
, 17, 1)) {
5881 return hcr_el2
& HCR_TGE
? 2 : 1;
5884 /* Check CPACR.FPEN. */
5885 if (!extract32(env
->cp15
.cpacr_el1
, 20, 1)) {
5887 } else if (!extract32(env
->cp15
.cpacr_el1
, 21, 1)) {
5895 /* CPTR_EL2. Since TZ and TFP are positive,
5896 * they will be zero when EL2 is not present.
5898 if (el
<= 2 && !arm_is_secure_below_el3(env
)) {
5899 if (env
->cp15
.cptr_el
[2] & CPTR_TZ
) {
5902 if (env
->cp15
.cptr_el
[2] & CPTR_TFP
) {
5907 /* CPTR_EL3. Since EZ is negative we must check for EL3. */
5908 if (arm_feature(env
, ARM_FEATURE_EL3
)
5909 && !(env
->cp15
.cptr_el
[3] & CPTR_EZ
)) {
5916 static uint32_t sve_zcr_get_valid_len(ARMCPU
*cpu
, uint32_t start_len
)
5920 end_len
= start_len
&= 0xf;
5921 if (!test_bit(start_len
, cpu
->sve_vq_map
)) {
5922 end_len
= find_last_bit(cpu
->sve_vq_map
, start_len
);
5923 assert(end_len
< start_len
);
5929 * Given that SVE is enabled, return the vector length for EL.
5931 uint32_t sve_zcr_len_for_el(CPUARMState
*env
, int el
)
5933 ARMCPU
*cpu
= env_archcpu(env
);
5934 uint32_t zcr_len
= cpu
->sve_max_vq
- 1;
5937 zcr_len
= MIN(zcr_len
, 0xf & (uint32_t)env
->vfp
.zcr_el
[1]);
5939 if (el
<= 2 && arm_feature(env
, ARM_FEATURE_EL2
)) {
5940 zcr_len
= MIN(zcr_len
, 0xf & (uint32_t)env
->vfp
.zcr_el
[2]);
5942 if (arm_feature(env
, ARM_FEATURE_EL3
)) {
5943 zcr_len
= MIN(zcr_len
, 0xf & (uint32_t)env
->vfp
.zcr_el
[3]);
5946 return sve_zcr_get_valid_len(cpu
, zcr_len
);
5949 static void zcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5952 int cur_el
= arm_current_el(env
);
5953 int old_len
= sve_zcr_len_for_el(env
, cur_el
);
5956 /* Bits other than [3:0] are RAZ/WI. */
5957 QEMU_BUILD_BUG_ON(ARM_MAX_VQ
> 16);
5958 raw_write(env
, ri
, value
& 0xf);
5961 * Because we arrived here, we know both FP and SVE are enabled;
5962 * otherwise we would have trapped access to the ZCR_ELn register.
5964 new_len
= sve_zcr_len_for_el(env
, cur_el
);
5965 if (new_len
< old_len
) {
5966 aarch64_sve_narrow_vq(env
, new_len
+ 1);
5970 static const ARMCPRegInfo zcr_el1_reginfo
= {
5971 .name
= "ZCR_EL1", .state
= ARM_CP_STATE_AA64
,
5972 .opc0
= 3, .opc1
= 0, .crn
= 1, .crm
= 2, .opc2
= 0,
5973 .access
= PL1_RW
, .type
= ARM_CP_SVE
,
5974 .fieldoffset
= offsetof(CPUARMState
, vfp
.zcr_el
[1]),
5975 .writefn
= zcr_write
, .raw_writefn
= raw_write
5978 static const ARMCPRegInfo zcr_el2_reginfo
= {
5979 .name
= "ZCR_EL2", .state
= ARM_CP_STATE_AA64
,
5980 .opc0
= 3, .opc1
= 4, .crn
= 1, .crm
= 2, .opc2
= 0,
5981 .access
= PL2_RW
, .type
= ARM_CP_SVE
,
5982 .fieldoffset
= offsetof(CPUARMState
, vfp
.zcr_el
[2]),
5983 .writefn
= zcr_write
, .raw_writefn
= raw_write
5986 static const ARMCPRegInfo zcr_no_el2_reginfo
= {
5987 .name
= "ZCR_EL2", .state
= ARM_CP_STATE_AA64
,
5988 .opc0
= 3, .opc1
= 4, .crn
= 1, .crm
= 2, .opc2
= 0,
5989 .access
= PL2_RW
, .type
= ARM_CP_SVE
,
5990 .readfn
= arm_cp_read_zero
, .writefn
= arm_cp_write_ignore
5993 static const ARMCPRegInfo zcr_el3_reginfo
= {
5994 .name
= "ZCR_EL3", .state
= ARM_CP_STATE_AA64
,
5995 .opc0
= 3, .opc1
= 6, .crn
= 1, .crm
= 2, .opc2
= 0,
5996 .access
= PL3_RW
, .type
= ARM_CP_SVE
,
5997 .fieldoffset
= offsetof(CPUARMState
, vfp
.zcr_el
[3]),
5998 .writefn
= zcr_write
, .raw_writefn
= raw_write
6001 void hw_watchpoint_update(ARMCPU
*cpu
, int n
)
6003 CPUARMState
*env
= &cpu
->env
;
6005 vaddr wvr
= env
->cp15
.dbgwvr
[n
];
6006 uint64_t wcr
= env
->cp15
.dbgwcr
[n
];
6008 int flags
= BP_CPU
| BP_STOP_BEFORE_ACCESS
;
6010 if (env
->cpu_watchpoint
[n
]) {
6011 cpu_watchpoint_remove_by_ref(CPU(cpu
), env
->cpu_watchpoint
[n
]);
6012 env
->cpu_watchpoint
[n
] = NULL
;
6015 if (!extract64(wcr
, 0, 1)) {
6016 /* E bit clear : watchpoint disabled */
6020 switch (extract64(wcr
, 3, 2)) {
6022 /* LSC 00 is reserved and must behave as if the wp is disabled */
6025 flags
|= BP_MEM_READ
;
6028 flags
|= BP_MEM_WRITE
;
6031 flags
|= BP_MEM_ACCESS
;
6035 /* Attempts to use both MASK and BAS fields simultaneously are
6036 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
6037 * thus generating a watchpoint for every byte in the masked region.
6039 mask
= extract64(wcr
, 24, 4);
6040 if (mask
== 1 || mask
== 2) {
6041 /* Reserved values of MASK; we must act as if the mask value was
6042 * some non-reserved value, or as if the watchpoint were disabled.
6043 * We choose the latter.
6047 /* Watchpoint covers an aligned area up to 2GB in size */
6049 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
6050 * whether the watchpoint fires when the unmasked bits match; we opt
6051 * to generate the exceptions.
6055 /* Watchpoint covers bytes defined by the byte address select bits */
6056 int bas
= extract64(wcr
, 5, 8);
6060 /* This must act as if the watchpoint is disabled */
6064 if (extract64(wvr
, 2, 1)) {
6065 /* Deprecated case of an only 4-aligned address. BAS[7:4] are
6066 * ignored, and BAS[3:0] define which bytes to watch.
6070 /* The BAS bits are supposed to be programmed to indicate a contiguous
6071 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
6072 * we fire for each byte in the word/doubleword addressed by the WVR.
6073 * We choose to ignore any non-zero bits after the first range of 1s.
6075 basstart
= ctz32(bas
);
6076 len
= cto32(bas
>> basstart
);
6080 cpu_watchpoint_insert(CPU(cpu
), wvr
, len
, flags
,
6081 &env
->cpu_watchpoint
[n
]);
6084 void hw_watchpoint_update_all(ARMCPU
*cpu
)
6087 CPUARMState
*env
= &cpu
->env
;
6089 /* Completely clear out existing QEMU watchpoints and our array, to
6090 * avoid possible stale entries following migration load.
6092 cpu_watchpoint_remove_all(CPU(cpu
), BP_CPU
);
6093 memset(env
->cpu_watchpoint
, 0, sizeof(env
->cpu_watchpoint
));
6095 for (i
= 0; i
< ARRAY_SIZE(cpu
->env
.cpu_watchpoint
); i
++) {
6096 hw_watchpoint_update(cpu
, i
);
6100 static void dbgwvr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
6103 ARMCPU
*cpu
= env_archcpu(env
);
6106 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
6107 * register reads and behaves as if values written are sign extended.
6108 * Bits [1:0] are RES0.
6110 value
= sextract64(value
, 0, 49) & ~3ULL;
6112 raw_write(env
, ri
, value
);
6113 hw_watchpoint_update(cpu
, i
);
6116 static void dbgwcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
6119 ARMCPU
*cpu
= env_archcpu(env
);
6122 raw_write(env
, ri
, value
);
6123 hw_watchpoint_update(cpu
, i
);
6126 void hw_breakpoint_update(ARMCPU
*cpu
, int n
)
6128 CPUARMState
*env
= &cpu
->env
;
6129 uint64_t bvr
= env
->cp15
.dbgbvr
[n
];
6130 uint64_t bcr
= env
->cp15
.dbgbcr
[n
];
6135 if (env
->cpu_breakpoint
[n
]) {
6136 cpu_breakpoint_remove_by_ref(CPU(cpu
), env
->cpu_breakpoint
[n
]);
6137 env
->cpu_breakpoint
[n
] = NULL
;
6140 if (!extract64(bcr
, 0, 1)) {
6141 /* E bit clear : watchpoint disabled */
6145 bt
= extract64(bcr
, 20, 4);
6148 case 4: /* unlinked address mismatch (reserved if AArch64) */
6149 case 5: /* linked address mismatch (reserved if AArch64) */
6150 qemu_log_mask(LOG_UNIMP
,
6151 "arm: address mismatch breakpoint types not implemented\n");
6153 case 0: /* unlinked address match */
6154 case 1: /* linked address match */
6156 /* Bits [63:49] are hardwired to the value of bit [48]; that is,
6157 * we behave as if the register was sign extended. Bits [1:0] are
6158 * RES0. The BAS field is used to allow setting breakpoints on 16
6159 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
6160 * a bp will fire if the addresses covered by the bp and the addresses
6161 * covered by the insn overlap but the insn doesn't start at the
6162 * start of the bp address range. We choose to require the insn and
6163 * the bp to have the same address. The constraints on writing to
6164 * BAS enforced in dbgbcr_write mean we have only four cases:
6165 * 0b0000 => no breakpoint
6166 * 0b0011 => breakpoint on addr
6167 * 0b1100 => breakpoint on addr + 2
6168 * 0b1111 => breakpoint on addr
6169 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
6171 int bas
= extract64(bcr
, 5, 4);
6172 addr
= sextract64(bvr
, 0, 49) & ~3ULL;
6181 case 2: /* unlinked context ID match */
6182 case 8: /* unlinked VMID match (reserved if no EL2) */
6183 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
6184 qemu_log_mask(LOG_UNIMP
,
6185 "arm: unlinked context breakpoint types not implemented\n");
6187 case 9: /* linked VMID match (reserved if no EL2) */
6188 case 11: /* linked context ID and VMID match (reserved if no EL2) */
6189 case 3: /* linked context ID match */
6191 /* We must generate no events for Linked context matches (unless
6192 * they are linked to by some other bp/wp, which is handled in
6193 * updates for the linking bp/wp). We choose to also generate no events
6194 * for reserved values.
6199 cpu_breakpoint_insert(CPU(cpu
), addr
, flags
, &env
->cpu_breakpoint
[n
]);
6202 void hw_breakpoint_update_all(ARMCPU
*cpu
)
6205 CPUARMState
*env
= &cpu
->env
;
6207 /* Completely clear out existing QEMU breakpoints and our array, to
6208 * avoid possible stale entries following migration load.
6210 cpu_breakpoint_remove_all(CPU(cpu
), BP_CPU
);
6211 memset(env
->cpu_breakpoint
, 0, sizeof(env
->cpu_breakpoint
));
6213 for (i
= 0; i
< ARRAY_SIZE(cpu
->env
.cpu_breakpoint
); i
++) {
6214 hw_breakpoint_update(cpu
, i
);
6218 static void dbgbvr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
6221 ARMCPU
*cpu
= env_archcpu(env
);
6224 raw_write(env
, ri
, value
);
6225 hw_breakpoint_update(cpu
, i
);
6228 static void dbgbcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
6231 ARMCPU
*cpu
= env_archcpu(env
);
6234 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
6237 value
= deposit64(value
, 6, 1, extract64(value
, 5, 1));
6238 value
= deposit64(value
, 8, 1, extract64(value
, 7, 1));
6240 raw_write(env
, ri
, value
);
6241 hw_breakpoint_update(cpu
, i
);
6244 static void define_debug_regs(ARMCPU
*cpu
)
6246 /* Define v7 and v8 architectural debug registers.
6247 * These are just dummy implementations for now.
6250 int wrps
, brps
, ctx_cmps
;
6251 ARMCPRegInfo dbgdidr
= {
6252 .name
= "DBGDIDR", .cp
= 14, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 0,
6253 .access
= PL0_R
, .accessfn
= access_tda
,
6254 .type
= ARM_CP_CONST
, .resetvalue
= cpu
->dbgdidr
,
6257 /* Note that all these register fields hold "number of Xs minus 1". */
6258 brps
= extract32(cpu
->dbgdidr
, 24, 4);
6259 wrps
= extract32(cpu
->dbgdidr
, 28, 4);
6260 ctx_cmps
= extract32(cpu
->dbgdidr
, 20, 4);
6262 assert(ctx_cmps
<= brps
);
6264 /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
6265 * of the debug registers such as number of breakpoints;
6266 * check that if they both exist then they agree.
6268 if (arm_feature(&cpu
->env
, ARM_FEATURE_AARCH64
)) {
6269 assert(extract32(cpu
->id_aa64dfr0
, 12, 4) == brps
);
6270 assert(extract32(cpu
->id_aa64dfr0
, 20, 4) == wrps
);
6271 assert(extract32(cpu
->id_aa64dfr0
, 28, 4) == ctx_cmps
);
6274 define_one_arm_cp_reg(cpu
, &dbgdidr
);
6275 define_arm_cp_regs(cpu
, debug_cp_reginfo
);
6277 if (arm_feature(&cpu
->env
, ARM_FEATURE_LPAE
)) {
6278 define_arm_cp_regs(cpu
, debug_lpae_cp_reginfo
);
6281 for (i
= 0; i
< brps
+ 1; i
++) {
6282 ARMCPRegInfo dbgregs
[] = {
6283 { .name
= "DBGBVR", .state
= ARM_CP_STATE_BOTH
,
6284 .cp
= 14, .opc0
= 2, .opc1
= 0, .crn
= 0, .crm
= i
, .opc2
= 4,
6285 .access
= PL1_RW
, .accessfn
= access_tda
,
6286 .fieldoffset
= offsetof(CPUARMState
, cp15
.dbgbvr
[i
]),
6287 .writefn
= dbgbvr_write
, .raw_writefn
= raw_write
6289 { .name
= "DBGBCR", .state
= ARM_CP_STATE_BOTH
,
6290 .cp
= 14, .opc0
= 2, .opc1
= 0, .crn
= 0, .crm
= i
, .opc2
= 5,
6291 .access
= PL1_RW
, .accessfn
= access_tda
,
6292 .fieldoffset
= offsetof(CPUARMState
, cp15
.dbgbcr
[i
]),
6293 .writefn
= dbgbcr_write
, .raw_writefn
= raw_write
6297 define_arm_cp_regs(cpu
, dbgregs
);
6300 for (i
= 0; i
< wrps
+ 1; i
++) {
6301 ARMCPRegInfo dbgregs
[] = {
6302 { .name
= "DBGWVR", .state
= ARM_CP_STATE_BOTH
,
6303 .cp
= 14, .opc0
= 2, .opc1
= 0, .crn
= 0, .crm
= i
, .opc2
= 6,
6304 .access
= PL1_RW
, .accessfn
= access_tda
,
6305 .fieldoffset
= offsetof(CPUARMState
, cp15
.dbgwvr
[i
]),
6306 .writefn
= dbgwvr_write
, .raw_writefn
= raw_write
6308 { .name
= "DBGWCR", .state
= ARM_CP_STATE_BOTH
,
6309 .cp
= 14, .opc0
= 2, .opc1
= 0, .crn
= 0, .crm
= i
, .opc2
= 7,
6310 .access
= PL1_RW
, .accessfn
= access_tda
,
6311 .fieldoffset
= offsetof(CPUARMState
, cp15
.dbgwcr
[i
]),
6312 .writefn
= dbgwcr_write
, .raw_writefn
= raw_write
6316 define_arm_cp_regs(cpu
, dbgregs
);
6320 /* We don't know until after realize whether there's a GICv3
6321 * attached, and that is what registers the gicv3 sysregs.
6322 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
6325 static uint64_t id_pfr1_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
6327 ARMCPU
*cpu
= env_archcpu(env
);
6328 uint64_t pfr1
= cpu
->id_pfr1
;
6330 if (env
->gicv3state
) {
6336 static uint64_t id_aa64pfr0_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
6338 ARMCPU
*cpu
= env_archcpu(env
);
6339 uint64_t pfr0
= cpu
->isar
.id_aa64pfr0
;
6341 if (env
->gicv3state
) {
6347 /* Shared logic between LORID and the rest of the LOR* registers.
6348 * Secure state has already been delt with.
6350 static CPAccessResult
access_lor_ns(CPUARMState
*env
)
6352 int el
= arm_current_el(env
);
6354 if (el
< 2 && (arm_hcr_el2_eff(env
) & HCR_TLOR
)) {
6355 return CP_ACCESS_TRAP_EL2
;
6357 if (el
< 3 && (env
->cp15
.scr_el3
& SCR_TLOR
)) {
6358 return CP_ACCESS_TRAP_EL3
;
6360 return CP_ACCESS_OK
;
6363 static CPAccessResult
access_lorid(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
6366 if (arm_is_secure_below_el3(env
)) {
6367 /* Access ok in secure mode. */
6368 return CP_ACCESS_OK
;
6370 return access_lor_ns(env
);
6373 static CPAccessResult
access_lor_other(CPUARMState
*env
,
6374 const ARMCPRegInfo
*ri
, bool isread
)
6376 if (arm_is_secure_below_el3(env
)) {
6377 /* Access denied in secure mode. */
6378 return CP_ACCESS_TRAP
;
6380 return access_lor_ns(env
);
6384 * A trivial implementation of ARMv8.1-LOR leaves all of these
6385 * registers fixed at 0, which indicates that there are zero
6386 * supported Limited Ordering regions.
6388 static const ARMCPRegInfo lor_reginfo
[] = {
6389 { .name
= "LORSA_EL1", .state
= ARM_CP_STATE_AA64
,
6390 .opc0
= 3, .opc1
= 0, .crn
= 10, .crm
= 4, .opc2
= 0,
6391 .access
= PL1_RW
, .accessfn
= access_lor_other
,
6392 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
6393 { .name
= "LOREA_EL1", .state
= ARM_CP_STATE_AA64
,
6394 .opc0
= 3, .opc1
= 0, .crn
= 10, .crm
= 4, .opc2
= 1,
6395 .access
= PL1_RW
, .accessfn
= access_lor_other
,
6396 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
6397 { .name
= "LORN_EL1", .state
= ARM_CP_STATE_AA64
,
6398 .opc0
= 3, .opc1
= 0, .crn
= 10, .crm
= 4, .opc2
= 2,
6399 .access
= PL1_RW
, .accessfn
= access_lor_other
,
6400 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
6401 { .name
= "LORC_EL1", .state
= ARM_CP_STATE_AA64
,
6402 .opc0
= 3, .opc1
= 0, .crn
= 10, .crm
= 4, .opc2
= 3,
6403 .access
= PL1_RW
, .accessfn
= access_lor_other
,
6404 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
6405 { .name
= "LORID_EL1", .state
= ARM_CP_STATE_AA64
,
6406 .opc0
= 3, .opc1
= 0, .crn
= 10, .crm
= 4, .opc2
= 7,
6407 .access
= PL1_R
, .accessfn
= access_lorid
,
6408 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
6412 #ifdef TARGET_AARCH64
6413 static CPAccessResult
access_pauth(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
6416 int el
= arm_current_el(env
);
6419 arm_feature(env
, ARM_FEATURE_EL2
) &&
6420 !(arm_hcr_el2_eff(env
) & HCR_APK
)) {
6421 return CP_ACCESS_TRAP_EL2
;
6424 arm_feature(env
, ARM_FEATURE_EL3
) &&
6425 !(env
->cp15
.scr_el3
& SCR_APK
)) {
6426 return CP_ACCESS_TRAP_EL3
;
6428 return CP_ACCESS_OK
;
6431 static const ARMCPRegInfo pauth_reginfo
[] = {
6432 { .name
= "APDAKEYLO_EL1", .state
= ARM_CP_STATE_AA64
,
6433 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 2, .opc2
= 0,
6434 .access
= PL1_RW
, .accessfn
= access_pauth
,
6435 .fieldoffset
= offsetof(CPUARMState
, keys
.apda
.lo
) },
6436 { .name
= "APDAKEYHI_EL1", .state
= ARM_CP_STATE_AA64
,
6437 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 2, .opc2
= 1,
6438 .access
= PL1_RW
, .accessfn
= access_pauth
,
6439 .fieldoffset
= offsetof(CPUARMState
, keys
.apda
.hi
) },
6440 { .name
= "APDBKEYLO_EL1", .state
= ARM_CP_STATE_AA64
,
6441 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 2, .opc2
= 2,
6442 .access
= PL1_RW
, .accessfn
= access_pauth
,
6443 .fieldoffset
= offsetof(CPUARMState
, keys
.apdb
.lo
) },
6444 { .name
= "APDBKEYHI_EL1", .state
= ARM_CP_STATE_AA64
,
6445 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 2, .opc2
= 3,
6446 .access
= PL1_RW
, .accessfn
= access_pauth
,
6447 .fieldoffset
= offsetof(CPUARMState
, keys
.apdb
.hi
) },
6448 { .name
= "APGAKEYLO_EL1", .state
= ARM_CP_STATE_AA64
,
6449 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 3, .opc2
= 0,
6450 .access
= PL1_RW
, .accessfn
= access_pauth
,
6451 .fieldoffset
= offsetof(CPUARMState
, keys
.apga
.lo
) },
6452 { .name
= "APGAKEYHI_EL1", .state
= ARM_CP_STATE_AA64
,
6453 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 3, .opc2
= 1,
6454 .access
= PL1_RW
, .accessfn
= access_pauth
,
6455 .fieldoffset
= offsetof(CPUARMState
, keys
.apga
.hi
) },
6456 { .name
= "APIAKEYLO_EL1", .state
= ARM_CP_STATE_AA64
,
6457 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 1, .opc2
= 0,
6458 .access
= PL1_RW
, .accessfn
= access_pauth
,
6459 .fieldoffset
= offsetof(CPUARMState
, keys
.apia
.lo
) },
6460 { .name
= "APIAKEYHI_EL1", .state
= ARM_CP_STATE_AA64
,
6461 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 1, .opc2
= 1,
6462 .access
= PL1_RW
, .accessfn
= access_pauth
,
6463 .fieldoffset
= offsetof(CPUARMState
, keys
.apia
.hi
) },
6464 { .name
= "APIBKEYLO_EL1", .state
= ARM_CP_STATE_AA64
,
6465 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 1, .opc2
= 2,
6466 .access
= PL1_RW
, .accessfn
= access_pauth
,
6467 .fieldoffset
= offsetof(CPUARMState
, keys
.apib
.lo
) },
6468 { .name
= "APIBKEYHI_EL1", .state
= ARM_CP_STATE_AA64
,
6469 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 1, .opc2
= 3,
6470 .access
= PL1_RW
, .accessfn
= access_pauth
,
6471 .fieldoffset
= offsetof(CPUARMState
, keys
.apib
.hi
) },
6475 static uint64_t rndr_readfn(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
6480 /* Success sets NZCV = 0000. */
6481 env
->NF
= env
->CF
= env
->VF
= 0, env
->ZF
= 1;
6483 if (qemu_guest_getrandom(&ret
, sizeof(ret
), &err
) < 0) {
6485 * ??? Failed, for unknown reasons in the crypto subsystem.
6486 * The best we can do is log the reason and return the
6487 * timed-out indication to the guest. There is no reason
6488 * we know to expect this failure to be transitory, so the
6489 * guest may well hang retrying the operation.
6491 qemu_log_mask(LOG_UNIMP
, "%s: Crypto failure: %s",
6492 ri
->name
, error_get_pretty(err
));
6495 env
->ZF
= 0; /* NZCF = 0100 */
6501 /* We do not support re-seeding, so the two registers operate the same. */
6502 static const ARMCPRegInfo rndr_reginfo
[] = {
6503 { .name
= "RNDR", .state
= ARM_CP_STATE_AA64
,
6504 .type
= ARM_CP_NO_RAW
| ARM_CP_SUPPRESS_TB_END
| ARM_CP_IO
,
6505 .opc0
= 3, .opc1
= 3, .crn
= 2, .crm
= 4, .opc2
= 0,
6506 .access
= PL0_R
, .readfn
= rndr_readfn
},
6507 { .name
= "RNDRRS", .state
= ARM_CP_STATE_AA64
,
6508 .type
= ARM_CP_NO_RAW
| ARM_CP_SUPPRESS_TB_END
| ARM_CP_IO
,
6509 .opc0
= 3, .opc1
= 3, .crn
= 2, .crm
= 4, .opc2
= 1,
6510 .access
= PL0_R
, .readfn
= rndr_readfn
},
6514 #ifndef CONFIG_USER_ONLY
6515 static void dccvap_writefn(CPUARMState
*env
, const ARMCPRegInfo
*opaque
,
6518 ARMCPU
*cpu
= env_archcpu(env
);
6519 /* CTR_EL0 System register -> DminLine, bits [19:16] */
6520 uint64_t dline_size
= 4 << ((cpu
->ctr
>> 16) & 0xF);
6521 uint64_t vaddr_in
= (uint64_t) value
;
6522 uint64_t vaddr
= vaddr_in
& ~(dline_size
- 1);
6524 int mem_idx
= cpu_mmu_index(env
, false);
6526 /* This won't be crossing page boundaries */
6527 haddr
= probe_read(env
, vaddr
, dline_size
, mem_idx
, GETPC());
6533 /* RCU lock is already being held */
6534 mr
= memory_region_from_host(haddr
, &offset
);
6537 memory_region_do_writeback(mr
, offset
, dline_size
);
6542 static const ARMCPRegInfo dcpop_reg
[] = {
6543 { .name
= "DC_CVAP", .state
= ARM_CP_STATE_AA64
,
6544 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 12, .opc2
= 1,
6545 .access
= PL0_W
, .type
= ARM_CP_NO_RAW
| ARM_CP_SUPPRESS_TB_END
,
6546 .accessfn
= aa64_cacheop_access
, .writefn
= dccvap_writefn
},
6550 static const ARMCPRegInfo dcpodp_reg
[] = {
6551 { .name
= "DC_CVADP", .state
= ARM_CP_STATE_AA64
,
6552 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 13, .opc2
= 1,
6553 .access
= PL0_W
, .type
= ARM_CP_NO_RAW
| ARM_CP_SUPPRESS_TB_END
,
6554 .accessfn
= aa64_cacheop_access
, .writefn
= dccvap_writefn
},
6557 #endif /*CONFIG_USER_ONLY*/
6561 static CPAccessResult
access_predinv(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
6564 int el
= arm_current_el(env
);
6567 uint64_t sctlr
= arm_sctlr(env
, el
);
6568 if (!(sctlr
& SCTLR_EnRCTX
)) {
6569 return CP_ACCESS_TRAP
;
6571 } else if (el
== 1) {
6572 uint64_t hcr
= arm_hcr_el2_eff(env
);
6574 return CP_ACCESS_TRAP_EL2
;
6577 return CP_ACCESS_OK
;
6580 static const ARMCPRegInfo predinv_reginfo
[] = {
6581 { .name
= "CFP_RCTX", .state
= ARM_CP_STATE_AA64
,
6582 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 3, .opc2
= 4,
6583 .type
= ARM_CP_NOP
, .access
= PL0_W
, .accessfn
= access_predinv
},
6584 { .name
= "DVP_RCTX", .state
= ARM_CP_STATE_AA64
,
6585 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 3, .opc2
= 5,
6586 .type
= ARM_CP_NOP
, .access
= PL0_W
, .accessfn
= access_predinv
},
6587 { .name
= "CPP_RCTX", .state
= ARM_CP_STATE_AA64
,
6588 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 3, .opc2
= 7,
6589 .type
= ARM_CP_NOP
, .access
= PL0_W
, .accessfn
= access_predinv
},
6591 * Note the AArch32 opcodes have a different OPC1.
6593 { .name
= "CFPRCTX", .state
= ARM_CP_STATE_AA32
,
6594 .cp
= 15, .opc1
= 0, .crn
= 7, .crm
= 3, .opc2
= 4,
6595 .type
= ARM_CP_NOP
, .access
= PL0_W
, .accessfn
= access_predinv
},
6596 { .name
= "DVPRCTX", .state
= ARM_CP_STATE_AA32
,
6597 .cp
= 15, .opc1
= 0, .crn
= 7, .crm
= 3, .opc2
= 5,
6598 .type
= ARM_CP_NOP
, .access
= PL0_W
, .accessfn
= access_predinv
},
6599 { .name
= "CPPRCTX", .state
= ARM_CP_STATE_AA32
,
6600 .cp
= 15, .opc1
= 0, .crn
= 7, .crm
= 3, .opc2
= 7,
6601 .type
= ARM_CP_NOP
, .access
= PL0_W
, .accessfn
= access_predinv
},
6605 static CPAccessResult
access_aa64_tid3(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
6608 if ((arm_current_el(env
) < 2) && (arm_hcr_el2_eff(env
) & HCR_TID3
)) {
6609 return CP_ACCESS_TRAP_EL2
;
6612 return CP_ACCESS_OK
;
6615 static CPAccessResult
access_aa32_tid3(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
6618 if (arm_feature(env
, ARM_FEATURE_V8
)) {
6619 return access_aa64_tid3(env
, ri
, isread
);
6622 return CP_ACCESS_OK
;
6625 static CPAccessResult
access_jazelle(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
6628 if (arm_current_el(env
) == 1 && (arm_hcr_el2_eff(env
) & HCR_TID0
)) {
6629 return CP_ACCESS_TRAP_EL2
;
6632 return CP_ACCESS_OK
;
6635 static const ARMCPRegInfo jazelle_regs
[] = {
6637 .cp
= 14, .crn
= 0, .crm
= 0, .opc1
= 7, .opc2
= 0,
6638 .access
= PL1_R
, .accessfn
= access_jazelle
,
6639 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
6641 .cp
= 14, .crn
= 1, .crm
= 0, .opc1
= 7, .opc2
= 0,
6642 .access
= PL1_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
6644 .cp
= 14, .crn
= 2, .crm
= 0, .opc1
= 7, .opc2
= 0,
6645 .access
= PL1_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
6649 static const ARMCPRegInfo vhe_reginfo
[] = {
6650 { .name
= "CONTEXTIDR_EL2", .state
= ARM_CP_STATE_AA64
,
6651 .opc0
= 3, .opc1
= 4, .crn
= 13, .crm
= 0, .opc2
= 1,
6653 .fieldoffset
= offsetof(CPUARMState
, cp15
.contextidr_el
[2]) },
6654 { .name
= "TTBR1_EL2", .state
= ARM_CP_STATE_AA64
,
6655 .opc0
= 3, .opc1
= 4, .crn
= 2, .crm
= 0, .opc2
= 1,
6656 .access
= PL2_RW
, .writefn
= vmsa_tcr_ttbr_el2_write
,
6657 .fieldoffset
= offsetof(CPUARMState
, cp15
.ttbr1_el
[2]) },
6658 #ifndef CONFIG_USER_ONLY
6659 { .name
= "CNTHV_CVAL_EL2", .state
= ARM_CP_STATE_AA64
,
6660 .opc0
= 3, .opc1
= 4, .crn
= 14, .crm
= 3, .opc2
= 2,
6662 offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_HYPVIRT
].cval
),
6663 .type
= ARM_CP_IO
, .access
= PL2_RW
,
6664 .writefn
= gt_hv_cval_write
, .raw_writefn
= raw_write
},
6665 { .name
= "CNTHV_TVAL_EL2", .state
= ARM_CP_STATE_BOTH
,
6666 .opc0
= 3, .opc1
= 4, .crn
= 14, .crm
= 3, .opc2
= 0,
6667 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL2_RW
,
6668 .resetfn
= gt_hv_timer_reset
,
6669 .readfn
= gt_hv_tval_read
, .writefn
= gt_hv_tval_write
},
6670 { .name
= "CNTHV_CTL_EL2", .state
= ARM_CP_STATE_BOTH
,
6672 .opc0
= 3, .opc1
= 4, .crn
= 14, .crm
= 3, .opc2
= 1,
6674 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_HYPVIRT
].ctl
),
6675 .writefn
= gt_hv_ctl_write
, .raw_writefn
= raw_write
},
6676 { .name
= "CNTP_CTL_EL02", .state
= ARM_CP_STATE_AA64
,
6677 .opc0
= 3, .opc1
= 5, .crn
= 14, .crm
= 2, .opc2
= 1,
6678 .type
= ARM_CP_IO
| ARM_CP_ALIAS
,
6679 .access
= PL2_RW
, .accessfn
= e2h_access
,
6680 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_PHYS
].ctl
),
6681 .writefn
= gt_phys_ctl_write
, .raw_writefn
= raw_write
},
6682 { .name
= "CNTV_CTL_EL02", .state
= ARM_CP_STATE_AA64
,
6683 .opc0
= 3, .opc1
= 5, .crn
= 14, .crm
= 3, .opc2
= 1,
6684 .type
= ARM_CP_IO
| ARM_CP_ALIAS
,
6685 .access
= PL2_RW
, .accessfn
= e2h_access
,
6686 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_VIRT
].ctl
),
6687 .writefn
= gt_virt_ctl_write
, .raw_writefn
= raw_write
},
6688 { .name
= "CNTP_TVAL_EL02", .state
= ARM_CP_STATE_AA64
,
6689 .opc0
= 3, .opc1
= 5, .crn
= 14, .crm
= 2, .opc2
= 0,
6690 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
| ARM_CP_ALIAS
,
6691 .access
= PL2_RW
, .accessfn
= e2h_access
,
6692 .readfn
= gt_phys_tval_read
, .writefn
= gt_phys_tval_write
},
6693 { .name
= "CNTV_TVAL_EL02", .state
= ARM_CP_STATE_AA64
,
6694 .opc0
= 3, .opc1
= 5, .crn
= 14, .crm
= 3, .opc2
= 0,
6695 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
| ARM_CP_ALIAS
,
6696 .access
= PL2_RW
, .accessfn
= e2h_access
,
6697 .readfn
= gt_virt_tval_read
, .writefn
= gt_virt_tval_write
},
6698 { .name
= "CNTP_CVAL_EL02", .state
= ARM_CP_STATE_AA64
,
6699 .opc0
= 3, .opc1
= 5, .crn
= 14, .crm
= 2, .opc2
= 2,
6700 .type
= ARM_CP_IO
| ARM_CP_ALIAS
,
6701 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_PHYS
].cval
),
6702 .access
= PL2_RW
, .accessfn
= e2h_access
,
6703 .writefn
= gt_phys_cval_write
, .raw_writefn
= raw_write
},
6704 { .name
= "CNTV_CVAL_EL02", .state
= ARM_CP_STATE_AA64
,
6705 .opc0
= 3, .opc1
= 5, .crn
= 14, .crm
= 3, .opc2
= 2,
6706 .type
= ARM_CP_IO
| ARM_CP_ALIAS
,
6707 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_VIRT
].cval
),
6708 .access
= PL2_RW
, .accessfn
= e2h_access
,
6709 .writefn
= gt_virt_cval_write
, .raw_writefn
= raw_write
},
6714 #ifndef CONFIG_USER_ONLY
6715 static const ARMCPRegInfo ats1e1_reginfo
[] = {
6716 { .name
= "AT_S1E1R", .state
= ARM_CP_STATE_AA64
,
6717 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 9, .opc2
= 0,
6718 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
| ARM_CP_RAISES_EXC
,
6719 .writefn
= ats_write64
},
6720 { .name
= "AT_S1E1W", .state
= ARM_CP_STATE_AA64
,
6721 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 9, .opc2
= 1,
6722 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
| ARM_CP_RAISES_EXC
,
6723 .writefn
= ats_write64
},
6727 static const ARMCPRegInfo ats1cp_reginfo
[] = {
6728 { .name
= "ATS1CPRP",
6729 .cp
= 15, .opc1
= 0, .crn
= 7, .crm
= 9, .opc2
= 0,
6730 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
| ARM_CP_RAISES_EXC
,
6731 .writefn
= ats_write
},
6732 { .name
= "ATS1CPWP",
6733 .cp
= 15, .opc1
= 0, .crn
= 7, .crm
= 9, .opc2
= 1,
6734 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
| ARM_CP_RAISES_EXC
,
6735 .writefn
= ats_write
},
6740 void register_cp_regs_for_features(ARMCPU
*cpu
)
6742 /* Register all the coprocessor registers based on feature bits */
6743 CPUARMState
*env
= &cpu
->env
;
6744 if (arm_feature(env
, ARM_FEATURE_M
)) {
6745 /* M profile has no coprocessor registers */
6749 define_arm_cp_regs(cpu
, cp_reginfo
);
6750 if (!arm_feature(env
, ARM_FEATURE_V8
)) {
6751 /* Must go early as it is full of wildcards that may be
6752 * overridden by later definitions.
6754 define_arm_cp_regs(cpu
, not_v8_cp_reginfo
);
6757 if (arm_feature(env
, ARM_FEATURE_V6
)) {
6758 /* The ID registers all have impdef reset values */
6759 ARMCPRegInfo v6_idregs
[] = {
6760 { .name
= "ID_PFR0", .state
= ARM_CP_STATE_BOTH
,
6761 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 0,
6762 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6763 .accessfn
= access_aa32_tid3
,
6764 .resetvalue
= cpu
->id_pfr0
},
6765 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
6766 * the value of the GIC field until after we define these regs.
6768 { .name
= "ID_PFR1", .state
= ARM_CP_STATE_BOTH
,
6769 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 1,
6770 .access
= PL1_R
, .type
= ARM_CP_NO_RAW
,
6771 .accessfn
= access_aa32_tid3
,
6772 .readfn
= id_pfr1_read
,
6773 .writefn
= arm_cp_write_ignore
},
6774 { .name
= "ID_DFR0", .state
= ARM_CP_STATE_BOTH
,
6775 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 2,
6776 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6777 .accessfn
= access_aa32_tid3
,
6778 .resetvalue
= cpu
->id_dfr0
},
6779 { .name
= "ID_AFR0", .state
= ARM_CP_STATE_BOTH
,
6780 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 3,
6781 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6782 .accessfn
= access_aa32_tid3
,
6783 .resetvalue
= cpu
->id_afr0
},
6784 { .name
= "ID_MMFR0", .state
= ARM_CP_STATE_BOTH
,
6785 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 4,
6786 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6787 .accessfn
= access_aa32_tid3
,
6788 .resetvalue
= cpu
->id_mmfr0
},
6789 { .name
= "ID_MMFR1", .state
= ARM_CP_STATE_BOTH
,
6790 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 5,
6791 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6792 .accessfn
= access_aa32_tid3
,
6793 .resetvalue
= cpu
->id_mmfr1
},
6794 { .name
= "ID_MMFR2", .state
= ARM_CP_STATE_BOTH
,
6795 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 6,
6796 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6797 .accessfn
= access_aa32_tid3
,
6798 .resetvalue
= cpu
->id_mmfr2
},
6799 { .name
= "ID_MMFR3", .state
= ARM_CP_STATE_BOTH
,
6800 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 1, .opc2
= 7,
6801 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6802 .accessfn
= access_aa32_tid3
,
6803 .resetvalue
= cpu
->id_mmfr3
},
6804 { .name
= "ID_ISAR0", .state
= ARM_CP_STATE_BOTH
,
6805 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 0,
6806 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6807 .accessfn
= access_aa32_tid3
,
6808 .resetvalue
= cpu
->isar
.id_isar0
},
6809 { .name
= "ID_ISAR1", .state
= ARM_CP_STATE_BOTH
,
6810 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 1,
6811 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6812 .accessfn
= access_aa32_tid3
,
6813 .resetvalue
= cpu
->isar
.id_isar1
},
6814 { .name
= "ID_ISAR2", .state
= ARM_CP_STATE_BOTH
,
6815 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 2,
6816 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6817 .accessfn
= access_aa32_tid3
,
6818 .resetvalue
= cpu
->isar
.id_isar2
},
6819 { .name
= "ID_ISAR3", .state
= ARM_CP_STATE_BOTH
,
6820 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 3,
6821 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6822 .accessfn
= access_aa32_tid3
,
6823 .resetvalue
= cpu
->isar
.id_isar3
},
6824 { .name
= "ID_ISAR4", .state
= ARM_CP_STATE_BOTH
,
6825 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 4,
6826 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6827 .accessfn
= access_aa32_tid3
,
6828 .resetvalue
= cpu
->isar
.id_isar4
},
6829 { .name
= "ID_ISAR5", .state
= ARM_CP_STATE_BOTH
,
6830 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 5,
6831 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6832 .accessfn
= access_aa32_tid3
,
6833 .resetvalue
= cpu
->isar
.id_isar5
},
6834 { .name
= "ID_MMFR4", .state
= ARM_CP_STATE_BOTH
,
6835 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 6,
6836 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6837 .accessfn
= access_aa32_tid3
,
6838 .resetvalue
= cpu
->id_mmfr4
},
6839 { .name
= "ID_ISAR6", .state
= ARM_CP_STATE_BOTH
,
6840 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 2, .opc2
= 7,
6841 .access
= PL1_R
, .type
= ARM_CP_CONST
,
6842 .accessfn
= access_aa32_tid3
,
6843 .resetvalue
= cpu
->isar
.id_isar6
},
6846 define_arm_cp_regs(cpu
, v6_idregs
);
6847 define_arm_cp_regs(cpu
, v6_cp_reginfo
);
6849 define_arm_cp_regs(cpu
, not_v6_cp_reginfo
);
6851 if (arm_feature(env
, ARM_FEATURE_V6K
)) {
6852 define_arm_cp_regs(cpu
, v6k_cp_reginfo
);
6854 if (arm_feature(env
, ARM_FEATURE_V7MP
) &&
6855 !arm_feature(env
, ARM_FEATURE_PMSA
)) {
6856 define_arm_cp_regs(cpu
, v7mp_cp_reginfo
);
6858 if (arm_feature(env
, ARM_FEATURE_V7VE
)) {
6859 define_arm_cp_regs(cpu
, pmovsset_cp_reginfo
);
6861 if (arm_feature(env
, ARM_FEATURE_V7
)) {
6862 /* v7 performance monitor control register: same implementor
6863 * field as main ID register, and we implement four counters in
6864 * addition to the cycle count register.
6866 unsigned int i
, pmcrn
= 4;
6867 ARMCPRegInfo pmcr
= {
6868 .name
= "PMCR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 0,
6870 .type
= ARM_CP_IO
| ARM_CP_ALIAS
,
6871 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmcr
),
6872 .accessfn
= pmreg_access
, .writefn
= pmcr_write
,
6873 .raw_writefn
= raw_write
,
6875 ARMCPRegInfo pmcr64
= {
6876 .name
= "PMCR_EL0", .state
= ARM_CP_STATE_AA64
,
6877 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 0,
6878 .access
= PL0_RW
, .accessfn
= pmreg_access
,
6880 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmcr
),
6881 .resetvalue
= (cpu
->midr
& 0xff000000) | (pmcrn
<< PMCRN_SHIFT
),
6882 .writefn
= pmcr_write
, .raw_writefn
= raw_write
,
6884 define_one_arm_cp_reg(cpu
, &pmcr
);
6885 define_one_arm_cp_reg(cpu
, &pmcr64
);
        for (i = 0; i < pmcrn; i++) {
            char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
            char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
            char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
            char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
            ARMCPRegInfo pmev_regs[] = {
                { .name = pmevcntr_name, .cp = 15, .crn = 14,
                  .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .raw_readfn = pmevcntr_rawread,
                  .raw_writefn = pmevcntr_rawwrite },
                { .name = pmevtyper_name, .cp = 15, .crn = 14,
                  .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .raw_writefn = pmevtyper_rawwrite },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, pmev_regs);
            g_free(pmevcntr_name);
            g_free(pmevcntr_el0_name);
            g_free(pmevtyper_name);
            g_free(pmevtyper_el0_name);
        }
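        /*
         * The loop above follows the architected per-counter encoding:
         * event counter n lives at crn = 14, crm = 0b10:n<4:3> (that is,
         * 8 | (3 & (n >> 3))) with opc2 = n<2:0>, and the matching
         * PMEVTYPER<n> sits at crm = 0b11:n<4:3>.
         */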
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST,
            .accessfn = access_aa64_tid2,
            .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
        FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
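    /*
     * PMCEID2 and PMCEID3 (ARMv8.1-PMU) simply expose bits [63:32] of
     * PMCEID0 and PMCEID1 to AArch32, hence the extract64(..., 32, 32)
     * reset values above.
     */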
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
             * know the right value for the GIC field until after we
             * define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .accessfn = access_aa64_tid3,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64pfr1 },
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              /* At present, only SVEver == 0 is defined anyway. */
              .resetvalue = 0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr2 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = 0x000f000f00ff0000,
              .fixed_bits    = 0x0000000000000011 },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = 0x00000000000000f0 },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64ZFR0_EL1"           },
            { .name = "ID_AA64MMFR0_EL1",
              .fixed_bits    = 0x00000000ff000000 },
            { .name = "ID_AA64MMFR1_EL1"          },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits    = 0x0000000000000006 },
            { .name = "ID_AA64DFR1_EL1"           },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64AFR*",
              .is_glob = true                     },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = 0x00fffffff0fffff0 },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = 0x000000f0ffffffff },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true                     },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
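        /*
         * For user-only mode, modify_arm_cp_regs() turns each matched
         * register into an ARM_CP_CONST whose reset value is masked down
         * to .exported_bits and then ORed with .fixed_bits (see the
         * implementation further below); glob patterns such as
         * "ID_AA64PFR*_EL1_RESERVED" zero the reserved slots entirely.
         */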
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
            if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
            }
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
        if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    if (cpu_isar_feature(jazelle, cpu)) {
        define_arm_cp_regs(cpu, jazelle_regs);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R,
              .accessfn = access_aa64_tid1,
              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
              .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
              .name = "MPUIR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1",
              .exported_bits = 0x00000000ffffffff },
            { .name = "REVIDR_EL1" },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
              .fixed_bits = 0x0000000080000000 },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
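    /*
     * In the user-only case MPIDR_EL1 therefore reads as the fixed value
     * 0x80000000: just the RES1 bit [31], with all affinity fields zero.
     */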
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .type = ARM_CP_CONST,
              .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
            ARMCPRegInfo hactlr2_reginfo = {
                .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
                .access = PL2_RW, .type = ARM_CP_CONST,
                .resetvalue = 0
            };
            define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        /*
         * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
         * There are two flavours:
         *  (1) older 32-bit only cores have a simple 32-bit CBAR
         *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
         *      32-bit register visible to AArch32 at a different encoding
         *      to the "flavour 1" register and with the bits rearranged to
         *      be able to squash a 64-bit address into the 32-bit view.
         * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
         * in future if we support AArch32-only configs of some of the
         * AArch64 cores we might need to add a specific feature flag
         * to indicate cores with "flavour 2" CBAR.
         */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
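            /*
             * Worked example: a reset_cbar of 0x40_0000_0000 (bit 38 set)
             * gives cbar32 = 0x40, since address bits [43:32] land in view
             * bits [11:0] while bits [31:18] stay in place.
             */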
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
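    /*
     * VBAR uses .bank_fieldoffsets rather than .fieldoffset: the two array
     * entries name the Secure and Non-secure storage, and
     * add_cpreg_to_hashtable() further below selects the right one for
     * each registered instance.
     */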
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        define_arm_cp_regs(cpu, lor_reginfo);
    }
    if (cpu_isar_feature(aa64_pan, cpu)) {
        define_one_arm_cp_reg(cpu, &pan_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1e1_reginfo);
    }
    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1cp_reginfo);
    }
#endif
    if (cpu_isar_feature(aa64_uao, cpu)) {
        define_one_arm_cp_reg(cpu, &uao_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_cp_regs(cpu, vhe_reginfo);
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
    if (cpu_isar_feature(aa64_rndr, cpu)) {
        define_arm_cp_regs(cpu, rndr_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    /* Data Cache clean instructions up to PoP */
    if (cpu_isar_feature(aa64_dcpop, cpu)) {
        define_one_arm_cp_reg(cpu, dcpop_reg);

        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
            define_one_arm_cp_reg(cpu, dcpodp_reg);
        }
    }
#endif /*CONFIG_USER_ONLY*/
#endif

    /*
     * While all v8.0 cpus support aarch64, QEMU does have configurations
     * that do not set ID_AA64ISAR1, e.g. user-only qemu-arm -cpu max,
     * which will set ID_ISAR6.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
        ? cpu_isar_feature(aa64_predinv, cpu)
        : cpu_isar_feature(aa32_predinv, cpu)) {
        define_arm_cp_regs(cpu, predinv_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    /*
     * Register redirections and aliases must be done last,
     * after the registers from the other extensions have been defined.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_vh_e2h_redirects_aliases(cpu);
    }
#endif
}

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_xml(cs),
                             "system-registers.xml", 0);
}

/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    qemu_printf("  %s\n", name);
    g_free(name);
}

void arm_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    qemu_printf("Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, NULL);
    g_slist_free(list);
}

static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}

static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);
    /* Reset the secure state to the specific incoming state. This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank. This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
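    /*
     * The two key encodings cannot collide: AArch64 registers are keyed
     * with cp = CP_REG_ARM64_SYSREG_CP via ENCODE_AA64_CP_REG, while
     * AArch32 keys from ENCODE_CP_REG include the coprocessor number,
     * the 64-bit flag and the security state instead.
     */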
    r2->opaque = opaque;
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}

void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are fewer than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
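    /*
     * Example of the wildcard expansion these bounds drive: a reginfo
     * with .crm = CP_ANY, .opc1 = 0, .opc2 = CP_ANY is entered into the
     * hash table 16 * 8 = 128 times; all but the crm = 0 / opc2 = 0
     * instance are marked ARM_CP_ALIAS | ARM_CP_NO_GDB by
     * add_cpreg_to_hashtable() above.
     */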
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0:
            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
            mask = PL0U_R | PL1_RW;
            break;
        case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
        case 5:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL | ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of cp15 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
{
    const ARMCPRegUserSpaceInfo *m;
    ARMCPRegInfo *r;

    for (m = mods; m->name; m++) {
        GPatternSpec *pat = NULL;
        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}

static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
*env
)
8240 ZF
= (env
->ZF
== 0);
8241 return env
->uncached_cpsr
| (env
->NF
& 0x80000000) | (ZF
<< 30) |
8242 (env
->CF
<< 29) | ((env
->VF
& 0x80000000) >> 3) | (env
->QF
<< 27)
8243 | (env
->thumb
<< 5) | ((env
->condexec_bits
& 3) << 25)
8244 | ((env
->condexec_bits
& 0xfc) << 8)
8245 | (env
->GE
<< 16) | (env
->daif
& CPSR_AIF
);
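/*
 * The shifting above reflects how QEMU caches the flags outside
 * uncached_cpsr: N is kept in bit 31 of NF, Z is considered set when the
 * ZF field reads as zero, C sits in bit 0 of CF, and V in bit 31 of VF.
 */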

void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                env->uncached_cpsr |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}

/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    if (num == INT_MIN && den == -1)
        return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
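/*
 * The explicit divide-by-zero and INT_MIN / -1 guards in the helpers
 * above implement the Arm architected UDIV/SDIV behaviour (division by
 * zero yields zero, and the overflowing signed case returns INT_MIN)
 * rather than trapping the way the host's native division would.
 */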

#ifdef CONFIG_USER_ONLY

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = env_archcpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else

static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}

/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria. Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken". The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contain a target of EL1. This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    };

    /*
     * For these purposes, TGE and AMO/IMO/FMO both force the
     * interrupt to EL2.  Fold TGE into the bit extracted above.
     */
    hcr |= (hcr_el2 & HCR_TGE) != 0;

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}

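/*
 * Worked example (editor's illustration, not part of the original code):
 * an IRQ taken from non-secure EL0 on a CPU with a 64-bit EL3, with
 * SCR_EL3.IRQ = 0 and HCR_EL2.IMO = 1, indexes
 * target_el_table[1][0][rw][1][0][0], which is 2 for either rw value:
 * the IRQ is routed to EL2.  Because HCR_TGE is folded into 'hcr' above,
 * TGE = 1 selects the same EL2-routing rows even when IMO is clear.
 */
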
void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
            [EXCP_LSERR] = "v8M LSERR UsageFault",
            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}

/*
 * Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /*
     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /*
     * Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] =
                env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}

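/*
 * For reference (editor's summary of the mapping implemented above and
 * mirrored in aarch64_sync_64_to_32() below):
 *   X[8:12]  <-> R[8:12] of the user bank     X[13:14] <-> SP/LR_usr
 *   X[15]    <-> SP_hyp                       X[16:17] <-> LR/SP_irq
 *   X[18:19] <-> LR/SP_svc                    X[20:21] <-> LR/SP_abt
 *   X[22:23] <-> LR/SP_und                    X[24:30] <-> R[8:14] of FIQ
 */
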
/*
 * Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /*
     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /*
     * Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * registers.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /*
         * HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] =
                env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}

static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    int new_el;

    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);
    new_el = arm_current_el(env);

    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /* CPSR.PAN is normally preserved unless...  */
        if (cpu_isar_feature(aa64_pan, env_archcpu(env))) {
            switch (new_el) {
            case 3:
                if (!arm_is_secure_below_el3(env)) {
                    /* ... the target is EL3, from non-secure state.  */
                    env->uncached_cpsr &= ~CPSR_PAN;
                    break;
                }
                /* ... the target is EL3, from secure state ... */
                /* fall through */
            case 1:
                /* ... the target is EL1 and SCTLR.SPAN is 0.  */
                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
                    env->uncached_cpsr |= CPSR_PAN;
                }
                break;
            }
        }
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
    arm_rebuild_hflags(env);
}

static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x14;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}

static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}

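/*
 * Worked example (editor's illustration): an IRQ taken to IRQ mode with
 * SCTLR.V set vectors to 0xffff0000 + 0x18 = 0xffff0018; with SCTLR.V
 * clear it vectors to VBAR + 0x18, and an IRQ routed to Monitor mode by
 * SCR.IRQ uses MVBAR + 0x18 instead.
 */
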
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int old_mode;
    unsigned int cur_el = arm_current_el(env);

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;
        uint64_t hcr;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
                is_aa64 = (hcr & HCR_RW) != 0;
                break;
            }
            /* fall through */
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        old_mode = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        old_mode = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;

    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    if (cpu_isar_feature(aa64_pan, cpu)) {
        /* The value of PSTATE.PAN is normally preserved, except when ... */
        new_mode |= old_mode & PSTATE_PAN;
        switch (new_el) {
        case 2:
            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
                != (HCR_E2H | HCR_TGE)) {
                break;
            }
            /* fall through */
        case 1:
            /* ... the target is EL1 ... */
            /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
                new_mode |= PSTATE_PAN;
            }
            break;
        }
    }

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);
    helper_rebuild_hflags_a64(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}

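/*
 * Worked example (editor's illustration): an IRQ taken from AArch64 EL0
 * to AArch64 EL1 has cur_el < new_el with the lower EL using AArch64,
 * so the entry offset is 0x400; EXCP_IRQ then adds 0x80, giving a
 * vector of VBAR_EL1 + 0x480, matching the architected vector table.
 */
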
/*
 * Do semihosting call and set the appropriate return value. All the
 * permission and validity checks have been done at translate time.
 *
 * We only see semihosting exceptions in TCG, as they are not
 * trapped to the hypervisor in KVM.
 */
static void handle_semihosting(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        env->pc += 4;
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        env->regs[15] += env->thumb ? 2 : 4;
    }
}

/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
    if (cs->exception_index == EXCP_SEMIHOST) {
        handle_semihosting(cs);
        return;
    }

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */

/* Return the exception level which controls this address translation regime */
static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

uint64_t arm_sctlr(CPUARMState *env, int el)
{
    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
    if (el == 0) {
        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
        el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1);
    }
    return env->cp15.sctlr_el[el];
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

#ifndef CONFIG_USER_ONLY

/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (env->cp15.hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

#endif /* !CONFIG_USER_ONLY */

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}

#ifndef CONFIG_USER_ONLY
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}

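/*
 * Example (editor's illustration): simple AP[2:1] = 2 ("privileged
 * read-only") yields PAGE_READ for a privileged access and no access (0)
 * from PL0, while AP[2:1] = 1 ("full access") yields
 * PAGE_READ | PAGE_WRITE regardless of privilege.
 */
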
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}

/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_Stage2);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}

static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}

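/*
 * Worked example (editor's illustration): with TTBCR.N = 2, tcr->mask
 * covers VA bits [31:30], so addresses below 0x40000000 walk from TTBR0
 * (aligned by tcr->base_mask) and all higher addresses walk from TTBR1;
 * the "(address >> 18) & 0x3ffc" term is VA[31:20] scaled by the 4-byte
 * size of a level 1 descriptor.
 */
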
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMCacheAttrs cacheattrs = {};
        ARMCacheAttrs *pcacheattrs = NULL;

        if (env->cp15.hcr_el2 & HCR_PTW) {
            /*
             * PTW means we must fault if this S1 walk touches S2 Device
             * memory; otherwise we don't care about the attributes and can
             * save the S2 translation the effort of computing them.
             */
            pcacheattrs = &cacheattrs;
        }

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_Stage2, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
            /* Access was to Device memory: generate Permission fault */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}

/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

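/*
 * Worked example (editor's illustration): a 1MB section hit has
 * desc[1:0] = 2 and produces desc[31:20]:VA[19:0] with AP at
 * desc[11:10]; for a 4KB page in a coarse table, the expression
 * "(desc >> (4 + ((address >> 9) & 6))) & 3" selects the AP field of
 * the 1KB quarter of the page that VA[11:10] points at.
 */
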
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}

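/*
 * Worked example (editor's illustration): with a 4KB granule (stride 9,
 * grainsize 12) and a 32-bit IPA, a suggested starting level of 0 gives
 * startsizecheck = 32 - (3 * 9 + 12) = -7 and is rejected, while level 1
 * gives 32 - (2 * 9 + 12) = 2, inside the permitted 1..13 range.
 */
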
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}

#endif /* !CONFIG_USER_ONLY */

ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
                                        ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    bool tbi, tbid, epd, hpd, using16k, using64k;
    int select, tsz;

    /*
     * Bit 55 is always between the two regions, and is canonical for
     * determining if address tagging is enabled.
     */
    select = extract64(va, 55, 1);

    if (!regime_has_2_ranges(mmu_idx)) {
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_Stage2) {
            /* VTCR_EL2 */
            tbi = tbid = hpd = false;
        } else {
            tbi = extract32(tcr, 20, 1);
            hpd = extract32(tcr, 24, 1);
            tbid = extract32(tcr, 29, 1);
        }
        epd = false;
    } else if (!select) {
        tsz = extract32(tcr, 0, 6);
        epd = extract32(tcr, 7, 1);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        tbi = extract64(tcr, 37, 1);
        hpd = extract64(tcr, 41, 1);
        tbid = extract64(tcr, 51, 1);
    } else {
        int tg = extract32(tcr, 30, 2);
        using16k = tg == 1;
        using64k = tg == 3;
        tsz = extract32(tcr, 16, 6);
        epd = extract32(tcr, 23, 1);
        tbi = extract64(tcr, 38, 1);
        hpd = extract64(tcr, 42, 1);
        tbid = extract64(tcr, 52, 1);
    }
    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .tbi = tbi,
        .tbid = tbid,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
    };
}

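/*
 * Example (editor's illustration): for an EL1&0 regime with
 * TCR_EL1.T0SZ = 16 and TG0 = 0 (4KB granule), a VA with bit 55 clear
 * decodes to tsz = 16, i.e. a 48-bit bottom region walked from TTBR0;
 * the same VA with bit 55 set would instead be decoded from the
 * T1SZ/TG1/TBI1/HPD1 fields.
 */
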
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);

    /* Present TBI as a composite with TBID.  */
    ret.tbi &= (data || !ret.tbid);
    return ret;
}

#ifndef CONFIG_USER_ONLY
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;
        ttbr1_valid = regime_has_2_ranges(mmu_idx);
        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        /* There is no TTBR1 for EL2 */
        ttbr1_valid = (el != 2);
        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
        inputsize = addrsize - param.tsz;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_Stage2) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_Stage2) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        txattrs->target_tlb_bit0 = true;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_Stage2) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2);
    return true;
}

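/*
 * Worked example (editor's illustration): with a 4KB granule (stride 9),
 * inputsize = 48 gives level = 4 - (48 - 4) / 9 = 0, a full four-level
 * walk, while inputsize = 30 gives level = 4 - (30 - 4) / 9 = 2, so the
 * walk starts directly at level 2 with correspondingly fewer descriptor
 * fetches.
 */
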
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}

static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}


static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
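
/*
 * A worked example of the DRSR decode above (illustrative values):
 * a DRSR.Rsize field of 9 gives rsize = 10 after the increment, i.e. a
 * 1KB region with rmask = 0x3ff; with subregions in use, each of the
 * eight subregions then covers 2^(10 - 3) = 128 bytes.
 */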

static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
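
/*
 * As an illustration of the RBAR/RLAR decode above (hypothetical
 * values): RBAR = 0x20000040 yields base = 0x20000040 and
 * RLAR = 0x200000c1 yields limit = 0x200000df, i.e. an enabled
 * 160-byte region, smaller than a QEMU page and therefore reported
 * with *is_subpage set.
 */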

static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}

static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
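
/*
 * A worked example of the region decode above: if the size field
 * ((base >> 1) & 0x1f) of c6_region[n] is 11, mask becomes
 * (1 << 11 << 1) - 1 = 0xfff, i.e. a 4KB region aligned to its size.
 */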

/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}

/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
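
/*
 * For example, combining stage 1 attrs 0xff (Normal, inner/outer
 * write-back RW-allocate) with stage 2 attrs 0x44 (Normal, inner/outer
 * non-cacheable): each nibble combines to 4, giving 0x44, which the
 * rule above then forces to Outer Shareable.
 */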

/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_E10_0 ||
        mmu_idx == ARMMMUIdx_E10_1 ||
        mmu_idx == ARMMMUIdx_E10_1_PAN) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early.  */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation.  */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms.  */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                if (env->cp15.hcr_el2 & HCR_DC) {
                    /*
                     * HCR.DC forces the first stage attributes to
                     *  Normal Non-Shareable,
                     *  Inner Write-Back Read-Allocate Write-Allocate,
                     *  Outer Write-Back Read-Allocate Write-Allocate.
                     */
                    cacheattrs->attrs = 0xff;
                    cacheattrs->shareability = 0;
                }
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return false;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}

#endif

/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
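
/*
 * op_addsub.h acts as a template: each inclusion in this file
 * instantiates a family of packed parallel add/subtract helpers from
 * the ADD16/SUB16/ADD8/SUB8 and PFX macros defined just above it (here
 * the signed saturating "q"-prefixed family), and the header #undefs
 * those macros so the next block can redefine and re-include. This
 * description is a sketch; see op_addsub.h for the exact expansion.
 */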

/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"

/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"

static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
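
/*
 * For example: usad8(0x01020304, 0x04030201) computes
 * |4 - 1| + |3 - 2| + |2 - 3| + |1 - 4| = 3 + 1 + 1 + 3 = 8.
 */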

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}
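
/*
 * For example: with flags = 0x5 (GE bits 0 and 2 set), mask becomes
 * 0x00ff00ff, so bytes 0 and 2 of the result are taken from a and
 * bytes 1 and 3 from b.
 */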

/*
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
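
/*
 * These helpers back the CRC32 and CRC32C instructions: a word-sized
 * CRC32W/CRC32CW passes bytes == 4, while the narrower variants
 * zero-extend the value and pass bytes == 1 or 2, as the comment above
 * requires.
 */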

/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     * This register is ignored if E2H+TGE are both set.
     */
    if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        int fpen = extract32(env->cp15.cpacr_el1, 20, 2);

        switch (fpen) {
        case 0:
        case 2:
            if (cur_el == 0 || cur_el == 1) {
                /* Trap to PL1, which might be EL1 or EL3 */
                if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                    return 3;
                }
                return 1;
            }
            if (cur_el == 3 && !is_a64(env)) {
                /* Secure PL1 running at EL3 */
                return 3;
            }
            break;
        case 1:
            if (cur_el == 0) {
                return 1;
            }
            break;
        case 3:
            break;
        }
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}

/* Return the exception level we're running at if this is our mmu_idx */
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    if (mmu_idx & ARM_MMU_IDX_M) {
        return mmu_idx & ARM_MMU_IDX_M_PRIV;
    }

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE10_0:
        return 0;
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
        return 1;
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif

ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    /* See ARM pseudo-function ELIsInHost.  */
    switch (el) {
    case 0:
        if (arm_is_secure_below_el3(env)) {
            return ARMMMUIdx_SE10_0;
        }
        if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)
            && arm_el_is_aa64(env, 2)) {
            return ARMMMUIdx_E20_0;
        }
        return ARMMMUIdx_E10_0;
    case 1:
        if (arm_is_secure_below_el3(env)) {
            if (env->pstate & PSTATE_PAN) {
                return ARMMMUIdx_SE10_1_PAN;
            }
            return ARMMMUIdx_SE10_1;
        }
        if (env->pstate & PSTATE_PAN) {
            return ARMMMUIdx_E10_1_PAN;
        }
        return ARMMMUIdx_E10_1;
    case 2:
        /* TODO: ARMv8.4-SecEL2 */
        /* Note that TGE does not apply at EL2.  */
        if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) {
            if (env->pstate & PSTATE_PAN) {
                return ARMMMUIdx_E20_2_PAN;
            }
            return ARMMMUIdx_E20_2;
        }
        return ARMMMUIdx_E2;
    case 3:
        return ARMMMUIdx_SE3;
    default:
        g_assert_not_reached();
    }
}

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}

int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    return arm_to_core_mmu_idx(arm_mmu_idx(env));
}

#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif

static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
                                      ARMMMUIdx mmu_idx, uint32_t flags)
{
    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
                       arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
    }
    return flags;
}

static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                         ARMMMUIdx mmu_idx, uint32_t flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }
    flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = 0;

    if (arm_v7m_is_handler_mode(env)) {
        flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
{
    int flags = 0;

    flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
                       arm_debug_target_el(env));
    return flags;
}

static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = rebuild_hflags_aprofile(env);

    if (arm_el_is_aa64(env, 1)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
    }

    if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = rebuild_hflags_aprofile(env);
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
    uint64_t sctlr;
    int tbii, tbid;

    flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses.  */
    if (regime_has_2_ranges(mmu_idx)) {
        ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
        tbid = (p1.tbi << 1) | p0.tbi;
        tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
    } else {
        tbid = p0.tbi;
        tbii = tbid & !p0.tbid;
    }

    flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
    flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);
        uint32_t zcr_len;

        /*
         * If SVE is disabled, but FP is enabled,
         * then the effective len is 0.
         */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            zcr_len = sve_zcr_len_for_el(env, el);
        }
        flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
        flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
    }

    sctlr = regime_sctlr(env, stage1);

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            /* TODO: ARMv8.3-NV */
            flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            /* TODO: ARMv8.4-SecEL2 */
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}

static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    uint32_t env_flags_current = env->hflags;
    uint32_t env_flags_rebuilt = rebuild_hflags_internal(env);

    if (unlikely(env_flags_current != env_flags_rebuilt)) {
        fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
                env_flags_current, env_flags_rebuilt);
        abort();
    }
#endif
}

void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    uint32_t flags = env->hflags;
    uint32_t pstate_for_ss;

    *cs_base = 0;
    assert_hflags_rebuild_correctly(env);

    if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) {
        *pc = env->pc;
        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
            flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
        }
        pstate_for_ss = env->pstate;
    } else {
        *pc = env->regs[15];

        if (arm_feature(env, ARM_FEATURE_M)) {
            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
                != env->v7m.secure) {
                flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1);
            }

            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
                 (env->v7m.secure &&
                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
                /*
                 * ASPEN is set, but FPCA/SFPA indicate that there is no
                 * active FP context; we must create a new FP context before
                 * executing any FP insn.
                 */
                flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1);
            }

            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1);
            }
        } else {
            /*
             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
             */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                flags = FIELD_DP32(flags, TBFLAG_A32,
                                   XSCALE_CPAR, env->cp15.c15_cpar);
            } else {
                flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN,
                                   env->vfp.vec_len);
                flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE,
                                   env->vfp.vec_stride);
            }
            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
                flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
            }
        }

        flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb);
        flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits);
        pstate_for_ss = env->uncached_cpsr;
    }

    /*
     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0          x         Inactive (the TB flag for SS is always 0)
     *     1          0         Active-pending
     *     1          1         Active-not-pending
     * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
     */
    if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
        (pstate_for_ss & PSTATE_SS)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
    }

    *pflags = flags;
}

#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
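
/*
 * A worked example of the predicate masking above: narrowing to vq == 1
 * starts j at 0 with pmask = ~(-1ULL << 16) = 0xffff, so the first
 * uint64_t of each predicate keeps only its low 16 bits and every later
 * word is cleared outright.
 */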

/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}