/*
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include <zlib.h> /* For crc32 */
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);
#endif /* !CONFIG_USER_ONLY */
static void switch_mode(CPUARMState *env, int mode);
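/*
 * gdbstub accessors for the AArch32 VFP/Neon register view: D registers
 * first, optional Q-register aliases, then FPSID/FPSCR/FPEXC. Each
 * function returns the number of bytes read or written, or 0 if the
 * register number is out of range.
 */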
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        stq_le_p(buf, q[0]);
        stq_le_p(buf + 8, q[1]);
        return 16;
    }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}
static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
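/*
 * Raw accessors for the underlying cpreg state field. These bypass any
 * readfn/writefn side effects and are only legal for registers that have
 * a fieldoffset (asserted below); migration and gdbstub code reaches
 * them via read_raw_cp_reg()/write_raw_cp_reg().
 */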
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}
static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}
static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}
static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * written value.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}
static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}
static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}
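/* Sort predicate: order cpreg hashtable keys by their 64-bit KVM-style ID */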
static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}
/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}
static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}
static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
}
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = env_archcpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiall_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}
static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = env_archcpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimva_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = env_archcpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiasid_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}
static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = env_archcpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimvaa_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}
static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}
static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }
    return value;
}
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}
static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}
static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
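/*
 * Worked example: with PMCR.N == 4, pmu_counter_mask() returns
 * (1 << 31) | 0xf == 0x8000000f, i.e. the cycle counter bit plus one
 * bit per implemented event counter.
 */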
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;
static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}
/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif
static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
};
/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x11
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];
/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            (env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
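/*
 * Recompute the state of the PMU interrupt line: it is asserted while
 * PMCR.E is set and at least one overflow flag in PMOVSR has its
 * corresponding PMINTEN bit enabled.
 */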
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
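/*
 * Analogous to pmccntr_op_start(): fold the underlying event count for
 * one event counter into its guest-visible value, raising the overflow
 * flag and PMU interrupt if the 32-bit counter wrapped.
 */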
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint64_t count = 0;
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;

    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}
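/*
 * Counterpart to pmevcntr_op_start(): rebase the stored delta and, in
 * system emulation, reprogram the PMU timer for the next overflow of
 * this counter.
 */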
static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}
*env
)
1505 pmccntr_op_start(env
);
1506 for (i
= 0; i
< pmu_num_counters(env
); i
++) {
1507 pmevcntr_op_start(env
, i
);
1511 void pmu_op_finish(CPUARMState
*env
)
1514 pmccntr_op_finish(env
);
1515 for (i
= 0; i
< pmu_num_counters(env
); i
++) {
1516 pmevcntr_op_finish(env
, i
);
1520 void pmu_pre_el_change(ARMCPU
*cpu
, void *ignored
)
1522 pmu_op_start(&cpu
->env
);
1525 void pmu_post_el_change(ARMCPU
*cpu
, void *ignored
)
1527 pmu_op_finish(&cpu
->env
);
1530 void arm_pmu_timer_cb(void *opaque
)
1532 ARMCPU
*cpu
= opaque
;
1535 * Update all the counter values based on the current underlying counts,
1536 * triggering interrupts to be raised, if necessary. pmu_op_finish() also
1537 * has the effect of setting the cpu->pmu_timer to the next earliest time a
1538 * counter may expire.
1540 pmu_op_start(&cpu
->env
);
1541 pmu_op_finish(&cpu
->env
);
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmu_op_finish(env);
}
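/*
 * PMSWINC: each written 1 bit increments the corresponding event
 * counter, but only if that counter is enabled, unfiltered, and
 * configured for the SW_INCR (0x0) event.
 */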
static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}
*env
, const ARMCPRegInfo
*ri
,
1611 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1612 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
1613 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
1616 env
->cp15
.c9_pmselr
= value
& 0x1f;
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}
static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}
static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}
static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}
static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}
static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}
static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}
static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}
static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}
static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}
static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}
static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}
static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}
static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}
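/*
 * PMUSERENR gates EL0 access to the PMU: v8 defines the ER, CR, SW and
 * EN bits [3:0], while pre-v8 implements only EN [0].
 */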
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}
static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}
static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;   /* these two bits are RES1. */
        valid_mask &= ~SCR_NET;
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= SCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= SCR_API | SCR_APK;
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}
static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write },
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
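/*
 * The CNTKCTL bit positions consumed by the two access functions above,
 * assuming the GTIMER_PHYS == 0 / GTIMER_VIRT == 1 numbering used for
 * these indices elsewhere in the target/arm code:
 *   gt_counter_access: bit "timeridx"     -> EL0PCTEN (0) / EL0VCTEN (1)
 *   gt_timer_access:   bit "9 - timeridx" -> EL0PTEN (9) / EL0VTEN (8)
 */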
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}
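/*
 * Quick sanity check of the arithmetic above: GTIMER_SCALE is the number
 * of nanoseconds per counter tick (16, per its definition in internals.h),
 * so the emulated counter advances at 1e9 / 16 = 62.5MHz -- the same
 * frequency that the CNTFRQ reset value below reports to the guest.
 */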
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
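/*
 * A note on the unsigned arithmetic above: the architected condition is
 * ISTATUS = (count - offset) >= cval, evaluated on unsigned 64-bit
 * values. For example, count = 0x10 with offset = 0x20 yields
 * 0xfffffffffffffff0, not a negative number; a signed comparison would
 * get this wraparound case wrong.
 */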
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = env_archcpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}
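/*
 * Worked example of the downcount view above: if cval is 1000 and the
 * (offset-adjusted) counter reads 400, TVAL reads back as 600 ticks
 * remaining; once the counter passes cval the same expression becomes
 * negative when the guest interprets it as a signed 32-bit value.
 */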
static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
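/*
 * For reference, the CNT*_CTL bits handled above are:
 *   bit 0: ENABLE  -- toggling it forces a full timer recalculation
 *   bit 1: IMASK   -- toggling it only moves the interrupt line
 *   bit 2: ISTATUS -- read-only to the guest; the deposit64() above
 *                     only lets the write change bits [1:0]
 */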
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
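/*
 * These four callbacks fire when the QEMUTimer armed by timer_mod() in
 * gt_recalc_timer() expires. Re-running the recalculation both updates
 * ISTATUS and the interrupt line, and re-arms the timer for any
 * remaining period when the previous expiry had to be clamped to
 * INT64_MAX / GTIMER_SCALE.
 */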
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

#else
/* In user-mode most of the generic timer registers are inaccessible,
 * however modern kernels (4.12+) allow access to cntvct_el0
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / GTIMER_SCALE;
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};

#endif
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}
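/*
 * The masks above spell out which short-descriptor PAR bits survive a
 * write: 0xfffff6ff discards bits [11] and [8] on v7 cores, while
 * 0xfffff1ff also discards bits [10:9] on earlier cores; LPAE
 * implementations keep the full 64-bit value.
 */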
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
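/*
 * Shape of the 64-bit PAR assembled above, on success:
 *   [63:56] ATTR, [55:12] PA, [11] LPAE = 1, [9] NS, [8:7] SH
 * and on a fault:
 *   [11] LPAE = 1, [9] S (stage 2), [8] PTW, [6:1] FS, [0] F = 1
 */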
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
#endif
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
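/*
 * Worked example of the packing/unpacking pair above:
 *   extended_mpu_ap_bits(0x0000000f) == 0x00000033
 * i.e. two adjacent 2-bit fields at a 2-bit stride (regions 0 and 1,
 * both 0b11) are spread out to a 4-bit stride, and
 *   simple_mpu_ap_bits(0x00000033) == 0x0000000f
 * reverses the transformation.
 */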
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}

static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
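/*
 * Worked example of the mask computation above: for TTBCR.N = 2
 * (maskshift == 2, short-descriptor format),
 *   tcr->mask      = ~(0xffffffffu >> 2) = 0xc0000000
 *   tcr->base_mask = ~(0x3fffu >> 2)     = 0xfffff000
 * so a VA whose top two bits are nonzero is translated via TTBR1, and
 * the TTBR0 table base consumed by get_phys_addr() is masked with
 * 0xfffff000.
 */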
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}

static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = env_archcpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0 |
                            ARMMMUIdxBit_S2NS);
        raw_write(env, ri, value);
    }
}
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
                           offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
};
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}

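/*
 * Illustrative sketch (not part of the original file): how the MPIDR
 * value composed above decomposes. Bit 31 is the RES1 "MP extensions"
 * flag and bit 30 is the "U" (uniprocessor) bit, as set for e.g. a
 * single-core Cortex-R5. The helper name is hypothetical.
 */
static inline bool mpidr_example_is_uniprocessor(uint64_t mpidr)
{
    /* The U bit (bit 30) marks a uniprocessor core. */
    return (mpidr >> 30) & 1;
}
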
static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};

static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

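/*
 * Sketch (not part of the original file): several EL0-visible operations
 * in this file share the gating pattern used by aa64_daif_access and
 * aa64_cacheop_access above: a control bit in SCTLR_EL1 (UMA, UCI, UCT,
 * DZE) decides whether the EL0 access traps. A generic form, with a
 * hypothetical helper name, would look like this.
 */
static inline CPAccessResult el0_example_sctlr_gate(CPUARMState *env,
                                                    uint64_t sctlr_bit)
{
    /* Trap EL0 accesses unless the given SCTLR_EL1 enable bit is set. */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & sctlr_bit)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
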
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    bool sec = arm_is_secure_below_el3(env);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vmalle1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0 |
                                ARMMMUIdxBit_S2NS);
        } else {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0);
        }
    }
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    CPUState *cs = env_cpu(env);
    bool sec = arm_is_secure_below_el3(env);
    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else if (has_el2) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0 |
                                            ARMMMUIdxBit_S2NS);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    bool sec = arm_is_secure_below_el3(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (sec) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S1SE1 |
                                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S12NSE1 |
                                                 ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vae1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S1SE1 |
                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S12NSE1 |
                                 ARMMMUIdxBit_S12NSE0);
    }
}

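/*
 * Worked example (not part of the original file): a TLBI-by-VA value
 * carries the virtual page number in its low bits, so the handlers
 * above rebuild the page-aligned address with value << 12 and then
 * sign-extend bit 55 downwards via sextract64(..., 0, 56). Writing
 * 0x80000 therefore selects the page at VA 0x80000000. (The IPA
 * variants below extract only 48 bits.) The helper name is
 * hypothetical.
 */
static inline uint64_t tlbi_example_pageaddr(uint64_t value)
{
    /* Same computation as the tlbi_aa64_va*_write handlers above. */
    return sextract64(value << 12, 0, 56);
}
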
static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* We don't implement EL2, so the only control on DC ZVA is the
     * bit in the SCTLR which can prohibit access for EL0.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}

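/*
 * Worked example (not part of the original file): DCZID_EL0.BS (bits
 * [3:0]) is the log2 of the DC ZVA block size in 32-bit words, and
 * bit 4 is DZP. A dcz_blocksize of 4 therefore advertises 2^4 words,
 * i.e. 64 bytes, and reads back as 0x14 when DC ZVA is prohibited.
 * The helper name is hypothetical.
 */
static inline unsigned int dczid_example_block_bytes(uint64_t dczid)
{
    /* 4 bytes per word, 2^BS words per block. */
    return 4u << (dczid & 0xf);
}
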
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));
}

static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}

static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};

/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_NO_RAW,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_CONST,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= HCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= HCR_API | HCR_APK;
    }

    /* Clear RES0 bits. */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC Disables stage1 and enables stage2 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    hcr_write(env, NULL, value);
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    hcr_write(env, NULL, value);
}

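/*
 * Illustrative sketch (not part of the original file): AArch32 sees
 * HCR_EL2 as the HCR/HCR2 pair, so the two handlers above deposit the
 * 32-bit value into the matching half of the 64-bit register before
 * calling hcr_write(). Combining the halves, with a hypothetical
 * helper name:
 */
static inline uint64_t hcr_example_combine(uint32_t lo, uint32_t hi)
{
    uint64_t v = 0;

    /* HCR supplies bits [31:0], HCR2 supplies bits [63:32]. */
    v = deposit64(v, 0, 32, lo);
    v = deposit64(v, 32, 32, hi);
    return v;
}
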
/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (arm_is_secure_below_el3(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        ret = 0;
    } else if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.4. */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}

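/*
 * Usage sketch (not part of the original file): callers should test
 * the effective value rather than the raw register so that the
 * secure-state and TGE/E2H rules above are honoured. For instance,
 * with a hypothetical helper name:
 */
static inline bool hcr_example_imo_set(CPUARMState *env)
{
    /* HCR_EL2.IMO as seen after the effective-value rules. */
    return (arm_hcr_el2_eff(env) & HCR_IMO) != 0;
}
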
static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0x3 << 10);
        value |= env->cp15.cptr_el[2] & (0x3 << 10);
    }
    env->cp15.cptr_el[2] = value;
}

static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= 0x3 << 10;
    }
    return value;
}

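/*
 * Worked example (not part of the original file): with NSACR.CP10 == 0
 * in Non-secure state under an AArch32 EL3, HCPTR.TCP10/TCP11 (bits 10
 * and 11) read as 1 whatever was written, so a write of 0 reads back
 * as 0xC00. The forced-bits step, with a hypothetical helper name:
 */
static inline uint64_t hcptr_example_forced_bits(uint64_t value)
{
    /* Force TCP10/TCP11 to 1, mirroring the read-as-1 rule above. */
    return value | (0x3 << 10);
}
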
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};

static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};

static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
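
/*
 * For illustration: in AArch32 the OS Lock is set only by writing the
 * architected key value 0xC5ACCE55 to OSLAR; any other value clears it.
 * In AArch64, OSLAR_EL1.OSLK is simply bit 0 of the written value.
 */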
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
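
/*
 * Note on the OSLSR_EL1 entry above: the reset value 10 (0b1010) reports
 * an OS Lock implementation (OSLM = 0b10) that is locked (OSLK, bit 1)
 * out of reset; oslar_write() subsequently updates only bit 1.
 */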
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap. If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * Cf. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1) {
        bool disabled = false;

        /* The CPACR.ZEN controls traps to EL1:
         * 0, 2 : trap EL0 and EL1 accesses
         * 1    : trap only EL0 accesses
         * 3    : trap no accesses
         */
        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            /* route_to_el2 */
            return (arm_feature(env, ARM_FEATURE_EL2)
                    && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
        }

        /* Check CPACR.FPEN.  */
        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            return 0;
        }
    }

    /* CPTR_EL2. Since TZ and TFP are positive,
     * they will be zero when EL2 is not present.
     */
    if (el <= 2 && !arm_is_secure_below_el3(env)) {
        if (env->cp15.cptr_el[2] & CPTR_TZ) {
            return 2;
        }
        if (env->cp15.cptr_el[2] & CPTR_TFP) {
            return 0;
        }
    }

    /* CPTR_EL3. Since EZ is negative we must check for EL3. */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}
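
/*
 * Example of the CPACR.ZEN handling above: with CPACR_EL1.ZEN = 0b01,
 * an EL1 access is allowed but an EL0 access sets "disabled" and traps
 * to EL1, or to EL2 instead when EL2 is present and the effective
 * HCR_EL2.TGE bit is set.
 */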
/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }
    return zcr_len;
}
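
/*
 * Worked example (illustrative values): on a CPU with EL2 but no EL3,
 * sve_max_vq = 16, ZCR_EL1.LEN = 3 and ZCR_EL2.LEN = 7, a query for EL1
 * yields MIN(15, 3, 7) = 3, i.e. 4 quadwords (a 512-bit vector), since
 * each controlling EL clamps the result.
 */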
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI.  */
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
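
/*
 * Note: when the effective vector length shrinks as a result of the
 * write, aarch64_sve_narrow_vq() zeroes the now-inaccessible high
 * portion of the vector state; zeroing is this implementation's choice
 * for the state that becomes inaccessible.
 */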
static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an address that is only 4-byte aligned.
             * BAS[7:4] are ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
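
/*
 * Worked example of the BAS decoding above (illustrative): with
 * DBGWCR.BAS = 0b00001100 and a doubleword-aligned WVR, basstart =
 * ctz32(0xc) = 2 and len = cto32(0xc >> 2) = 2, so the watchpoint
 * covers the two bytes at WVR + 2.
 */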
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}

static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
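
/*
 * Example of the DBGWVR sign extension above: a write of
 * 0x000123456789abcf (bit 48 set) is stored as 0xffff23456789abcc,
 * with bits [1:0] also cleared, so reads reflect the architected
 * sign-extended, 4-byte-aligned address.
 */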
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;

        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}

static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
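
/*
 * Example of the BAS constraint above: a guest write of BAS = 0b0101
 * (bits 5 and 7 set) is stored as BAS = 0b1111, since BAS[1] and BAS[3]
 * are forced to copy BAS[0] and BAS[2] respectively. Only the four
 * patterns listed in hw_breakpoint_update() can therefore occur.
 */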
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
     * of the debug registers such as number of breakpoints;
     * check that if they both exist then they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
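
/*
 * Example of the "minus 1" encoding above: a DBGDIDR value whose WRPs
 * field (bits [31:28]) reads 3 describes 4 watchpoints, which is why
 * the loops iterate i = 0..brps and i = 0..wrps inclusive.
 */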
/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr1 = cpu->id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
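
/*
 * The values ORed in above set the 4-bit GIC field to 1: bits [31:28]
 * of ID_PFR1 and bits [27:24] of ID_AA64PFR0_EL1, advertising a system
 * register interface to the GIC CPU interface.
 */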
/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access ok in secure mode.  */
        return CP_ACCESS_OK;
    }
    return access_lor_ns(env);
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode.  */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env);
}
#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_feature(env, ARM_FEATURE_EL2) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
    REGINFO_SENTINEL
};
static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000.  */
    env->NF = env->CF = env->VF = 0, env->ZF = 1;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest. There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      ri->name, error_get_pretty(err));
        error_free(err);

        env->ZF = 0; /* NZCV = 0100 */
        return 0;
    }
    return ret;
}

/* We do not support re-seeding, so the two registers operate the same. */
static const ARMCPRegInfo rndr_reginfo[] = {
    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
      .access = PL0_R, .readfn = rndr_readfn },
    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
      .access = PL0_R, .readfn = rndr_readfn },
    REGINFO_SENTINEL
};
#endif
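
/*
 * Note on the flag encoding in rndr_readfn() above: QEMU keeps the Z
 * flag inverted in env->ZF (Z is set when env->ZF == 0), so
 * "env->ZF = 1" yields NZCV = 0000 for success and "env->ZF = 0"
 * yields NZCV = 0100, the architected timeout indication for
 * RNDR/RNDRRS.
 */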
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo predinv_reginfo[] = {
    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /*
     * Note the AArch32 opcodes have a different OPC1.
     */
    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    REGINFO_SENTINEL
};
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar6 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement four counters in
         * addition to the cycle count register.
         */
        unsigned int i, pmcrn = 4;
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW,
            .type = ARM_CP_IO | ARM_CP_ALIAS,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
        for (i = 0; i < pmcrn; i++) {
            char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
            char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
            char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
            char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
            ARMCPRegInfo pmev_regs[] = {
                { .name = pmevcntr_name, .cp = 15, .crn = 14,
                  .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .raw_readfn = pmevcntr_rawread,
                  .raw_writefn = pmevcntr_rawwrite },
                { .name = pmevtyper_name, .cp = 15, .crn = 14,
                  .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .raw_writefn = pmevtyper_rawwrite },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, pmev_regs);
            g_free(pmevcntr_name);
            g_free(pmevcntr_el0_name);
            g_free(pmevtyper_name);
            g_free(pmevtyper_el0_name);
        }
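        /*
         * Illustration of the event-register encoding above: counter i is
         * encoded with crm = 8 | (i >> 3) (or 12 | (i >> 3) for the
         * PMEVTYPER variants) and opc2 = i & 7; e.g. a hypothetical
         * PMEVCNTR10 would use crm = 9, opc2 = 2, though this model only
         * defines counters 0..3 (pmcrn).
         */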
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
        FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
             * know the right value for the GIC field until after we
             * define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr1},
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              /* At present, only SVEver == 0 is defined anyway. */
              .resetvalue = 0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = 0x000f000f00ff0000,
              .fixed_bits    = 0x0000000000000011 },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = 0x00000000000000f0 },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64ZFR0_EL1"           },
            { .name = "ID_AA64MMFR0_EL1",
              .fixed_bits    = 0x00000000ff000000 },
            { .name = "ID_AA64MMFR1_EL1"          },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits    = 0x0000000000000006 },
            { .name = "ID_AA64DFR1_EL1"           },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64AFR*",
              .is_glob = true                     },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = 0x00fffffff0fffff0 },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = 0x000000f0ffffffff },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true                     },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
, v8_cp_reginfo
);
6328 if (arm_feature(env
, ARM_FEATURE_EL2
)) {
6329 uint64_t vmpidr_def
= mpidr_read_val(env
);
6330 ARMCPRegInfo vpidr_regs
[] = {
6331 { .name
= "VPIDR", .state
= ARM_CP_STATE_AA32
,
6332 .cp
= 15, .opc1
= 4, .crn
= 0, .crm
= 0, .opc2
= 0,
6333 .access
= PL2_RW
, .accessfn
= access_el3_aa32ns
,
6334 .resetvalue
= cpu
->midr
, .type
= ARM_CP_ALIAS
,
6335 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.vpidr_el2
) },
6336 { .name
= "VPIDR_EL2", .state
= ARM_CP_STATE_AA64
,
6337 .opc0
= 3, .opc1
= 4, .crn
= 0, .crm
= 0, .opc2
= 0,
6338 .access
= PL2_RW
, .resetvalue
= cpu
->midr
,
6339 .fieldoffset
= offsetof(CPUARMState
, cp15
.vpidr_el2
) },
6340 { .name
= "VMPIDR", .state
= ARM_CP_STATE_AA32
,
6341 .cp
= 15, .opc1
= 4, .crn
= 0, .crm
= 0, .opc2
= 5,
6342 .access
= PL2_RW
, .accessfn
= access_el3_aa32ns
,
6343 .resetvalue
= vmpidr_def
, .type
= ARM_CP_ALIAS
,
6344 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.vmpidr_el2
) },
6345 { .name
= "VMPIDR_EL2", .state
= ARM_CP_STATE_AA64
,
6346 .opc0
= 3, .opc1
= 4, .crn
= 0, .crm
= 0, .opc2
= 5,
6348 .resetvalue
= vmpidr_def
,
6349 .fieldoffset
= offsetof(CPUARMState
, cp15
.vmpidr_el2
) },
6352 define_arm_cp_regs(cpu
, vpidr_regs
);
6353 define_arm_cp_regs(cpu
, el2_cp_reginfo
);
6354 if (arm_feature(env
, ARM_FEATURE_V8
)) {
6355 define_arm_cp_regs(cpu
, el2_v8_cp_reginfo
);
6357 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
6358 if (!arm_feature(env
, ARM_FEATURE_EL3
)) {
6359 ARMCPRegInfo rvbar
= {
6360 .name
= "RVBAR_EL2", .state
= ARM_CP_STATE_AA64
,
6361 .opc0
= 3, .opc1
= 4, .crn
= 12, .crm
= 0, .opc2
= 1,
6362 .type
= ARM_CP_CONST
, .access
= PL2_R
, .resetvalue
= cpu
->rvbar
6364 define_one_arm_cp_reg(cpu
, &rvbar
);
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
            if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
            }
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-A32HPD. */
        if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
              .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
              .name = "MPUIR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1",
              .exported_bits = 0x00000000ffffffff },
            { .name = "REVIDR_EL1" },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
              .fixed_bits = 0x0000000080000000 },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .type = ARM_CP_CONST,
              .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
            ARMCPRegInfo hactlr2_reginfo = {
                .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
                .access = PL2_RW, .type = ARM_CP_CONST,
                .resetvalue = 0
            };
            define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
        }
    }
6679 if (arm_feature(env
, ARM_FEATURE_CBAR
)) {
6680 if (arm_feature(env
, ARM_FEATURE_AARCH64
)) {
6681 /* 32 bit view is [31:18] 0...0 [43:32]. */
6682 uint32_t cbar32
= (extract64(cpu
->reset_cbar
, 18, 14) << 18)
6683 | extract64(cpu
->reset_cbar
, 32, 12);
6684 ARMCPRegInfo cbar_reginfo
[] = {
6686 .type
= ARM_CP_CONST
,
6687 .cp
= 15, .crn
= 15, .crm
= 0, .opc1
= 4, .opc2
= 0,
6688 .access
= PL1_R
, .resetvalue
= cpu
->reset_cbar
},
6689 { .name
= "CBAR_EL1", .state
= ARM_CP_STATE_AA64
,
6690 .type
= ARM_CP_CONST
,
6691 .opc0
= 3, .opc1
= 1, .crn
= 15, .crm
= 3, .opc2
= 0,
6692 .access
= PL1_R
, .resetvalue
= cbar32
},
6695 /* We don't implement a r/w 64 bit CBAR currently */
6696 assert(arm_feature(env
, ARM_FEATURE_CBAR_RO
));
6697 define_arm_cp_regs(cpu
, cbar_reginfo
);
6699 ARMCPRegInfo cbar
= {
6701 .cp
= 15, .crn
= 15, .crm
= 0, .opc1
= 4, .opc2
= 0,
6702 .access
= PL1_R
|PL3_W
, .resetvalue
= cpu
->reset_cbar
,
6703 .fieldoffset
= offsetof(CPUARMState
,
6704 cp15
.c15_config_base_address
)
6706 if (arm_feature(env
, ARM_FEATURE_CBAR_RO
)) {
6707 cbar
.access
= PL1_R
;
6708 cbar
.fieldoffset
= 0;
6709 cbar
.type
= ARM_CP_CONST
;
6711 define_one_arm_cp_reg(cpu
, &cbar
);
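
    /*
     * Worked example (illustrative value, not from any specific CPU):
     * if reset_cbar is 0x0000000840000000, the AArch64 branch above
     * computes
     *   cbar32 = (extract64(reset_cbar, 18, 14) << 18)  -> 0x40000000
     *          |  extract64(reset_cbar, 32, 12)         -> 0x00000008
     * so the 32-bit CBAR view reads as 0x40000008.
     */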

    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }

    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }

    if (cpu_isar_feature(aa64_lor, cpu)) {
        /*
         * A trivial implementation of ARMv8.1-LOR leaves all of these
         * registers fixed at 0, which indicates that there are zero
         * supported Limited Ordering regions.
         */
        static const ARMCPRegInfo lor_reginfo[] = {
            { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
              .access = PL1_R, .accessfn = access_lorid,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, lor_reginfo);
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
    if (cpu_isar_feature(aa64_rndr, cpu)) {
        define_arm_cp_regs(cpu, rndr_reginfo);
    }
#endif

    /*
     * While all v8.0 cpus support aarch64, QEMU does have configurations
     * that do not set ID_AA64ISAR1, e.g. user-only qemu-arm -cpu max,
     * which will set ID_ISAR6.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
        ? cpu_isar_feature(aa64_predinv, cpu)
        : cpu_isar_feature(aa32_predinv, cpu)) {
        define_arm_cp_regs(cpu, predinv_reginfo);
    }
}

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_xml(cs),
                             "system-registers.xml", 0);
}

/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    qemu_printf("  %s\n", name);
    g_free(name);
}

void arm_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    qemu_printf("Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, NULL);
    g_slist_free(list);
}
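
/*
 * Illustrative output (typically reached via "-cpu help"):
 *   Available CPUs:
 *     arm1026
 *     arm1136
 *     ...
 *     any
 * The comparator above sorts the "any" type, if registered, after all
 * the others.
 */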

static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
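
/*
 * This backs the QMP "query-cpu-definitions" command; an illustrative
 * exchange:
 *   -> { "execute": "query-cpu-definitions" }
 *   <- { "return": [ { "name": "cortex-a15", ... }, ... ] }
 */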

static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);
    /* Reset the secure state to the specific incoming state.  This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank.  This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
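
/*
 * Note on the key encoding above: for AArch32 the ns bit participates
 * in ENCODE_CP_REG(), so a register banked between Secure and
 * Non-secure gets two distinct hash table entries, one per security
 * state, while AArch64 registers are always entered via
 * ENCODE_AA64_CP_REG() with the standard sysreg cp number.
 */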

void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0:
            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
            mask = PL0U_R | PL1_RW;
            break;
        case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL | ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of EL1 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}
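
/*
 * Wildcard expansion example, using a definition from earlier in this
 * file: crn0_wi_reginfo sets crm, opc1 and opc2 all to CP_ANY, so the
 * loops above expand it into 16 * 8 * 8 = 1024 AArch32 hash table
 * entries; every expansion where crm/opc1/opc2 is non-zero is marked
 * ARM_CP_ALIAS | ARM_CP_NO_GDB by add_cpreg_to_hashtable().
 */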

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
{
    const ARMCPRegUserSpaceInfo *m;
    ARMCPRegInfo *r;

    for (m = mods; m->name; m++) {
        GPatternSpec *pat = NULL;
        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}
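
/*
 * Usage example from earlier in this file: id_v8_user_midr_cp_reginfo
 * exposes MIDR_EL1 with .exported_bits = 0x00000000ffffffff, so only
 * the low 32 bits of the reset value survive, and lists REVIDR_EL1
 * with no exported or fixed bits, which therefore reads as a constant
 * zero under user-mode emulation.
 */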

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}

static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}

uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;

    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
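
/*
 * Bit layout assembled by cpsr_read() (the standard AArch32 CPSR
 * format):
 *   31 N   30 Z   29 C   28 V   27 Q   26:25 IT[1:0]   19:16 GE
 *   15:10 IT[7:2]   8 A   7 I   6 F   5 T   4:0 M
 * The NZCV, Q, IT, GE and T fields are kept in separate CPUARMState
 * fields for speed and merged here; A/I/F come from env->daif and the
 * mode bits from env->uncached_cpsr.
 */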

void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q) {
        env->QF = ((val & CPSR_Q) != 0);
    }
    if (mask & CPSR_T) {
        env->thumb = ((val & CPSR_T) != 0);
    }
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                env->uncached_cpsr |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}

/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;
    }
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}

#ifdef CONFIG_USER_ONLY

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
    return 0;
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = env_archcpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else

static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode) {
        return;
    }

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}

/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contain a target of EL1.  This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
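
/*
 * Example lookup: a physical IRQ taken from Non-secure EL0, with an
 * AArch64 EL3 (64BIT-EL3 = 1), SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and
 * HCR_EL2.IMO = 1, indexes row "1 0 1 1" above:
 *   target_el_table[1][0][1][1][0][0] == 2
 * i.e. the IRQ is routed to EL2.
 */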

/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    }

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}

/*
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static bool v7m_cpacr_pass(CPUARMState *env, bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}
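
/*
 * The two-bit field tested above is the CP10 (FP) access field of the
 * v7M CPACR: 0b00 and 0b10 deny access (0b10 is UNPREDICTABLE and
 * treated as 0b00), 0b01 grants privileged-only access, and 0b11
 * grants full access.
 */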

/*
 * What kind of stack write are we doing? This affects how exceptions
 * generated during the stacking are treated.
 */
typedef enum StackingMode {
    STACK_NORMAL,
    STACK_IGNFAULTS,
    STACK_LAZYFP,
} StackingMode;

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, StackingMode mode)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.LSPERR "
                              "during lazy stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.AUVIOL "
                              "during stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
            }
            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MLSPERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MSTKERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            }
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        if (mode == STACK_LAZYFP) {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        }
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception (this is indicated
     * by the caller passing STACK_IGNFAULTS). Even in this case we will
     * still update the fault status registers.
     */
    switch (mode) {
    case STACK_NORMAL:
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
        break;
    case STACK_LAZYFP:
        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
        break;
    case STACK_IGNFAULTS:
        break;
    }
    return false;
}

static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /*
     * Preserve FP state (because LSPACT was set and we are about
     * to execute an FP instruction). This corresponds to the
     * PreserveFPState() pseudocode.
     * We may throw an exception if the stacking fails.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
    uint32_t fpcar = env->v7m.fpcar[is_secure];
    bool stacked_ok = true;
    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
    bool take_exception;

    /* Take the iothread lock as we are going to touch the NVIC */
    qemu_mutex_lock_iothread();

    /* Check the background context had access to the FPU */
    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    }

    if (!splimviol && stacked_ok) {
        /* We only stack if the stack limit wasn't violated */
        int i;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fpcar + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
        }

        stacked_ok = stacked_ok &&
            v7m_stack_write(cpu, fpcar + 0x40,
                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
    }

    /*
     * We definitely pended an exception, but it's possible that it
     * might not be able to be taken now. If its priority permits us
     * to take it now, then we must not update the LSPACT or FP regs,
     * but instead jump out to take the exception immediately.
     * If it's just pending and won't be taken until the current
     * handler exits, then we do update LSPACT and the FP regs.
     */
    take_exception = !stacked_ok &&
        armv7m_nvic_can_take_pending_exception(env->nvic);

    qemu_mutex_unlock_iothread();

    if (take_exception) {
        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
    }

    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;

    if (ts) {
        /* Clear s0 to s31 and the FPSCR */
        int i;

        for (i = 0; i < 32; i += 2) {
            *aa32_vfp_dreg(env, i / 2) = 0;
        }
        vfp_set_fpscr(env, 0);
    }
    /*
     * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
     * unchanged.
     */
}

/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/* Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /* Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /* All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /* This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (!(dest & 1)) {
        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    }
    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /* target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data(env, sp, nextinst);
    cpu_stl_data(env, sp + 4, saved_psr);

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /* Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
}

static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /* Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
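
/*
 * Summary of the four cases above:
 *   requested security == current, wanted SP == current selection -> regs[13]
 *   requested security == current, the other SP                -> other_sp
 *   other security state, PSP wanted                           -> other_ss_psp
 *   other security state, MSP wanted                           -> other_ss_msp
 */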

static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /* We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /* NS access to S memory */
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /* All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a MemManage or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     */
    exc_secure = targets_secure ||
        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}

static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
{
    /*
     * Return the integrity signature value for the callee-saves
     * stack frame section. @lr is the exception return payload/LR value
     * whose FType bit forms bit 0 of the signature if FP is present.
     */
    uint32_t sig = 0xfefa125a;

    if (!arm_feature(env, ARM_FEATURE_VFP) || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
        sig |= 1;
    }
    return sig;
}

static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /* For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;
    uint32_t sig;
    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /* Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    sig = v7m_integrity_sig(env, lr);
    stacked_ok =
        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
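
/*
 * Layout of the 0x28-byte callee-saves frame written above:
 *   0x00 integrity signature    0x04 reserved
 *   0x08 r4    0x0c r5    0x10 r6    0x14 r7
 *   0x18 r8    0x1c r9    0x20 r10   0x24 r11
 */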

static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /* Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (dotailchain) {
        /* Sanitize LR FType and PREFIX bits */
        if (!arm_feature(env, ARM_FEATURE_VFP)) {
            lr |= R_V7M_EXCRET_FTYPE_MASK;
        }
        lr = deposit32(lr, 24, 8, 0xff);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /* The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /* We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /* We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /* Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                /* Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;

                for (i = 0; i < 13; i++) {
                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /* Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /* Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear SFPA and FPCA (has no effect if no FPU) */
    env->v7m.control[M_REG_S] &=
        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
                             bool apply_splim)
{
    /* Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
     * that we will need later in order to do lazy FP reg stacking.
     */
    bool is_secure = env->v7m.secure;
    void *nvic = env->nvic;
    /* Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
     * are banked and we want to update the bit in the bank for the
     * current security state; and in one case we want to specifically
     * update the NS banked version of a bit even if we are secure.
     */
    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;

    env->v7m.fpcar[is_secure] = frameptr & ~0x7;

    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
        bool splimviol;
        uint32_t splim = v7m_sp_limit(env);
        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);

        splimviol = !ign && frameptr < splim;
        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
    }

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);

    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
                        !arm_v7m_is_handler_mode(env));

    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);

    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);

    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);

    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);

    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);

        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
    }
}
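
/* Illustrative sketch (not called by the emulation code): the FPCCR update
 * above relies on the banked/unbanked split. FIELD_DP32() is a
 * read-modify-write of one named bitfield, so a write through 'fpccr'
 * touches only the current security state's bank, while writes through
 * 'fpccr_s'/'fpccr_ns' pin a specific bank. A hypothetical helper making
 * that pattern explicit for one field:
 */
static inline void v7m_fpccr_set_lspact_sketch(CPUARMState *env, bool secure)
{
    /* Update LSPACT only in the bank selected by 'secure' */
    uint32_t *fpccr = &env->v7m.fpccr[secure ? M_REG_S : M_REG_NS];

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);
}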
void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;

    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (lspact) {
        /* LSPACT should not be active when there is active FP state */
        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
    }

    if (fptr & 7) {
        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
    }

    /* Note that we do not use v7m_stack_write() here, because the
     * accesses should not set the FSR bits for stacking errors if they
     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
     * or AccType_LAZYFP). Faults in cpu_stl_data() will throw exceptions
     * and longjmp out.
     */
    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fptr + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            cpu_stl_data(env, faddr, slo);
            cpu_stl_data(env, faddr + 4, shi);
        }
        cpu_stl_data(env, fptr + 0x40, vfp_get_fpscr(env));

        /* If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
         */
        if (ts) {
            for (i = 0; i < 32; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
        }
    } else {
        v7m_update_fpccr(env, fptr, false);
    }

    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
}
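
/* Sketch of the lazy-FP frame layout implied by the store loop above
 * (an illustration, not used by the code): s0..s15 live at fptr + 0..0x3c,
 * the FPSCR slot is at fptr + 0x40, and s16..s31 (present only when
 * FPCCR.TS is set) follow from fptr + 0x48, which is where the "+ 8"
 * skip comes from. Hypothetical helper:
 */
static inline uint32_t v7m_vlstm_sreg_offset_sketch(int sreg)
{
    /* Byte offset of single-precision register 'sreg' within the frame */
    uint32_t off = 4 * sreg;

    if (sreg >= 16) {
        off += 8; /* skip the FPSCR slot and its padding word */
    }
    return off;
}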
void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
        /* State in FP is still valid */
        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
    } else {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;
        uint32_t fpscr;

        if (fptr & 7) {
            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
        }

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint32_t slo, shi;
            uint64_t dn;
            uint32_t faddr = fptr + 4 * i;

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }

            slo = cpu_ldl_data(env, faddr);
            shi = cpu_ldl_data(env, faddr + 4);

            dn = (uint64_t) shi << 32 | slo;
            *aa32_vfp_dreg(env, i / 2) = dn;
        }
        fpscr = cpu_ldl_data(env, fptr + 0x40);
        vfp_set_fpscr(env, fpscr);
    }

    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
}
static bool v7m_push_stack(ARMCPU *cpu)
{
    /* Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok = true, limitviol = false;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    uint32_t framesize;
    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);

    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
        (env->v7m.secure || nsacr_cp10)) {
        if (env->v7m.secure &&
            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
            framesize = 0xa8;
        } else {
            framesize = 0x68;
        }
    } else {
        framesize = 0x20;
    }

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    xpsr &= ~XPSR_SFPA;
    if (env->v7m.secure &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        xpsr |= XPSR_SFPA;
    }

    frameptr -= framesize;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /* Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            /* We won't try to perform any further memory accesses but
             * we must continue through the following code to check for
             * permission faults during FPU state preservation, and we
             * must update FPCCR if lazy stacking is enabled.
             */
            limitviol = true;
            stacked_ok = false;
        }
    }

    /* Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok = stacked_ok &&
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);

    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
        /* FPU is active, try to save its registers */
        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;

        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault because LSPACT and FPCA both set\n");
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        } else if (!env->v7m.secure && !nsacr_cp10) {
            qemu_log_mask(CPU_LOG_INT,
                          "...Secure UsageFault with CFSR.NOCP because "
                          "NSACR.CP10 prevents stacking FP regs\n");
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        } else {
            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
                /* Lazy stacking disabled, save registers now */
                int i;
                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
                                                 arm_current_el(env) != 0);

                if (stacked_ok && !cpacr_pass) {
                    /* Take UsageFault if CPACR forbids access. The pseudocode
                     * here does a full CheckCPEnabled() but we know the NSACR
                     * check can never fail as we have already handled that.
                     */
                    qemu_log_mask(CPU_LOG_INT,
                                  "...UsageFault with CFSR.NOCP because "
                                  "CPACR.CP10 prevents stacking FP regs\n");
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            env->v7m.secure);
                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
                    stacked_ok = false;
                }

                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
                    uint32_t faddr = frameptr + 0x20 + 4 * i;
                    uint32_t slo = extract64(dn, 0, 32);
                    uint32_t shi = extract64(dn, 32, 32);

                    if (i >= 16) {
                        faddr += 8; /* skip the slot for the FPSCR */
                    }
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, faddr, slo,
                                        mmu_idx, STACK_NORMAL) &&
                        v7m_stack_write(cpu, faddr + 4, shi,
                                        mmu_idx, STACK_NORMAL);
                }
                stacked_ok = stacked_ok &&
                    v7m_stack_write(cpu, frameptr + 0x60,
                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
                if (cpacr_pass) {
                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                }
            } else {
                /* Lazy stacking enabled, save necessary info to stack later */
                v7m_update_fpccr(env, frameptr + 0x20, true);
            }
        }
    }

    /* If we broke a stack limit then SP was already updated earlier;
     * otherwise we update SP regardless of whether any of the stack
     * accesses failed or we took some other kind of fault.
     */
    if (!limitviol) {
        env->regs[13] = frameptr;
    }

    return !stacked_ok;
}
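
/* Restatement of the framesize computation above as a self-contained
 * sketch (illustrative only, not used by the code): the integer frame
 * is 0x20 bytes; with CONTROL.FPCA set the frame grows to 0x68
 * (s0-s15 + FPSCR) or 0xa8 (s0-s31 + FPSCR, when stacking for Secure
 * state with FPCCR.TS set).
 */
static inline uint32_t v7m_framesize_sketch(bool fpca, bool secure_ts)
{
    if (!fpca) {
        return 0x20;
    }
    return secure_ts ? 0xa8 : 0x68;
}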
static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr, xpsr_mask;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;
    bool ftype;
    bool restore_s16_s31;

    /* If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * should not be too severe.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /* In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    ftype = excret & R_V7M_EXCRET_FTYPE_MASK;

    if (!arm_feature(env, ARM_FEATURE_VFP) && !ftype) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
                      "if FPU not present\n",
                      excret);
        ftype = true;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = 1;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /* Auto-clear FAULTMASK on return from other than NMI.
         * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
         * value of the ES bit in the exception return value indicates
         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
                env->v7m.faultmask[exc_secure] = 0;
            }
        } else {
            env->v7m.faultmask[M_REG_NS] = 0;
        }
    }

    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
                                     exc_secure)) {
    case -1:
        /* attempt to exit an exception that isn't active */
        ufault = true;
        break;
    case 0:
        /* still an irq active now */
        break;
    case 1:
        /* we returned to base exception level, no nesting.
         * (In the pseudocode this is written using "NestedActivation != 1"
         * where we have 'rettobase == false'.)
         */
        rettobase = true;
        break;
    default:
        g_assert_not_reached();
    }

    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        (excret & R_V7M_EXCRET_S_MASK);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
             * we choose to take the UsageFault.
             */
            if ((excret & R_V7M_EXCRET_S_MASK) ||
                (excret & R_V7M_EXCRET_ES_MASK) ||
                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
                ufault = true;
            }
        }
        if (excret & R_V7M_EXCRET_RES0_MASK) {
            ufault = true;
        }
    } else {
        /* For v7M we only recognize certain combinations of the low bits */
        switch (excret & 0xf) {
        case 1: /* Return to Handler */
            break;
        case 13: /* Return to Thread using Process stack */
        case 9: /* Return to Thread using Main stack */
            /* We only need to check NONBASETHRDENA for v7M, because in
             * v8M this bit does not exist (it is RES1).
             */
            if (!rettobase &&
                !(env->v7m.ccr[env->v7m.secure] &
                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
                ufault = true;
            }
            break;
        default:
            ufault = true;
        }
    }

    /*
     * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
     * Handler mode (and will be until we write the new XPSR.Interrupt
     * field) this does not switch around the current stack pointer.
     * We must do this before we do any kind of tailchaining, including
     * for the derived exceptions on integrity check failures, or we will
     * give the guest an incorrect EXCRET.SPSEL value on exception entry.
     */
    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);

    /*
     * Clear scratch FP values left in caller saved registers; this
     * must happen before any kind of tail chaining.
     */
    if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
        if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                          "stackframe: error during lazy state deactivation\n");
            v7m_exception_taken(cpu, excret, true, false);
            return;
        } else {
            /* Clear s0..s15 and FPSCR */
            int i;

            for (i = 0; i < 16; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
        }
    }

    if (sfault) {
        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                      "stackframe: failed EXC_RETURN.ES validity check\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    if (ufault) {
        /* Bad exception return: instead of popping the exception
         * stack, directly take a usage fault on the current stack.
         */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                      "stackframe: failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    /*
     * Tailchaining: if there is currently a pending exception that
     * is high enough priority to preempt execution at the level we're
     * about to return to, then just directly take that exception now,
     * avoiding an unstack-and-then-stack. Note that now we have
     * deactivated the previous exception by calling armv7m_nvic_complete_irq()
     * our current execution priority is already the execution priority we are
     * returning to -- none of the state we would unstack or set based on
     * the EXCRET value affects it.
     */
    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    switch_v7m_security_state(env, return_to_secure);

    {
        /* The stack pointer we should be reading the exception frame from
         * depends on bits in the magic exception return type value (and
         * for v8M isn't necessarily the stack pointer we will eventually
         * end up resuming execution with). Get a pointer to the location
         * in the CPU state struct where the SP we need is currently being
         * stored; we will use and modify it in place.
         * We use this limited C variable scope so we don't accidentally
         * use 'frame_sp_p' after we do something that makes it invalid.
         */
        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
                                              return_to_secure,
                                              !return_to_handler,
                                              return_to_sp_process);
        uint32_t frameptr = *frame_sp_p;
        bool pop_ok = true;
        ARMMMUIdx mmu_idx;
        bool return_to_priv = return_to_handler ||
            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
                                                        return_to_priv);

        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "M profile exception return with non-8-aligned SP "
                          "for destination state is UNPREDICTABLE\n");
        }

        /* Do we need to pop callee-saved registers? */
        if (return_to_secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
            uint32_t actual_sig;

            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);

            if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
                /* Take a SecureFault on the current stack */
                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                              "stackframe: failed exception return integrity "
                              "signature check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }

            pop_ok = pop_ok &&
                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);

            frameptr += 0x28;
        }

        /* Pop registers */
        pop_ok = pop_ok &&
            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);

        if (!pop_ok) {
            /* v7m_stack_read() pended a fault, so take it (as a tail
             * chained exception on the same stack frame)
             */
            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
            v7m_exception_taken(cpu, excret, true, false);
            return;
        }

        /* Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
         * assume the r15 in the stack frame should be a Thumb-style "lsbit
         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
         * complain about the badly behaved guest.
         */
        if (env->regs[15] & 1) {
            env->regs[15] &= ~1U;
            if (!arm_feature(env, ARM_FEATURE_V8)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "M profile return from interrupt with misaligned "
                              "PC is UNPREDICTABLE on v7M\n");
            }
        }

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* For v8M we have to check whether the xPSR exception field
             * matches the EXCRET value for return to handler/thread
             * before we commit to changing the SP and xPSR.
             */
            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
            if (return_to_handler != will_be_handler) {
                /* Take an INVPC UsageFault on the current stack.
                 * By this point we will have switched to the security state
                 * for the background state, so this UsageFault will target
                 * that state.
                 */
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                        env->v7m.secure);
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                              "stackframe: failed exception return integrity "
                              "check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }
        }

        if (!ftype) {
            /* FP present and we need to handle it */
            if (!return_to_secure &&
                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...taking SecureFault on existing stackframe: "
                              "Secure LSPACT set but exception return is "
                              "not to secure state\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }

            restore_s16_s31 = return_to_secure &&
                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);

            if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                /* State in FPU is still valid, just clear LSPACT */
                env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
            } else {
                int i;
                uint32_t fpscr;
                bool cpacr_pass, nsacr_pass;

                cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
                                            return_to_priv);
                nsacr_pass = return_to_secure ||
                    extract32(env->v7m.nsacr, 10, 1);

                if (!cpacr_pass) {
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            return_to_secure);
                    env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...taking UsageFault on existing "
                                  "stackframe: CPACR.CP10 prevents unstacking "
                                  "FP regs\n");
                    v7m_exception_taken(cpu, excret, true, false);
                    return;
                } else if (!nsacr_pass) {
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...taking Secure UsageFault on existing "
                                  "stackframe: NSACR.CP10 prevents unstacking "
                                  "FP regs\n");
                    v7m_exception_taken(cpu, excret, true, false);
                    return;
                }

                for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
                    uint32_t slo, shi;
                    uint64_t dn;
                    uint32_t faddr = frameptr + 0x20 + 4 * i;

                    if (i >= 16) {
                        faddr += 8; /* Skip the slot for the FPSCR */
                    }

                    pop_ok = pop_ok &&
                        v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
                        v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);

                    if (!pop_ok) {
                        break;
                    }

                    dn = (uint64_t)shi << 32 | slo;
                    *aa32_vfp_dreg(env, i / 2) = dn;
                }
                pop_ok = pop_ok &&
                    v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
                if (pop_ok) {
                    vfp_set_fpscr(env, fpscr);
                }
                if (!pop_ok) {
                    /*
                     * These regs are 0 if security extension present;
                     * otherwise merely UNKNOWN. We zero always.
                     */
                    for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                }
            }
        }
        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
                                               V7M_CONTROL, FPCA, !ftype);

        /* Commit to consuming the stack frame */
        frameptr += 0x20;
        if (!ftype) {
            frameptr += 0x48;
            if (restore_s16_s31) {
                frameptr += 0x40;
            }
        }
        /* Undo stack alignment (the SPREALIGN bit indicates that the original
         * pre-exception SP was not 8-aligned and we added a padding word to
         * align it, so we undo this by ORing in the bit that increases it
         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
         * would work too but a logical OR is how the pseudocode specifies it.)
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;
    }

    xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
    if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
        xpsr_mask &= ~XPSR_GE;
    }
    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
    xpsr_write(env, xpsr, xpsr_mask);

    if (env->v7m.secure) {
        bool sfpa = xpsr & XPSR_SFPA;

        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
                                               V7M_CONTROL, SFPA, sfpa);
    }

    /* The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /* Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        bool ignore_stackfaults;

        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        ignore_stackfaults = v7m_push_stack(cpu);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
static bool do_v7m_function_return(ARMCPU *cpu)
{
    /* v8M security extensions magic function return.
     * We may either:
     *  (1) throw an exception (longjump)
     *  (2) return true if we successfully handled the function return
     *  (3) return false if we failed a consistency check and have
     *      pended a UsageFault that needs to be taken now
     *
     * At this point the magic return value is split between env->regs[15]
     * and env->thumb. We don't bother to reconstitute it because we don't
     * need it (all values are handled the same way).
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        TCGMemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /* These loads may throw an exception (for MPU faults). We want to
         * do them as secure, so work out what MMU index that is.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on new IPSR */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}
static void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
            [EXCP_LSERR] = "v8M LSERR UsageFault",
            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                               uint32_t addr, uint16_t *insn)
{
    /* Load a 16-bit portion of a v7M instruction, returning true on success,
     * or false on failure (in which case we will have pended the appropriate
     * exception).
     * We need to do the instruction fetch's MPU and SAU checks
     * like this because there is no MMU index that would allow
     * doing the load with a single function call. Instead we must
     * first check that the security attributes permit the load
     * and that they don't mismatch on the two halves of the instruction,
     * and then we do the load as a secure load (ie using the security
     * attributes of the address, not the CPU, as architecturally required).
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;

    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /* This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
        /* the MPU lookup failed */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
                                  attrs, &txres);
    if (txres != MEMTX_OK) {
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /* Check whether this attempt to execute code in a Secure & NS-Callable
     * memory region is for an SG instruction; if so, then emulate the
     * effect of the SG instruction and return true. Otherwise pend
     * the correct kind of exception and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /* We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
    }

    /* OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);
    env->regs[14] &= ~1;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4; /* Skip the SG insn */
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs->exception_index);

    /* For exceptions we just mark as pending on the NVIC, and let that
     * handle it.
     */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
    {
        /*
         * NOCP might be directed to something other than the current
         * security state if this fault is because of NSACR; we indicate
         * the target security state using exception.target_el.
         */
        int target_secstate;

        if (env->exception.target_el == 3) {
            target_secstate = M_REG_S;
        } else {
            target_secstate = env->v7m.secure;
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
        env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
        break;
    }
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_STKOF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        break;
    case EXCP_LSERR:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
        break;
    case EXCP_UNALIGNED:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction. */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /* Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /* Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
             * the SG instruction have the same security attributes.)
             * Everything else must generate an INVEP SecureFault, so we
             * emulate the SG instruction here.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /* Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /* All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_BKPT:
        if (semihosting_enabled()) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                qemu_log_mask(CPU_LOG_INT,
                              "...handling as semihosting call 0x%x\n",
                              env->regs[0]);
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    case EXCP_LAZYFP:
        /*
         * We already pended the specific exception in the NVIC in the
         * v7m_preserve_fp_state() helper function.
         */
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK;
        /* The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
        if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
            lr |= R_V7M_EXCRET_FTYPE_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}
/* Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /* Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
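
/* The fixed mapping used above and mirrored in aarch64_sync_64_to_32()
 * below, restated as a sketch: each AArch32 banked SP has a fixed slot
 * in xregs[13..29]. Hypothetical helper, not part of the sync code.
 */
static inline int aarch32_mode_sp_xreg_sketch(uint32_t mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 13;
    case ARM_CPU_MODE_HYP:
        return 15;
    case ARM_CPU_MODE_IRQ:
        return 17;
    case ARM_CPU_MODE_SVC:
        return 19;
    case ARM_CPU_MODE_ABT:
        return 21;
    case ARM_CPU_MODE_UND:
        return 23;
    case ARM_CPU_MODE_FIQ:
        return 29;
    default:
        return -1; /* no banked SP slot for this mode */
    }
}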
/* Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /* Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * regs.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /* HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);
    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
}
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x14;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}
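
/* Restatement of the mask computation above as a sketch (illustrative
 * only): each of PSTATE.{A,I,F} is masked on entry to Hyp mode only if
 * the corresponding SCR.{EA,IRQ,FIQ} routing bit is clear.
 */
static inline uint32_t hyp_entry_cpsr_mask_sketch(uint64_t scr_el3)
{
    uint32_t mask = 0;

    if (!(scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }
    return mask;
}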
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
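
/* Sketch of the vector base selection above (illustrative only, not
 * used by the code): Monitor mode uses MVBAR, SCTLR.V selects the
 * fixed high-vectors base, and otherwise the banked VBAR applies.
 */
static inline uint32_t aarch32_vector_base_sketch(bool to_monitor,
                                                  bool high_vectors,
                                                  uint32_t mvbar,
                                                  uint32_t vbar)
{
    if (to_monitor) {
        return mvbar;
    }
    return high_vectors ? 0xffff0000 : vbar;
}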
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int cur_el = arm_current_el(env);

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
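
/* Sketch of the AArch64 vector entry computation above (illustration
 * only): the table slot depends on where the exception came from
 * relative to the target EL, plus 0x80/0x100 for IRQ/FIQ entries.
 * The parameters are hypothetical abstractions of the checks above.
 */
static inline target_ulong aarch64_vector_entry_sketch(target_ulong vbar,
                                                       bool from_lower_el,
                                                       bool lower_el_aa64,
                                                       bool cur_sp_elx,
                                                       unsigned irq_fiq_off)
{
    target_ulong addr = vbar;

    if (from_lower_el) {
        /* From a lower EL: 0x400 if that EL runs AArch64, else 0x600 */
        addr += lower_el_aa64 ? 0x400 : 0x600;
    } else if (cur_sp_elx) {
        /* Same EL, using SP_ELx rather than SP_EL0 */
        addr += 0x200;
    }
    return addr + irq_fiq_off; /* 0x80 for IRQ, 0x100 for FIQ, else 0 */
}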
static inline bool check_for_semihosting(CPUState *cs)
{
    /* Check whether this exception is a semihosting call; if so
     * then handle it and return true; otherwise return false.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            /* This is always the 64-bit semihosting exception.
             * The "is this usermode" and "is semihosting enabled"
             * checks have been done at translate time.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...handling as semihosting call 0x%" PRIx64 "\n",
                          env->xregs[0]);
            env->xregs[0] = do_arm_semihosting(env);
            return true;
        }
        return false;
    } else {
        uint32_t imm;

        /* Only intercept calls from privileged modes, to provide some
         * semblance of security.
         */
        if (cs->exception_index != EXCP_SEMIHOST &&
            (!semihosting_enabled() ||
             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
            return false;
        }

        switch (cs->exception_index) {
        case EXCP_SEMIHOST:
            /* This is always a semihosting call; the "is this usermode"
             * and "is semihosting enabled" checks have been done at
             * translate time.
             */
            break;
        case EXCP_SWI:
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    break;
                }
            } else {
                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
                    & 0xffffff;
                if (imm == 0x123456) {
                    break;
                }
            }
            return false;
        case EXCP_BKPT:
            /* See if this is a semihosting syscall.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    env->regs[15] += 2;
                    break;
                }
            }
            return false;
        default:
            return false;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return true;
    }
}
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /* Semihosting semantics depend on the register width of the
     * code that caused the exception, not the target exception level,
     * so must be handled here.
     */
    if (check_for_semihosting(cs)) {
        return;
    }

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_S1E2:
        return 2;
    case ARMMMUIdx_S1E3:
        return 3;
    case ARMMMUIdx_S1SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
#ifndef CONFIG_USER_ONLY

/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (env->cp15.hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((env->cp15.hcr_el2 & HCR_DC) &&
        (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

#endif /* !CONFIG_USER_ONLY */
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
    }
    return mmu_idx;
}
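
/*
 * Worked example (illustrative): the arithmetic above relies on
 * ARMMMUIdx_S12NSE0/S12NSE1 and ARMMMUIdx_S1NSE0/S1NSE1 being declared in
 * the same relative order, so adding the distance between the two blocks
 * maps a combined-stage index onto its stage 1 counterpart:
 *
 *   stage_1_mmu_idx(ARMMMUIdx_S12NSE0) == ARMMMUIdx_S1NSE0
 *   stage_1_mmu_idx(ARMMMUIdx_S12NSE1) == ARMMMUIdx_S1NSE1
 *   any other index is returned unchanged
 */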
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}
#ifndef CONFIG_USER_ONLY
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
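
/*
 * Illustrative summary of the simplified AP[2:1] decode above:
 *
 *   AP[2:1] = 0 -> privileged RW, no user access
 *   AP[2:1] = 1 -> RW at any privilege
 *   AP[2:1] = 2 -> privileged RO, no user access
 *   AP[2:1] = 3 -> RO at any privilege
 */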
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}
/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
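
/*
 * Worked example (illustrative): in an AArch64 EL1&0 regime, a page with
 * AP[2:1] = 1 (EL0-writable) is implicitly execute-never for the privileged
 * lookup: user_rw includes PAGE_WRITE, so "xn = pxn || (user_rw & PAGE_WRITE)"
 * above strips PAGE_EXEC even when the descriptor's XN and PXN bits are clear.
 */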
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
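
/*
 * Worked example (illustrative): with TTBCR.N = 2, tcr->mask covers the top
 * two VA bits, so addresses from 0x40000000 up walk from TTBR1 (whose table
 * is always the full 16KB, hence the fixed 0xffffc000 mask) while lower
 * addresses walk from TTBR0, whose table shrinks to 16KB >> N and is aligned
 * via tcr->base_mask. The final OR indexes the level 1 descriptor with VA
 * bits [31:20] scaled by the 4-byte descriptor size.
 */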
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMCacheAttrs cacheattrs = {};
        ARMCacheAttrs *pcacheattrs = NULL;

        if (env->cp15.hcr_el2 & HCR_PTW) {
            /*
             * PTW means we must fault if this S1 walk touches S2 Device
             * memory; otherwise we don't care about the attributes and can
             * save the S2 translation the effort of computing them.
             */
            pcacheattrs = &cacheattrs;
        }

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
            /* Access was to Device memory: generate Permission fault */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->type != ARMFault_None) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->type != ARMFault_None) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
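
/*
 * Worked example (illustrative): a v5 small (4k) page descriptor carries four
 * 2-bit AP subfields, one per 1KB subpage. (address >> 9) & 6 turns VA bits
 * [11:10] into a shift of 0/2/4/6, so "4 +" that picks AP0 at descriptor bits
 * [5:4] for the first subpage up to AP3 at bits [11:10] for the last.
 */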
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
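
/*
 * Worked example (illustrative): the supersection assembly above lets a
 * 32-bit short descriptor name a 40-bit output address: desc[31:24] gives
 * PA[31:24], desc[23:20] gives PA[35:32] and desc[8:5] gives PA[39:36].
 * E.g. desc[31:24] = 0xab, desc[23:20] = 0x3, desc[8:5] = 0x2 places the
 * 16MB block at PA 0x23_ab00_0000.
 */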
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
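
/*
 * Worked example (illustrative): for 4KB granules (stride 9, grainsize 12)
 * and a 40-bit IPA space, a suggested starting level of 1 gives
 * startsizecheck = 40 - ((3 - 1) * 9 + 12) = 10, inside the permitted
 * [1, stride + 4] = [1, 13] window, so that SL0 choice is accepted.
 */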
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
#endif /* !CONFIG_USER_ONLY */
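
/*
 * Worked example (illustrative): s2attrs = 0xf (outer and inner write-back)
 * with caches enabled expands to the MAIR encoding 0xff: hiattr 3 and
 * loattr 3 each gain a "RW allocate" hint of 3, i.e.
 * (3 << 6) | (3 << 4) | (3 << 2) | 3.
 */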
ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
                                        ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    bool tbi, tbid, epd, hpd, using16k, using64k;
    int select, tsz;

    /*
     * Bit 55 is always between the two regions, and is canonical for
     * determining if address tagging is enabled.
     */
    select = extract64(va, 55, 1);

    if (el > 1) {
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* VTCR_EL2 */
            tbi = tbid = hpd = false;
        } else {
            tbi = extract32(tcr, 20, 1);
            hpd = extract32(tcr, 24, 1);
            tbid = extract32(tcr, 29, 1);
        }
        epd = false;
    } else if (!select) {
        tsz = extract32(tcr, 0, 6);
        epd = extract32(tcr, 7, 1);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        tbi = extract64(tcr, 37, 1);
        hpd = extract64(tcr, 41, 1);
        tbid = extract64(tcr, 51, 1);
    } else {
        int tg = extract32(tcr, 30, 2);
        using16k = tg == 1;
        using64k = tg == 3;
        tsz = extract32(tcr, 16, 6);
        epd = extract32(tcr, 23, 1);
        tbi = extract64(tcr, 38, 1);
        hpd = extract64(tcr, 42, 1);
        tbid = extract64(tcr, 52, 1);
    }
    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .tbi = tbi,
        .tbid = tbid,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
    };
}
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);

    /* Present TBI as a composite with TBID.  */
    ret.tbi &= (data || !ret.tbid);

    return ret;
}
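
/*
 * Illustrative truth table for the composite above: TBID restricts
 * top-byte-ignore to data accesses, so
 *
 *   tbi = 1, tbid = 0 -> tag bits ignored for data and instruction fetches
 *   tbi = 1, tbid = 1 -> tag bits ignored for data accesses only
 *   tbi = 0           -> no top-byte-ignore regardless of tbid
 */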
#ifndef CONFIG_USER_ONLY
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;
        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
         * invalid.
         */
        ttbr1_valid = (el < 2);
        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        /* There is no TTBR1 for EL2 */
        ttbr1_valid = (el != 2);
        addrsize = (mmu_idx == ARMMMUIdx_S2NS ? 40 : 32);
        inputsize = addrsize - param.tsz;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
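        /*
         * Worked example (illustrative): with 4KB granules (stride 9) and a
         * 48-bit VA (inputsize 48), level = 4 - (48 - 4) / 9 = 0, a full
         * four-level walk; with TxSZ = 25 (inputsize 39),
         * level = 4 - (39 - 4) / 9 = 1, so level 0 is skipped.
         */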
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        txattrs->target_tlb_bit0 = true;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_S2NS) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}

static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
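
/*
 * Worked example (illustrative): DRSR.Rsize encodes a region of
 * 2^(Rsize + 1) bytes, which is why rsize is incremented before building
 * rmask above. Rsize = 4 gives a 32-byte region (rmask = 0x1f); Rsize = 31
 * covers the whole 4GB space. Regions of 256 bytes or more (rsize >= 8) are
 * further split into eight subregions gated by DRSR bits [15:8].
 */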
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
                              int *prot, bool *is_subpage,
                              ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
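
/*
 * Worked example (illustrative): s1 = 0xf (write-back, RW-allocate) combined
 * with s2 = 0x4 (non-cacheable) yields 4, since non-cacheable always wins;
 * s1 = 0xf with s2 = 0xa (write-through) yields (2 << 2) | 3 = 0xb,
 * write-through with the stage 1 allocation hints preserved.
 */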
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
12549 /* get_phys_addr - get the physical address for this virtual address
12551 * Find the physical address corresponding to the given virtual address,
12552 * by doing a translation table walk on MMU based systems or using the
12553 * MPU state on MPU based systems.
12555 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
12556 * prot and page_size may not be filled in, and the populated fsr value provides
12557 * information on why the translation aborted, in the format of a
12558 * DFSR/IFSR fault register, with the following caveats:
12559 * * we honour the short vs long DFSR format differences.
12560 * * the WnR bit is never set (the caller must do this).
12561 * * for PSMAv5 based systems we don't bother to return a full FSR format
12564 * @env: CPUARMState
12565 * @address: virtual address to get physical address for
12566 * @access_type: 0 for read, 1 for write, 2 for execute
12567 * @mmu_idx: MMU index indicating required translation regime
12568 * @phys_ptr: set to the physical address corresponding to the virtual address
12569 * @attrs: set to the memory transaction attributes to use
12570 * @prot: set to the permissions for the page containing phys_ptr
12571 * @page_size: set to the size of the page containing phys_ptr
12572 * @fi: set to fault info if the translation fails
12573 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early. */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation. */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms. */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                if (env->cp15.hcr_el2 & HCR_DC) {
                    /*
                     * HCR.DC forces the first stage attributes to
                     *  Normal Non-Shareable,
                     *  Inner Write-Back Read-Allocate Write-Allocate,
                     *  Outer Write-Back Read-Allocate Write-Allocate.
                     */
                    cacheattrs->attrs = 0xff;
                    cacheattrs->shareability = 0;
                }
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
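
/* Typical usage sketch (illustrative; cf. the debug helper just below):
 *
 *   hwaddr phys;
 *   target_ulong page_size;
 *   int prot;
 *   MemTxAttrs attrs = {};
 *   ARMMMUFaultInfo fi = {};
 *
 *   if (!get_phys_addr(env, va, MMU_DATA_LOAD, arm_mmu_idx(env),
 *                      &phys, &attrs, &prot, &page_size, &fi, NULL)) {
 *       ... use phys/prot/attrs; a true return means fi holds the fault ...
 *   }
 */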
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    uint32_t mask;
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        mask = 0;
        if ((reg & 1) && el) {
            mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
        }
        if (!(reg & 4)) {
            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
            if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
                mask |= XPSR_GE;
            }
        }
        /* EPSR reads as zero */
        return xpsr_read(env) & mask;
    case 20: /* CONTROL */
    {
        uint32_t value = env->v7m.control[env->v7m.secure];
        if (!env->v7m.secure) {
            /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
            value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
        }
        return value;
    }
    case 0x94: /* CONTROL_NS */
        /* We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS] |
            (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                                       " register %d\n", reg);
        return 0;
    }
}
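
/* For instance, an MRS of CONTROL (SYSm == 20) from the Non-secure state
 * returns the NS-banked CONTROL value with FPCA merged in from the S bank,
 * since FPCA lives only in v7m.control[M_REG_S] and SFPA reads as zero
 * from NS (illustrative reading of the case 20 handling above).
 */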
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /* We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);
    int cur_el = arm_current_el(env);

    if (cur_el == 0 && reg > 7 && reg != 20) {
        /*
         * only xPSR sub-fields and CONTROL.SFPA may be written by
         * unprivileged code
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            }
            /*
             * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
             * RES0 if the FPU is not present, and is stored in the S bank
             */
            if (arm_feature(env, ARM_FEATURE_VFP) &&
                extract32(env->v7m.nsacr, 10, 1)) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
            }
            return;
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
            uint32_t limit;

            if (!env->v7m.secure) {
                return;
            }

            limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];

            if (val < limit) {
                CPUState *cs = env_cpu(env);

                cpu_restore_state(cs, GETPC(), true);
                raise_exception(env, EXCP_STKOF, 0, 1);
            }

            if (is_psp) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        /* only APSR is actually writable */
        if (!(reg & 4)) {
            uint32_t apsrmask = 0;

            if (mask & 8) {
                apsrmask |= XPSR_NZCV | XPSR_Q;
            }
            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
                apsrmask |= XPSR_GE;
            }
            xpsr_write(env, val, apsrmask);
        }
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /*
         * Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         * For v7M, we must just ignore explicit writes to SPSEL in handler
         * mode; for v8M the write is permitted but will have no effect.
         * All these bits are writes-ignored from non-privileged code,
         * except for SFPA.
         */
        if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
                           !arm_v7m_is_handler_mode(env))) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /*
             * SFPA is RAZ/WI from NS or if no FPU.
             * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
             * Both are stored in the S bank.
             */
            if (env->v7m.secure) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
            }
            if (cur_el > 0 &&
                (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
                 extract32(env->v7m.nsacr, 10, 1))) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
            }
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                                       " register %d\n", reg);
    }
}
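
/* Example of the maskreg encoding handled above (illustrative): for
 * "MSR APSR_nzcvq, r0" the insn bits give mask == 8 and reg == 0, so the
 * xPSR case applies XPSR_NZCV | XPSR_Q as the write mask and leaves the
 * GE bits untouched.
 */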
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;
    bool is_subpage;

    /* Work out what the security state and privilege level we're
     * interested in is...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /* We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /* MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &is_subpage,
                          &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}
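
/* Recap of the TT response word assembled above:
 *  [31:24] IREGION  [23] IRVALID  [22] S  [21] NSRW  [20] NSR
 *  [19] RW  [18] R  [17] SRVALID  [16] MRVALID  [15:8] SREGION
 *  [7:0] MREGION
 */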
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);

#ifdef CONFIG_USER_ONLY
    cpu->env.exception.vaddress = address;
    if (access_type == MMU_INST_FETCH) {
        cs->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        cs->exception_index = EXCP_DATA_ABORT;
    }
    cpu_loop_exit_restore(cs, retaddr);
#else
    hwaddr phys_addr;
    target_ulong page_size;
    int prot, ret;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};

    /*
     * Walk the page table and (if the mapping exists) add the page
     * to the TLB. On success, return true. Otherwise, if probing,
     * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
     * register format, and signal the fault.
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr, true);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
    }
#endif
}
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /* Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = env_archcpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable. So in practice the hostaddr[] array has
         * two entries, given the current setting of TARGET_PAGE_BITS_MIN.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        assert(maxidx <= ARRAY_SIZE(hostaddr));

        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /* If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /* OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /* Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}
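
/* For example, with a typical dcz_blocksize of 4 the block length is
 * 4 << 4 == 64 bytes, and the ~(blocklen - 1) mask above aligns vaddr_in
 * down to a 64-byte boundary before zeroing (illustrative; the actual
 * value is per-CPU).
 */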
/* Note that signed overflow is undefined in C. The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc. */

/* Signed saturating arithmetic. */

/* Perform 16-bit signed saturating addition. */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition. */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction. */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction. */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
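
/* Worked example: add16_sat(0x7000, 0x7000) computes res == 0xe000; the
 * sign of res differs from a while a and b agree in sign, so the result
 * saturates to 0x7fff. The unsigned type keeps the wraparound in
 * "a + b" well-defined.
 */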
/* Unsigned saturating arithmetic. */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
/* Signed modulo arithmetic. */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
/* Unsigned modulo arithmetic. */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
/* Halved signed arithmetic. */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic. */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences. */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;

    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
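
/* Example: usad8(0x01020304, 0x04030201) sums |4-1| + |3-2| + |2-3| + |1-4|
 * == 3 + 1 + 1 + 3 == 8, one absolute difference per byte lane.
 */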
/* For ARMv6 SEL instruction. */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}
/*
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement. */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement. */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
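
/* Only the low 'bytes' bytes of the little-endian buffer participate:
 * e.g. a byte-sized CRC32 insn passes bytes == 1 and a word-sized one
 * passes bytes == 4, with val already zero-extended by the caller.
 */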
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    int fpen;

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
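
/* For instance, with CPACR.FPEN == 1 only EL0 accesses trap: a cur_el of 0
 * returns 1 (trap to EL1), while EL1 falls through to the CPTR_EL2/EL3
 * checks and returns 0 if no hypervisor or secure trap bits are set.
 */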
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (negpri) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}

ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv)
{
    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);

    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
}
/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_current_el(env) != 0;

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    int el;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    el = arm_current_el(env);
    if (el < 2 && arm_is_secure_below_el3(env)) {
        return ARMMMUIdx_S1SE0 + el;
    }
    return ARMMMUIdx_S12NSE0 + el;
}

int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    return arm_to_core_mmu_idx(arm_mmu_idx(env));
}
#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    int current_el = arm_current_el(env);
    int fp_el = fp_exception_el(env, current_el);
    uint32_t flags = 0;

    if (is_a64(env)) {
        ARMCPU *cpu = env_archcpu(env);
        uint64_t sctlr;

        *pc = env->pc;
        flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);

        /* Get control bits for tagged addresses. */
        {
            ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
            ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
            int tbii, tbid;

            /* FIXME: ARMv8.1-VHE S2 translation regime. */
            if (regime_el(env, stage1) < 2) {
                ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
                tbid = (p1.tbi << 1) | p0.tbi;
                tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
            } else {
                tbid = p0.tbi;
                tbii = tbid & !p0.tbid;
            }

            flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
            flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
        }

        if (cpu_isar_feature(aa64_sve, cpu)) {
            int sve_el = sve_exception_el(env, current_el);
            uint32_t zcr_len;

            /* If SVE is disabled, but FP is enabled,
             * then the effective len is 0.
             */
            if (sve_el != 0 && fp_el == 0) {
                zcr_len = 0;
            } else {
                zcr_len = sve_zcr_len_for_el(env, current_el);
            }
            flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
            flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
        }

        sctlr = arm_sctlr(env, current_el);

        if (cpu_isar_feature(aa64_pauth, cpu)) {
            /*
             * In order to save space in flags, we record only whether
             * pauth is "inactive", meaning all insns are implemented as
             * a nop, or "active" when some action must be performed.
             * The decision of which action to take is left to a helper.
             */
            if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
                flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
            }
        }

        if (cpu_isar_feature(aa64_bti, cpu)) {
            /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
            if (sctlr & (current_el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
                flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
            }
            flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
        }
    } else {
        *pc = env->regs[15];
        flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
        flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len);
        flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride);
        flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
        flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1) || arm_feature(env, ARM_FEATURE_M)) {
            flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
        }
        /* Note that XSCALE_CPAR shares bits with VECSTRIDE */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            flags = FIELD_DP32(flags, TBFLAG_A32,
                               XSCALE_CPAR, env->cp15.c15_cpar);
        }
    }

    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }
    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);

    if (arm_v7m_is_handler_mode(env)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
    }

    /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is
     * suppressing them because the requested execution priority is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_M) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) != env->v7m.secure) {
        flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
    }

    if (arm_feature(env, ARM_FEATURE_M) &&
        (env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
        (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
         (env->v7m.secure &&
          !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
        /*
         * ASPEN is set, but FPCA/SFPA indicate that there is no active
         * FP context; we must create a new FP context before executing
         * any FP insn.
         */
        flags = FIELD_DP32(flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED, 1);
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;

        if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
            flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
        }
    }

    *pflags = flags;
    *cs_base = 0;
}
#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers. The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs. */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr. */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
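
/* Example of the pmask computation: each uint64_t of a predicate register
 * covers 4 vector quadwords (16 predicate bits per quadword). For vq == 3,
 * pmask == ~(-1ULL << 48) keeps the low 48 bits of p[0], and every later
 * word is then cleared outright with pmask == 0.
 */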
/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE. */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL. */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state. */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);