#include "qemu/osdep.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "fpu/softfloat.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-target.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);
#endif

static void switch_mode(CPUARMState *env, int mode);
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
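/*
 * Illustrative note (added, not from the original source): the GDB register
 * numbering handled above is D0..D15 (or D0..D31 with VFP3), optionally
 * followed by Q-register aliases on NEON cores, and finally the three 32-bit
 * status registers. For example, with VFP3 but no NEON (nregs == 32),
 * reg 33 lands on "case 1" and reads FPSCR.
 */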
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        stq_le_p(buf, q[0]);
        stq_le_p(buf + 8, q[1]);
        return 16;
    }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}
static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}
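/*
 * Note (added for clarity; the macro location is an assumption): raw_read
 * and raw_write only work for registers backed by storage in CPUARMState,
 * i.e. ri->fieldoffset is the byte offset of the backing field within env.
 * The CPREG_FIELD32/CPREG_FIELD64 macros (defined in cpu.h) resolve that
 * offset to an lvalue of the right width, which is also what raw_ptr
 * exposes as a plain pointer.
 */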
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}
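/*
 * Worked example of the kvm_sync readback dance above (illustrative, not
 * from the original source): the list may hold a stale oldval for a
 * register whose raw write only retains some bits. Rewriting oldval and
 * reading it back tells us whether the earlier write_list_to_cpustate()
 * sync of this register could have succeeded; only in that case do we
 * push newval into the register and refresh the list entry, otherwise the
 * entry is left alone.
 */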
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}
static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}
static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
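/*
 * Note (added for clarity): init_cpreg_list makes two passes over the
 * sorted key list. count_cpreg first sizes the arrays, then
 * add_cpreg_to_list fills cpreg_indexes[], reusing cpreg_array_len as the
 * fill cursor; the final assert checks that both passes agreed on the
 * number of raw-accessible registers.
 */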
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}
/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR_EL2.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}
static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}
static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}
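/*
 * Usage sketch (illustrative, drawn from the callers below): each
 * non-broadcast TLBI helper begins with
 *
 *     if (tlb_force_broadcast(env)) {
 *         tlbiall_is_write(env, NULL, value);
 *         return;
 *     }
 *
 * so a local TLB invalidate executed at NS EL1 with HCR_EL2.FB set behaves
 * exactly like its inner-shareable (broadcast) variant.
 */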
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiall_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimva_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiasid_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimvaa_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}
static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        uint32_t mask = 0;

        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                    !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}
static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}
static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
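/*
 * Worked example (illustrative, not from the original source): with
 * PMCR.N == 4, pmu_counter_mask(env) == (1 << 31) | 0xf == 0x8000000f,
 * i.e. the cycle counter bit C plus one bit per implemented event counter.
 */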
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;
static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}
/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}
#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif
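/*
 * Note (added for clarity): at the fixed ARM_CPU_FREQ of 1 GHz,
 * ARM_CPU_FREQ / NANOSECONDS_PER_SECOND == 1, so cycles_ns_per() maps
 * cycles to nanoseconds one-to-one. The integer division would truncate
 * to zero for any emulated frequency below 1 GHz, so this arithmetic
 * relies on the current fixed value.
 */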
static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
};
/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x11
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];
/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
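/*
 * Worked example (illustrative): INST_RETIRED is event 0x008, so when it is
 * supported its bit lands in cpu->pmceid0; events 0x00..0x1f map to PMCEID0
 * and 0x20..0x3f to PMCEID1 via the "number & 0x20" test above.
 */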
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            (env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
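/*
 * Note (added for clarity): the P/U/NSK/NSU/NSH/M flags above implement the
 * PMEVTYPER/PMCCFILTR filter fields. For example, at non-secure EL0 the
 * event is filtered out when U != NSU, while at secure EL0 the U bit alone
 * disables counting.
 */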
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
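/*
 * Worked example of the overflow test above (illustrative): the predicate
 * (old & ~new & overflow_mask) is true exactly when the counter bit
 * selected by overflow_mask ticks from 1 to 0, e.g. a 32-bit wrap from
 * 0xffffffff to 0x00000000 when PMCR.LC is clear.
 */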
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = arm_env_get_cpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;

    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}
static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = arm_env_get_cpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}
void pmu_op_start(CPUARMState *env)
{
    unsigned int i;

    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;

    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmu_op_finish(env);
}
static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;

    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;

    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}
static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}
static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}
static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}
static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}
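/*
 * Note (added for clarity): the PMEVTYPER<n> encodings place n[4:3] in
 * CRm[1:0] and n[2:0] in opc2, so ((ri->crm & 3) << 3) | (ri->opc2 & 7)
 * recovers the counter index. For example PMEVTYPER10 has CRm = 0b1101 and
 * opc2 = 0b010, giving counter == 10. The PMEVCNTR<n> accessors below use
 * the same decode.
 */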
static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}
static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}
*env
, const ARMCPRegInfo
*ri
,
1764 if (counter
< pmu_num_counters(env
)) {
1766 pmevcntr_op_start(env
, counter
);
1767 ret
= env
->cp15
.c14_pmevcntr
[counter
];
1768 pmevcntr_op_finish(env
, counter
);
1771 /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1772 * are CONSTRAINED UNPREDICTABLE. */
static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}
static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}
static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}
static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state.  */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
        valid_mask &= ~SCR_NET;
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= SCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= SCR_API | SCR_APK;
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}
static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
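/*
 * Note (added for clarity): isr_read implements ISR_EL1/ISR. When the
 * effective HCR_EL2.IMO/FMO routing bits are set, the register reports the
 * virtual interrupt lines (VIRQ/VFIQ) instead of the physical ones, so a
 * guest running under a hypervisor sees its virtual interrupt state.
 */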
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
, .accessfn
= access_tpm
,
2081 .type
= ARM_CP_ALIAS
| ARM_CP_IO
,
2082 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pminten
),
2083 .writefn
= pmintenclr_write
, },
2084 { .name
= "PMINTENCLR_EL1", .state
= ARM_CP_STATE_AA64
,
2085 .opc0
= 3, .opc1
= 0, .crn
= 9, .crm
= 14, .opc2
= 2,
2086 .access
= PL1_RW
, .accessfn
= access_tpm
,
2087 .type
= ARM_CP_ALIAS
| ARM_CP_IO
,
2088 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pminten
),
2089 .writefn
= pmintenclr_write
},
2090 { .name
= "CCSIDR", .state
= ARM_CP_STATE_BOTH
,
2091 .opc0
= 3, .crn
= 0, .crm
= 0, .opc1
= 1, .opc2
= 0,
2092 .access
= PL1_R
, .readfn
= ccsidr_read
, .type
= ARM_CP_NO_RAW
},
2093 { .name
= "CSSELR", .state
= ARM_CP_STATE_BOTH
,
2094 .opc0
= 3, .crn
= 0, .crm
= 0, .opc1
= 2, .opc2
= 0,
2095 .access
= PL1_RW
, .writefn
= csselr_write
, .resetvalue
= 0,
2096 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.csselr_s
),
2097 offsetof(CPUARMState
, cp15
.csselr_ns
) } },
2098 /* Auxiliary ID register: this actually has an IMPDEF value but for now
2099 * just RAZ for all cores:
2101 { .name
= "AIDR", .state
= ARM_CP_STATE_BOTH
,
2102 .opc0
= 3, .opc1
= 1, .crn
= 0, .crm
= 0, .opc2
= 7,
2103 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
2104 /* Auxiliary fault status registers: these also are IMPDEF, and we
2105 * choose to RAZ/WI for all cores.
2107 { .name
= "AFSR0_EL1", .state
= ARM_CP_STATE_BOTH
,
2108 .opc0
= 3, .opc1
= 0, .crn
= 5, .crm
= 1, .opc2
= 0,
2109 .access
= PL1_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
2110 { .name
= "AFSR1_EL1", .state
= ARM_CP_STATE_BOTH
,
2111 .opc0
= 3, .opc1
= 0, .crn
= 5, .crm
= 1, .opc2
= 1,
2112 .access
= PL1_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
2113 /* MAIR can just read-as-written because we don't implement caches
2114 * and so don't need to care about memory attributes.
2116 { .name
= "MAIR_EL1", .state
= ARM_CP_STATE_AA64
,
2117 .opc0
= 3, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 0,
2118 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.mair_el
[1]),
2120 { .name
= "MAIR_EL3", .state
= ARM_CP_STATE_AA64
,
2121 .opc0
= 3, .opc1
= 6, .crn
= 10, .crm
= 2, .opc2
= 0,
2122 .access
= PL3_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.mair_el
[3]),
2124 /* For non-long-descriptor page tables these are PRRR and NMRR;
2125 * regardless they still act as reads-as-written for QEMU.
2127 /* MAIR0/1 are defined separately from their 64-bit counterpart which
2128 * allows them to assign the correct fieldoffset based on the endianness
2129 * handled in the field definitions.
2131 { .name
= "MAIR0", .state
= ARM_CP_STATE_AA32
,
2132 .cp
= 15, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 0, .access
= PL1_RW
,
2133 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.mair0_s
),
2134 offsetof(CPUARMState
, cp15
.mair0_ns
) },
2135 .resetfn
= arm_cp_reset_ignore
},
2136 { .name
= "MAIR1", .state
= ARM_CP_STATE_AA32
,
2137 .cp
= 15, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 1, .access
= PL1_RW
,
2138 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.mair1_s
),
2139 offsetof(CPUARMState
, cp15
.mair1_ns
) },
2140 .resetfn
= arm_cp_reset_ignore
},
2141 { .name
= "ISR_EL1", .state
= ARM_CP_STATE_BOTH
,
2142 .opc0
= 3, .opc1
= 0, .crn
= 12, .crm
= 1, .opc2
= 0,
2143 .type
= ARM_CP_NO_RAW
, .access
= PL1_R
, .readfn
= isr_read
},
2144 /* 32 bit ITLB invalidates */
2145 { .name
= "ITLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 0,
2146 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiall_write
},
2147 { .name
= "ITLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 1,
2148 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimva_write
},
2149 { .name
= "ITLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 2,
2150 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiasid_write
},
2151 /* 32 bit DTLB invalidates */
2152 { .name
= "DTLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 0,
2153 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiall_write
},
2154 { .name
= "DTLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 1,
2155 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimva_write
},
2156 { .name
= "DTLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 2,
2157 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiasid_write
},
2158 /* 32 bit TLB invalidates */
2159 { .name
= "TLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 0,
2160 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiall_write
},
2161 { .name
= "TLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 1,
2162 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimva_write
},
2163 { .name
= "TLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 2,
2164 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiasid_write
},
2165 { .name
= "TLBIMVAA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 3,
2166 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimvaa_write
},
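/* A recurring pattern in the table above: each SET/CLR register pair
 * (PMCNTENSET/PMCNTENCLR, PMINTENSET/PMINTENCLR) points its .fieldoffset
 * at the same underlying cp15 word, with the set/clear semantics supplied
 * entirely by the writefn.
 */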
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
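/* Everything in the recalc path below falls out of one relation: the
 * timer condition is (count - offset) >= cval, evaluated in unsigned
 * 64-bit arithmetic so that a nonzero CNTVOFF_EL2 wraps modulo 2^64
 * exactly as the architected virtual counter does.
 */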
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
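/* TVAL is a 32-bit signed downcounter view of the same state: reads
 * return cval - (count - offset) truncated to 32 bits, and writes set
 * cval = (count - offset) + sextract64(value, 0, 32), so the two
 * operations below invert each other at the instant of the access.
 */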
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
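/* CNT*_CTL bit layout relied on above: bit 0 is ENABLE, bit 1 is IMASK
 * and bit 2 is the read-only ISTATUS that gt_recalc_timer() recomputes;
 * only the low two bits are guest-writable, hence the
 * deposit64(oldval, 0, 2, value) in gt_ctl_write().
 */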
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
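/* The wrappers above exist purely to bind a GTIMER_* index into the
 * shared cval/tval/ctl helpers, so that every entry in the register
 * table below can use the plain readfn/writefn signatures.
 */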
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
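/* With GTIMER_SCALE defined as 16 (nanoseconds per counter tick), the
 * CNTFRQ reset value above works out to 1000000000 / 16 = 62500000,
 * i.e. the fixed 62.5MHz frequency this timer implementation runs at.
 */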

#else

/* In user-mode most of the generic timer registers are inaccessible;
 * however, modern kernels (4.12+) allow access to cntvct_el0.
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / GTIMER_SCALE;
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};

#endif
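/* par_write() below only filters which PAR bits the guest may set; the
 * writable mask differs between LPAE, plain v7 and earlier cores, which
 * is why three variants of the raw_write() call are needed.
 */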
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
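/* do_ats_write() performs the address-translation operation by walking
 * the tables with get_phys_addr() and then encoding either the result
 * or the fault into the 32-bit or 64-bit PAR format chosen below.
 */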
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
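/* For reference, a successful 64-bit PAR assembled above contains:
 * bit 0 F=0, bits [8:7] SH, bit 9 NS, bit 11 LPAE=1, the page-aligned
 * physical address from bit 12 up, and the MAIR-format memory
 * attributes in bits [63:56].
 */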
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
#endif
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
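/* The loops above move each region's 2-bit AP field between the packed
 * "simple" layout and the spread "extended" layout: region n's field
 * sits at bits [2n+1:2n] in the simple form and at bits [4n+1:4n] in
 * the extended form, which is what the extra shift by the loop index
 * implements.
 */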
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}

static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}
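/* DRBAR/DRSR/DRACR are banked by region: the reginfo field points at a
 * pointer to the per-region array, and pmsav7_read()/pmsav7_write()
 * index it with the current region number (env->pmsav7.rnr) on every
 * access.
 */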
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
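/* Worked example of the masks above: with TTBCR.N = 2, mask becomes
 * ~(0xffffffff >> 2) = 0xc0000000, so any VA with bit 31 or 30 set is
 * translated via TTBR1, and base_mask becomes ~(0x3fff >> 2) =
 * 0xfffff000, matching the 4KB alignment of the now-4KB TTBR0 table.
 */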
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}

static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}
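/* The TTBR/TCR writefns share one invariant: any write that can change
 * the active ASID (TTBCR.A1, TCR_EL1.A1, or the ASID field of a 64-bit
 * TTBR) must flush the QEMU TLB, because our TLB entries are not tagged
 * with the architectural ASID.
 */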
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = arm_env_get_cpu(env);

        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0 |
                            ARMMMUIdxBit_S2NS);
        raw_write(env, ri, value);
    }
}
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};

/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
                           offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
};
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
3643 static uint64_t midr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3645 ARMCPU
*cpu
= arm_env_get_cpu(env
);
3646 unsigned int cur_el
= arm_current_el(env
);
3647 bool secure
= arm_is_secure(env
);
3649 if (arm_feature(&cpu
->env
, ARM_FEATURE_EL2
) && !secure
&& cur_el
== 1) {
3650 return env
->cp15
.vpidr_el2
;
3652 return raw_read(env
, ri
);
static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}
static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}
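/*
 * midr_read and mpidr_read follow the same virtualization pattern: a
 * non-secure EL1 read on a CPU with EL2 returns the hypervisor-controlled
 * VPIDR_EL2/VMPIDR_EL2 shadow value rather than the real ID register, so
 * a hypervisor can present a different CPU identity to its guest.
 */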
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
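/*
 * For banked registers such as PAR, TTBR0 and TTBR1 above,
 * .bank_fieldoffsets holds two state offsets: index 0 is the Secure bank
 * and index 1 the Non-secure bank (hence the _s/_ns field name pairs);
 * the common cpreg code then selects the bank from the access's current
 * security state.
 */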
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}
static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}
static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vmalle1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0);
    }
}
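/*
 * The pattern above recurs in all the non-inner-shareable TLBI handlers
 * below: when tlb_force_broadcast() says stage 1 invalidations must be
 * upgraded to broadcast (e.g. because the hypervisor has set HCR_EL2.FB),
 * the local op simply defers to its *is_write inner-shareable
 * counterpart, which flushes all CPUs.
 */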
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0 |
                                ARMMMUIdxBit_S2NS);
        } else {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0);
        }
    }
}
static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);
    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else if (has_el2) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0 |
                                            ARMMMUIdxBit_S2NS);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}
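/*
 * The sextract64(value << 12, 0, 56) idiom in the VA-invalidate handlers
 * recovers the target page address from a TLBI-by-VA argument: the
 * register value carries VA[55:12] in its low bits, so shifting left by
 * 12 rebuilds the page-aligned address and the 56-bit sign-extract widens
 * it to the canonical form expected by the QEMU softmmu flush APIs.
 */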
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    bool sec = arm_is_secure_below_el3(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (sec) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S1SE1 |
                                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S12NSE1 |
                                                 ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vae1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S1SE1 |
                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S12NSE1 |
                                 ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}
static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* We don't implement EL2, so the only control on DC ZVA is the
     * bit in the SCTLR which can prohibit access for EL0.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
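/*
 * DCZID_EL0 layout, as constructed above: bits [3:0] (cpu->dcz_blocksize)
 * give log2 of the DC ZVA block size in words, and bit 4 (DZP) reads as 1
 * when DC ZVA is prohibited. We derive DZP by reusing the access-check
 * function rather than duplicating the SCTLR_EL1.DZE test here.
 */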
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}
static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));
}
static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}
static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
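    /*
     * NZCV and CURRENTEL above have no underlying state field at all:
     * ARM_CP_NZCV and ARM_CP_CURRENTEL are special .type values that the
     * translators recognise and turn into direct PSTATE accesses, which
     * is why those entries carry no .fieldoffset or read/write functions.
     */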
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
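    /*
     * Note how several distinct architectural TLBI ops above share one
     * writefn: QEMU deliberately over-invalidates, since it implements
     * neither by-ASID-only nor last-level-only flushes, so e.g. VAE1,
     * VAAE1, VALE1 and VAALE1 all funnel into tlbi_aa64_vae1_write.
     */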
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
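    /*
     * The AT_* entries above implement the address translation system
     * instructions: ats_write64 runs a software page-table walk for the
     * requested translation regime and deposits the result (or fault
     * information) into PAR_EL1, which is why PAR_EL1 itself is defined
     * alongside them here.
     */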
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
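    /*
     * TLBIIPAS2* above are the AArch32 hyp counterparts of the AArch64
     * TLBI IPAS2E1* ops: they invalidate by intermediate physical
     * address, touch only stage 2 translation structures, and must
     * behave as NOPs when EL2 is not enabled.
     */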
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};
/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_CONST,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= HCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= HCR_API | HCR_APK;
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC Disables stage1 and enables stage2 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}
static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    hcr_write(env, NULL, value);
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    hcr_write(env, NULL, value);
}
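/*
 * A quick sketch of the deposit64() calls above: deposit64(old, pos, len,
 * val) returns 'old' with the len-bit field at bit position 'pos' replaced
 * by the low bits of 'val'. So for a 32-bit guest,
 *     hcr_writelow:  new = deposit64(hcr_el2,  0, 32, value)   (HCR)
 *     hcr_writehigh: new = deposit64(hcr_el2, 32, 32, value)   (HCR2)
 * each update half of the 64-bit HCR_EL2 and then run the full hcr_write
 * so mask checking and TLB/IRQ side effects happen exactly once.
 */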
/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (arm_is_secure_below_el3(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS == 1 || SCR_EL3.EEL2 == 1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        ret = 0;
    } else if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.4.  */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}
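/*
 * Callers interested in HCR_EL2 behaviour (as opposed to its raw
 * migration/GDB value) are expected to go through arm_hcr_el2_eff()
 * above, so the secure-state and TGE/E2H overrides are applied in one
 * place rather than being re-derived at each use site.
 */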
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};
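/*
 * The AArch32 view of the 64-bit HCR_EL2 is split across two 32-bit
 * registers: "HCR" (in el2_cp_reginfo) aliases the low half via
 * hcr_writelow, while "HCR2" above aliases the high half via
 * offsetofhigh32/hcr_writehigh. HCR2 lives in this separate v8-only
 * table because it does not exist on ARMv7.
 */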
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR",  .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
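/* Note: 0xC5ACCE55 is the architected CoreSight lock-access key, so on
 * the AArch32 view of OSLAR only a write of exactly that key sets the
 * OS lock, while the AArch64 view takes the lock state directly from
 * bit 0 (OSLK) of the value written.
 */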
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
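/* Note: ARM_CP_64BIT marks these as the 64-bit MCRR/MRRC views of the
 * registers above; a 64-bit coprocessor access is encoded using only
 * cp, crm and opc1, which is why crn and opc2 are left at zero here.
 */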
/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1) {
        bool disabled = false;

        /* The CPACR.ZEN controls traps to EL1:
         * 0, 2 : trap EL0 and EL1 accesses
         * 1    : trap only EL0 accesses
         * 3    : trap no accesses
         */
        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            /* route_to_el2 */
            return (arm_feature(env, ARM_FEATURE_EL2)
                    && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
        }

        /* Check CPACR.FPEN.  */
        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            return 0;
        }
    }

    /* CPTR_EL2.  Since TZ and TFP are positive,
     * they will be zero when EL2 is not present.
     */
    if (el <= 2 && !arm_is_secure_below_el3(env)) {
        if (env->cp15.cptr_el[2] & CPTR_TZ) {
            return 2;
        }
        if (env->cp15.cptr_el[2] & CPTR_TFP) {
            return 0;
        }
    }

    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}
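/* Worked example of the ZEN handling above: with CPACR_EL1.ZEN == 1,
 * an SVE access from EL0 is disabled and reports EL1 (or EL2 when
 * HCR_EL2.TGE routes the exception there), while the same access from
 * EL1 falls through to the CPTR_EL2/CPTR_EL3 checks.
 */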
/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }
    return zcr_len;
}
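/* Worked example: a CPU with sve_max_vq == 4 (512-bit vectors) and
 * ZCR_EL1.LEN == 1 yields zcr_len = MIN(3, 1) = 1 at EL0/EL1, i.e. an
 * effective vector quantum of 1 + 1 == 2 and so 256-bit vectors.
 */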
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI.  */
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
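/* Note: aarch64_sve_narrow_vq() zeroes the Z, P and FFR state beyond
 * the new, shorter vector length; the old high bits are not required
 * by the architecture to survive a narrowing write, and zeroing keeps
 * the stored register state canonical for migration.
 */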
static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
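/* Worked example of the BAS path above: with WVR = 0x1000 and
 * WCR.BAS = 0b00001100, basstart = ctz32(0xc) = 2 and
 * len = cto32(0xc >> 2) = 2, so the watchpoint covers the two bytes
 * at addresses 0x1002 and 0x1003.
 */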
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
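/* Note: the two deposits above mean only four BAS values can be
 * stored: BAS[1] always mirrors BAS[0] and BAS[3] mirrors BAS[2], so
 * e.g. a guest write of 0b0110 is recorded as 0b1100. This is what
 * lets hw_breakpoint_update() assume the four-case BAS decode.
 */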
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
     * of the debug registers such as number of breakpoints;
     * check that if they both exist then they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
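/* Example: a CPU whose DBGDIDR reports BRPs == 5 and WRPs == 3 ends up
 * with DBGBVR0..DBGBVR5/DBGBCR0..DBGBCR5 and DBGWVR0..DBGWVR3/
 * DBGWCR0..DBGWCR3 defined, since each ID field holds "number of
 * registers minus 1".
 */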
/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr1 = cpu->id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access ok in secure mode.  */
        return CP_ACCESS_OK;
    }
    return access_lor_ns(env);
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode.  */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env);
}
#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_feature(env, ARM_FEATURE_EL2) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apda_key.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apda_key.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apdb_key.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apdb_key.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apga_key.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apga_key.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apia_key.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apia_key.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apib_key.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apib_key.hi) },
    REGINFO_SENTINEL
};
#endif
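/* Note: each of the five pointer-authentication keys (APIA, APIB,
 * APDA, APDB, APGA) is architecturally a single 128-bit value, exposed
 * here as a LO/HI pair of 64-bit system registers sharing one access
 * function.
 */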
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar6 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement four counters in
         * addition to the cycle count register.
         */
        unsigned int i, pmcrn = 4;
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW,
            .type = ARM_CP_IO | ARM_CP_ALIAS,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
        for (i = 0; i < pmcrn; i++) {
            char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
            char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
            char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
            char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
            ARMCPRegInfo pmev_regs[] = {
                { .name = pmevcntr_name, .cp = 15, .crn = 14,
                  .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .raw_readfn = pmevcntr_rawread,
                  .raw_writefn = pmevcntr_rawwrite },
                { .name = pmevtyper_name, .cp = 15, .crn = 14,
                  .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .raw_writefn = pmevtyper_rawwrite },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, pmev_regs);
            g_free(pmevcntr_name);
            g_free(pmevcntr_el0_name);
            g_free(pmevtyper_name);
            g_free(pmevtyper_el0_name);
        }
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
        FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
             * know the right value for the GIC field until after we
             * define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr1 },
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              /* At present, only SVEver == 0 is defined anyway.  */
              .resetvalue = 0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = 0x000f000f00ff0000,
              .fixed_bits    = 0x0000000000000011 },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = 0x00000000000000f0 },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64ZFR0_EL1" },
            { .name = "ID_AA64MMFR0_EL1",
              .fixed_bits    = 0x00000000ff000000 },
            { .name = "ID_AA64MMFR1_EL1" },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits    = 0x0000000000000006 },
            { .name = "ID_AA64DFR1_EL1" },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64AFR*",
              .is_glob = true },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = 0x00fffffff0fffff0 },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = 0x000000f0ffffffff },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
            if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
            }
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-A32HPD.  */
        if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
              .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
              .name = "MPUIR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1",
              .exported_bits = 0x00000000ffffffff },
            { .name = "REVIDR_EL1" },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
              .fixed_bits = 0x0000000080000000 },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .type = ARM_CP_CONST,
              .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
            ARMCPRegInfo hactlr2_reginfo = {
                .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
                .access = PL2_RW, .type = ARM_CP_CONST,
                .resetvalue = 0
            };
            define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        /*
         * A trivial implementation of ARMv8.1-LOR leaves all of these
         * registers fixed at 0, which indicates that there are zero
         * supported Limited Ordering regions.
         */
        static const ARMCPRegInfo lor_reginfo[] = {
            { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
              .access = PL1_R, .accessfn = access_lorid,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, lor_reginfo);
    }
    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
#endif
}

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_xml(cs),
                             "system-registers.xml", 0);
}
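
/* Note on the numeric arguments above: each count is the number of
 * registers described by the corresponding XML file. For example,
 * "arm-vfp.xml" covers the 16 VFP D registers plus FPSID, FPSCR and
 * FPEXC (19 total), "arm-vfp3.xml" covers 32 D registers plus the same
 * three control registers (35), the NEON description additionally
 * exposes the 16 Q register aliases (51), and the AArch64 description
 * covers the 32 V registers plus FPSR and FPCR (34). These counts must
 * stay in sync with the XML descriptions shipped with QEMU.
 */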

/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n", name);
    g_free(name);
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
}

static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}

static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);
    /* Reset the secure state to the specific incoming state.  This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank.  This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
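
/* For illustration: the hash key packs the full register coordinates, so
 * the non-secure AArch32 view of SCTLR (cp15, crn=1, crm=0, opc1=0,
 * opc2=0, 32-bit) is looked up under ENCODE_CP_REG(15, 0, 1, 1, 0, 0, 0),
 * while its AArch64 view SCTLR_EL1 is looked up under
 * ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, 1, 0, 3, 0, 0).
 * A wildcarded definition therefore expands to one hash entry per
 * concrete (crm, opc1, opc2) tuple.
 */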

void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are fewer than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
            mask = PL0U_R | PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL | ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of cp15 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}
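
/* Typical usage (illustrative only; "FOO" is a made-up register name):
 * the caller supplies a list of reginfo structs terminated by
 * REGINFO_SENTINEL, and entries may wildcard fields with CP_ANY:
 *
 *     static const ARMCPRegInfo foo_reginfo[] = {
 *         { .name = "FOO", .cp = 15, .crn = 9, .crm = CP_ANY,
 *           .opc1 = CP_ANY, .opc2 = CP_ANY,
 *           .access = PL1_RW, .type = ARM_CP_NOP },
 *         REGINFO_SENTINEL
 *     };
 *     define_arm_cp_regs(cpu, foo_reginfo);
 */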

/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
{
    const ARMCPRegUserSpaceInfo *m;
    ARMCPRegInfo *r;

    for (m = mods; m->name; m++) {
        GPatternSpec *pat = NULL;
        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}
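
/* Sketch of a mods table (names here are illustrative, not the real
 * tables): an exact-match entry exposes only selected bits of the reset
 * value, while a glob entry zeroes out a whole family of registers:
 *
 *     static const ARMCPRegUserSpaceInfo user_idregs[] = {
 *         { .name = "ID_AA64PFR0_EL1",
 *           .exported_bits = 0x000f000f00ff0000 },
 *         { .name = "ID_AA64*", .is_glob = true },
 *         REGUSERINFO_SENTINEL
 *     };
 *     modify_arm_cp_regs(some_reginfo_list, user_idregs);
 */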

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}

static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}

uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
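
/* Worked example: with N and C set, all other flags clear, Thumb off and
 * the mode field holding SVC (0x13), and assuming no other uncached bits
 * are set, cpsr_read() returns 0x80000000 | 0x20000000 | 0x13 ==
 * 0xa0000013.
 */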

void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q) {
        env->QF = ((val & CPSR_Q) != 0);
    }
    if (mask & CPSR_T) {
        env->thumb = ((val & CPSR_T) != 0);
    }
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                env->uncached_cpsr |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
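
/* Note for callers: the mask selects which fields are written, so e.g.
 * cpsr_write(env, val, CPSR_NZCV, CPSRWriteRaw) updates only the flag
 * bits and can never reach the mode-switch path, because CPSR_M is not
 * in the mask.
 */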

/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;
    }
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
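
/* Worked example: both extend helpers operate on bytes 0 and 2 of the
 * input. sxtb16(0x00ff0080) sign-extends 0x80 -> 0xff80 and 0xff ->
 * 0xffff, giving 0xffffff80, while uxtb16(0x00ff0080) zero-extends the
 * same bytes, giving 0x00ff0080.
 */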

#ifdef CONFIG_USER_ONLY

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
    return 0;
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else

static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode) {
        return;
    }

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}

/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *       routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *       routed to EL2.
 * In these two cases, the below table contains a target of EL1.  This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         IRQ  IMO      Non-secure         Secure
 *  EL3    FIQ  RW  FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
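
/* Example lookups: a physical IRQ taken from non-secure EL0 with a
 * 64-bit EL3 and SCR.IRQ set routes to EL3:
 *     target_el_table[1][1][0][0][0][0] == 3
 * while the same IRQ taken from non-secure EL1 with a 32-bit EL3 and no
 * routing controls set stays at EL1:
 *     target_el_table[0][0][0][0][0][1] == 1
 */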

/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    }

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, bool ignfault)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during stacking\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception. Even in this
     * case we will still update the fault status registers.
     */
    if (!ignfault) {
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
    }
    return false;
}
*cpu
, uint32_t *dest
, uint32_t addr
,
7593 CPUState
*cs
= CPU(cpu
);
7594 CPUARMState
*env
= &cpu
->env
;
7595 MemTxAttrs attrs
= {};
7597 target_ulong page_size
;
7600 ARMMMUFaultInfo fi
= {};
7601 bool secure
= mmu_idx
& ARM_MMU_IDX_M_S
;
7606 if (get_phys_addr(env
, addr
, MMU_DATA_LOAD
, mmu_idx
, &physaddr
,
7607 &attrs
, &prot
, &page_size
, &fi
, NULL
)) {
7608 /* MPU/SAU lookup failed */
7609 if (fi
.type
== ARMFault_QEMU_SFault
) {
7610 qemu_log_mask(CPU_LOG_INT
,
7611 "...SecureFault with SFSR.AUVIOL during unstack\n");
7612 env
->v7m
.sfsr
|= R_V7M_SFSR_AUVIOL_MASK
| R_V7M_SFSR_SFARVALID_MASK
;
7613 env
->v7m
.sfar
= addr
;
7614 exc
= ARMV7M_EXCP_SECURE
;
7617 qemu_log_mask(CPU_LOG_INT
,
7618 "...MemManageFault with CFSR.MUNSTKERR\n");
7619 env
->v7m
.cfsr
[secure
] |= R_V7M_CFSR_MUNSTKERR_MASK
;
7620 exc
= ARMV7M_EXCP_MEM
;
7621 exc_secure
= secure
;
7626 value
= address_space_ldl(arm_addressspace(cs
, attrs
), physaddr
,
7628 if (txres
!= MEMTX_OK
) {
7629 /* BusFault trying to read the data */
7630 qemu_log_mask(CPU_LOG_INT
, "...BusFault with BFSR.UNSTKERR\n");
7631 env
->v7m
.cfsr
[M_REG_NS
] |= R_V7M_CFSR_UNSTKERR_MASK
;
7632 exc
= ARMV7M_EXCP_BUS
;
7641 /* By pending the exception at this point we are making
7642 * the IMPDEF choice "overridden exceptions pended" (see the
7643 * MergeExcInfo() pseudocode). The other choice would be to not
7644 * pend them now and then make a choice about which to throw away
7645 * later if we have two derived exceptions.
7647 armv7m_nvic_set_pending(env
->nvic
, exc
, exc_secure
);

/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/* Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /* Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /* All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /* This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /* target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data(env, sp, nextinst);
    cpu_stl_data(env, sp + 4, saved_psr);

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /* Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
}

static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /* Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
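
/* Summary of the mapping implemented above:
 *   requested state == current, wanted SP is the active SP -> &regs[13]
 *   requested state == current, wanted SP is inactive      -> &v7m.other_sp
 *   other security state, PSP wanted                       -> &v7m.other_ss_psp
 *   other security state, MSP wanted                       -> &v7m.other_ss_msp
 */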

static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /* We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /* NS access to S memory */
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /* All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a MemManage or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     */
    exc_secure = targets_secure ||
        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}

static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /* For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /* Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
                        ignore_faults);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
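
/* Callee-saves frame layout written above (0x28 bytes, lowest address
 * first):
 *   0x00  integrity signature 0xfefa125b
 *   0x04  (reserved, not written)
 *   0x08..0x24  r4-r11
 */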

static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /* Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /* The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /* We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /* We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /* Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                /* Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;

                for (i = 0; i < 13; i++) {
                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }

                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /* Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /* Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}

static bool v7m_push_stack(ARMCPU *cpu)
{
    /* Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    frameptr -= 0x20;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            return true;
        }
    }

    /* Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);

    /* Update SP regardless of whether any of the stack accesses failed. */
    env->regs[13] = frameptr;

    return !stacked_ok;
}
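
/* Basic (caller-saves) frame layout written above (0x20 bytes):
 *   0x00 r0   0x04 r1   0x08 r2   0x0c r3
 *   0x10 r12  0x14 lr   0x18 return address  0x1c xPSR
 */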
8191 static void do_v7m_exception_exit(ARMCPU
*cpu
)
8193 CPUARMState
*env
= &cpu
->env
;
8196 bool ufault
= false;
8197 bool sfault
= false;
8198 bool return_to_sp_process
;
8199 bool return_to_handler
;
8200 bool rettobase
= false;
8201 bool exc_secure
= false;
8202 bool return_to_secure
;
8204 /* If we're not in Handler mode then jumps to magic exception-exit
8205 * addresses don't have magic behaviour. However for the v8M
8206 * security extensions the magic secure-function-return has to
8207 * work in thread mode too, so to avoid doing an extra check in
8208 * the generated code we allow exception-exit magic to also cause the
8209 * internal exception and bring us here in thread mode. Correct code
8210 * will never try to do this (the following insn fetch will always
8211 * fault) so we the overhead of having taken an unnecessary exception
8214 if (!arm_v7m_is_handler_mode(env
)) {
8218 /* In the spec pseudocode ExceptionReturn() is called directly
8219 * from BXWritePC() and gets the full target PC value including
8220 * bit zero. In QEMU's implementation we treat it as a normal
8221 * jump-to-register (which is then caught later on), and so split
8222 * the target value up between env->regs[15] and env->thumb in
8223 * gen_bx(). Reconstitute it.
8225 excret
= env
->regs
[15];
8230 qemu_log_mask(CPU_LOG_INT
, "Exception return: magic PC %" PRIx32
8231 " previous exception %d\n",
8232 excret
, env
->v7m
.exception
);
8234 if ((excret
& R_V7M_EXCRET_RES1_MASK
) != R_V7M_EXCRET_RES1_MASK
) {
8235 qemu_log_mask(LOG_GUEST_ERROR
, "M profile: zero high bits in exception "
8236 "exit PC value 0x%" PRIx32
" are UNPREDICTABLE\n",
8240 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
8241 /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
8242 * we pick which FAULTMASK to clear.
8244 if (!env
->v7m
.secure
&&
8245 ((excret
& R_V7M_EXCRET_ES_MASK
) ||
8246 !(excret
& R_V7M_EXCRET_DCRS_MASK
))) {
8248 /* For all other purposes, treat ES as 0 (R_HXSR) */
8249 excret
&= ~R_V7M_EXCRET_ES_MASK
;
8251 exc_secure
= excret
& R_V7M_EXCRET_ES_MASK
;
8254 if (env
->v7m
.exception
!= ARMV7M_EXCP_NMI
) {
8255 /* Auto-clear FAULTMASK on return from other than NMI.
8256 * If the security extension is implemented then this only
8257 * happens if the raw execution priority is >= 0; the
8258 * value of the ES bit in the exception return value indicates
8259 * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
8261 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
8262 if (armv7m_nvic_raw_execution_priority(env
->nvic
) >= 0) {
8263 env
->v7m
.faultmask
[exc_secure
] = 0;
8266 env
->v7m
.faultmask
[M_REG_NS
] = 0;
8270 switch (armv7m_nvic_complete_irq(env
->nvic
, env
->v7m
.exception
,
8273 /* attempt to exit an exception that isn't active */
8277 /* still an irq active now */
8280 /* we returned to base exception level, no nesting.
8281 * (In the pseudocode this is written using "NestedActivation != 1"
8282 * where we have 'rettobase == false'.)
8287 g_assert_not_reached();
8290 return_to_handler
= !(excret
& R_V7M_EXCRET_MODE_MASK
);
8291 return_to_sp_process
= excret
& R_V7M_EXCRET_SPSEL_MASK
;
8292 return_to_secure
= arm_feature(env
, ARM_FEATURE_M_SECURITY
) &&
8293 (excret
& R_V7M_EXCRET_S_MASK
);
8295 if (arm_feature(env
, ARM_FEATURE_V8
)) {
8296 if (!arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
8297 /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
8298 * we choose to take the UsageFault.
8300 if ((excret
& R_V7M_EXCRET_S_MASK
) ||
8301 (excret
& R_V7M_EXCRET_ES_MASK
) ||
8302 !(excret
& R_V7M_EXCRET_DCRS_MASK
)) {
8306 if (excret
& R_V7M_EXCRET_RES0_MASK
) {
8310 /* For v7M we only recognize certain combinations of the low bits */
8311 switch (excret
& 0xf) {
8312 case 1: /* Return to Handler */
8314 case 13: /* Return to Thread using Process stack */
8315 case 9: /* Return to Thread using Main stack */
8316 /* We only need to check NONBASETHRDENA for v7M, because in
8317 * v8M this bit does not exist (it is RES1).
8320 !(env
->v7m
.ccr
[env
->v7m
.secure
] &
8321 R_V7M_CCR_NONBASETHRDENA_MASK
)) {
8331 * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
8332 * Handler mode (and will be until we write the new XPSR.Interrupt
8333 * field) this does not switch around the current stack pointer.
8334 * We must do this before we do any kind of tailchaining, including
8335 * for the derived exceptions on integrity check failures, or we will
8336 * give the guest an incorrect EXCRET.SPSEL value on exception entry.
8338 write_v7m_control_spsel_for_secstate(env
, return_to_sp_process
, exc_secure
);
8341 env
->v7m
.sfsr
|= R_V7M_SFSR_INVER_MASK
;
8342 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_SECURE
, false);
8343 qemu_log_mask(CPU_LOG_INT
, "...taking SecureFault on existing "
8344 "stackframe: failed EXC_RETURN.ES validity check\n");
8345 v7m_exception_taken(cpu
, excret
, true, false);
8350 /* Bad exception return: instead of popping the exception
8351 * stack, directly take a usage fault on the current stack.
8353 env
->v7m
.cfsr
[env
->v7m
.secure
] |= R_V7M_CFSR_INVPC_MASK
;
8354 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
, env
->v7m
.secure
);
8355 qemu_log_mask(CPU_LOG_INT
, "...taking UsageFault on existing "
8356 "stackframe: failed exception return integrity check\n");
8357 v7m_exception_taken(cpu
, excret
, true, false);
8362 * Tailchaining: if there is currently a pending exception that
8363 * is high enough priority to preempt execution at the level we're
8364 * about to return to, then just directly take that exception now,
8365 * avoiding an unstack-and-then-stack. Note that now we have
8366 * deactivated the previous exception by calling armv7m_nvic_complete_irq()
8367 * our current execution priority is already the execution priority we are
8368 * returning to -- none of the state we would unstack or set based on
8369 * the EXCRET value affects it.
8371 if (armv7m_nvic_can_take_pending_exception(env
->nvic
)) {
8372 qemu_log_mask(CPU_LOG_INT
, "...tailchaining to pending exception\n");
8373 v7m_exception_taken(cpu
, excret
, true, false);
8377 switch_v7m_security_state(env
, return_to_secure
);
8380 /* The stack pointer we should be reading the exception frame from
8381 * depends on bits in the magic exception return type value (and
8382 * for v8M isn't necessarily the stack pointer we will eventually
8383 * end up resuming execution with). Get a pointer to the location
8384 * in the CPU state struct where the SP we need is currently being
8385 * stored; we will use and modify it in place.
8386 * We use this limited C variable scope so we don't accidentally
8387 * use 'frame_sp_p' after we do something that makes it invalid.
8389 uint32_t *frame_sp_p
= get_v7m_sp_ptr(env
,
8392 return_to_sp_process
);
8393 uint32_t frameptr
= *frame_sp_p
;
8396 bool return_to_priv
= return_to_handler
||
8397 !(env
->v7m
            .control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
                                                        return_to_priv);

        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "M profile exception return with non-8-aligned SP "
                          "for destination state is UNPREDICTABLE\n");
        }

        /* Do we need to pop callee-saved registers? */
        if (return_to_secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
            uint32_t expected_sig = 0xfefa125b;
            uint32_t actual_sig;

            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);

            if (pop_ok && expected_sig != actual_sig) {
                /* Take a SecureFault on the current stack */
                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                              "stackframe: failed exception return integrity "
                              "signature check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }

            pop_ok = pop_ok &&
                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);

            frameptr += 0x28;
        }

        /* Pop registers */
        pop_ok = pop_ok &&
            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);

        if (!pop_ok) {
            /* v7m_stack_read() pended a fault, so take it (as a tail
             * chained exception on the same stack frame)
             */
            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
            v7m_exception_taken(cpu, excret, true, false);
            return;
        }

        /* Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
         * assume the r15 in the stack frame should be a Thumb-style "lsbit
         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
         * complain about the badly behaved guest.
         */
        if (env->regs[15] & 1) {
            env->regs[15] &= ~1U;
            if (!arm_feature(env, ARM_FEATURE_V8)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "M profile return from interrupt with misaligned "
                              "PC is UNPREDICTABLE on v7M\n");
            }
        }

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* For v8M we have to check whether the xPSR exception field
             * matches the EXCRET value for return to handler/thread
             * before we commit to changing the SP and xPSR.
             */
            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
            if (return_to_handler != will_be_handler) {
                /* Take an INVPC UsageFault on the current stack.
                 * By this point we will have switched to the security state
                 * for the background state, so this UsageFault will target
                 * that state.
                 */
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                        env->v7m.secure);
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                              "stackframe: failed exception return integrity "
                              "check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }
        }

        /* Commit to consuming the stack frame */
        frameptr += 0x20;
        /* Undo stack alignment (the SPREALIGN bit indicates that the original
         * pre-exception SP was not 8-aligned and we added a padding word to
         * align it, so we undo this by ORing in the bit that increases it
         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
         * would work too but a logical OR is how the pseudocode specifies it.)
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;
    }
    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
    xpsr_write(env, xpsr, ~XPSR_SPREALIGN);

    /* The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /* Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        bool ignore_stackfaults;

        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        ignore_stackfaults = v7m_push_stack(cpu);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
static bool do_v7m_function_return(ARMCPU *cpu)
{
    /* v8M security extensions magic function return.
     * We may either:
     *  (1) throw an exception (longjump)
     *  (2) return true if we successfully handled the function return
     *  (3) return false if we failed a consistency check and have
     *      pended a UsageFault that needs to be taken now
     *
     * At this point the magic return value is split between env->regs[15]
     * and env->thumb. We don't bother to reconstitute it because we don't
     * need it (all values are handled the same way).
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        TCGMemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /* These loads may throw an exception (for MPU faults). We want to
         * do them as secure, so work out what MMU index that is.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on new IPSR */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}
static void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                               uint32_t addr, uint16_t *insn)
{
    /* Load a 16-bit portion of a v7M instruction, returning true on success,
     * or false on failure (in which case we will have pended the appropriate
     * exception).
     * We need to do the instruction fetch's MPU and SAU checks
     * like this because there is no MMU index that would allow
     * doing the load with a single function call. Instead we must
     * first check that the security attributes permit the load
     * and that they don't mismatch on the two halves of the instruction,
     * and then we do the load as a secure load (ie using the security
     * attributes of the address, not the CPU, as architecturally required).
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;

    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /* This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
        /* the MPU lookup failed */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
                                  attrs, &txres);
    if (txres != MEMTX_OK) {
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /* Check whether this attempt to execute code in a Secure & NS-Callable
     * memory region is for an SG instruction; if so, then emulate the
     * effect of the SG instruction and return true. Otherwise pend
     * the correct kind of exception and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /* We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
        return false;
    }

    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
    }

    /* OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);
    env->regs[14] &= ~1;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4; /* and advance past the SG instruction */
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs->exception_index);

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
        break;
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_STKOF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /* Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /* Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
             * the SG instruction have the same security attributes.)
             * Everything else must generate an INVEP SecureFault, so we
             * emulate the SG instruction here.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /* Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /* All other FSR values are either MPU faults or "can't happen
             * for M profile" cases. Deal with them as MPU faults.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_BKPT:
        if (semihosting_enabled()) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                qemu_log_mask(CPU_LOG_INT,
                              "...handling as semihosting call 0x%x\n",
                              env->regs[0]);
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK;
        /* The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}
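/*
 * Note on the LR value assembled above: we only ever set the EXCRET bits
 * named here (RES1, DCRS, FTYPE, S, ES, SPSEL, MODE). For v8M the ES and
 * SPSEL bits are deliberately left for v7m_exception_taken() to fill in,
 * since a tailchained exception can be taken without pushing the stack.
 */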
/* Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /* Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
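/*
 * Mapping used above between the AArch64 xregs and the AArch32 banked
 * registers: x13/x14 are the USR SP/LR, x15 the HYP SP, x16/x17 the IRQ
 * LR/SP, x18/x19 the SVC LR/SP, x20/x21 the ABT LR/SP, x22/x23 the UND
 * LR/SP, and x24-x30 shadow the FIQ bank (r8_fiq-r14_fiq).
 * aarch64_sync_64_to_32() below applies the same mapping in reverse.
 */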
/* Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /* Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * registers.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /* HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);
    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
}
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x14;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
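/*
 * Vector base selection above, in decreasing precedence: Monitor mode
 * always uses MVBAR; otherwise SCTLR.V selects the fixed high vectors at
 * 0xffff0000 (not remappable); otherwise the banked VBAR is added to the
 * per-exception offset.
 */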
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int cur_el = arm_current_el(env);

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
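/*
 * The vector offsets applied above follow the AArch64 vector table
 * layout: base 0x0 for the current EL with SP0, +0x200 for the current
 * EL with SPx, +0x400 for a lower EL using AArch64 and +0x600 for a
 * lower EL using AArch32; within each group, IRQ vectors sit at +0x80
 * and FIQ vectors at +0x100 from the synchronous entry point.
 */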
static inline bool check_for_semihosting(CPUState *cs)
{
    /* Check whether this exception is a semihosting call; if so
     * then handle it and return true; otherwise return false.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            /* This is always the 64-bit semihosting exception.
             * The "is this usermode" and "is semihosting enabled"
             * checks have been done at translate time.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...handling as semihosting call 0x%" PRIx64 "\n",
                          env->xregs[0]);
            env->xregs[0] = do_arm_semihosting(env);
            return true;
        }
        return false;
    } else {
        uint32_t imm;

        /* Only intercept calls from privileged modes, to provide some
         * semblance of security.
         */
        if (cs->exception_index != EXCP_SEMIHOST &&
            (!semihosting_enabled() ||
             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
            return false;
        }

        switch (cs->exception_index) {
        case EXCP_SEMIHOST:
            /* This is always a semihosting call; the "is this usermode"
             * and "is semihosting enabled" checks have been done at
             * translate time.
             */
            break;
        case EXCP_SWI:
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    break;
                }
            } else {
                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
                    & 0xffffff;
                if (imm == 0x123456) {
                    break;
                }
            }
            return false;
        case EXCP_BKPT:
            /* See if this is a semihosting syscall.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    env->regs[15] += 2;
                    break;
                }
            }
            return false;
        default:
            return false;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return true;
    }
}
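/*
 * The immediates recognised above are the standard ARM semihosting
 * values: SVC 0x123456 in ARM state, SVC 0xab in Thumb state, and
 * BKPT 0xab in Thumb state, where the BKPT case must also step the PC
 * past the breakpoint instruction before the call is handled.
 */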
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /* Semihosting semantics depend on the register width of the
     * code that caused the exception, not the target exception level,
     * so must be handled here.
     */
    if (check_for_semihosting(cs)) {
        return;
    }

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_S1E2:
        return 2;
    case ARMMMUIdx_S1E3:
        return 3;
    case ARMMMUIdx_S1SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
#ifndef CONFIG_USER_ONLY

/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (env->cp15.hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((env->cp15.hcr_el2 & HCR_DC) &&
        (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}
/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

#endif /* !CONFIG_USER_ONLY */
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
    }
    return mmu_idx;
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}
#ifndef CONFIG_USER_ONLY
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
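/*
 * The simplified AP[2:1] decode above therefore yields: 0 => privileged
 * read/write only, 1 => read/write at any privilege, 2 => privileged
 * read-only, 3 => read-only at any privilege.
 */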
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}

/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMCacheAttrs cacheattrs = {};
        ARMCacheAttrs *pcacheattrs = NULL;

        if (env->cp15.hcr_el2 & HCR_PTW) {
            /*
             * PTW means we must fault if this S1 walk touches S2 Device
             * memory; otherwise we don't care about the attributes and can
             * save the S2 translation the effort of computing them.
             */
            pcacheattrs = &cacheattrs;
        }

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
            /* Access was to Device memory: generate Permission fault */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
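/*
 * Worked example for the startlevel check above: with 4KB granules
 * (stride 9, grainsize 12), inputsize 40 and a suggested level 1,
 * startsizecheck = 40 - ((3 - 1) * 9 + 12) = 10, which lies within
 * [1, stride + 4] = [1, 13], so the parameters are accepted.
 */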
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
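/*
 * Worked example for the conversion above: S2 attrs 0xf (outer and inner
 * write-back) gives hiattr = loattr = 3 with RW-allocate hints, i.e.
 * MAIR attr 0xff; with HCR_EL2.CD set the same input collapses to
 * non-cacheable, (1 << 6) | (1 << 2) = 0x44.
 */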
#endif /* !CONFIG_USER_ONLY */
ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
                                        ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    bool tbi, tbid, epd, hpd, using16k, using64k;
    int select, tsz;

    /*
     * Bit 55 is always between the two regions, and is canonical for
     * determining if address tagging is enabled.
     */
    select = extract64(va, 55, 1);

    if (el > 1) {
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* VTCR_EL2 */
            tbi = tbid = hpd = false;
        } else {
            tbi = extract32(tcr, 20, 1);
            hpd = extract32(tcr, 24, 1);
            tbid = extract32(tcr, 29, 1);
        }
        epd = false;
    } else if (!select) {
        tsz = extract32(tcr, 0, 6);
        epd = extract32(tcr, 7, 1);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        tbi = extract64(tcr, 37, 1);
        hpd = extract64(tcr, 41, 1);
        tbid = extract64(tcr, 51, 1);
    } else {
        int tg = extract32(tcr, 30, 2);
        using16k = tg == 1;
        using64k = tg == 3;
        tsz = extract32(tcr, 16, 6);
        epd = extract32(tcr, 23, 1);
        tbi = extract64(tcr, 38, 1);
        hpd = extract64(tcr, 42, 1);
        tbid = extract64(tcr, 52, 1);
    }
    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .tbi = tbi,
        .tbid = tbid,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
    };
}

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);

    /* Present TBI as a composite with TBID.  */
    ret.tbi &= (data || !ret.tbid);
    return ret;
}
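/*
 * The composite above means callers see TBI enabled for a data access
 * whenever TCR.TBIx is set, but for an instruction fetch only when
 * TCR.TBIDx is clear as well.
 */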
#ifndef CONFIG_USER_ONLY
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;
        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
         * invalid.
         */
        ttbr1_valid = (el < 2);
        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        /* There is no TTBR1 for EL2 */
        ttbr1_valid = (el != 2);
        addrsize = (mmu_idx == ARMMMUIdx_S2NS ? 40 : 32);
        inputsize = addrsize - param.tsz;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
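        /*
         * Worked example (illustrative, not in the original source):
         * a 4KB granule (stride = 9) with inputsize = 48 gives
         * level = 4 - 44 / 9 = 4 - 4 = 0, i.e. a four-level walk
         * starting at level 0.
         */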
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
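        /*
         * Worked example (illustrative, not in the original source):
         * with a 4KB granule (stride = 9), a page entry at level 3
         * yields 1 << (9 * 1 + 3) = 4KB, while a block entry at
         * level 2 yields 1 << (9 * 2 + 3) = 2MB.
         */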
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable. */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        txattrs->target_tlb_bit0 = true;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_S2NS) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
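            /*
             * Illustrative note (not in the original source): if all
             * eight subregion disable bits are equal, the loop above
             * runs to completion and rsize grows back by 3, so the
             * region behaves as one undivided region of its full size.
             */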
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
                              int *prot, bool *is_subpage,
                              ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
        hit = true;
    } else {
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (hit) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
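/*
 * Worked example (illustrative, not in the original source): combining
 * stage 1 attrs 0xff (Normal, Write-Back, RW-allocate) with stage 2
 * attrs 0x44 (Normal, Non-cacheable) gives 0x44, since non-cacheable
 * wins per nibble, and the 0x44 special case above then forces the
 * result to Outer Shareable.
 */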
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early. */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation. */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms. */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                if (env->cp15.hcr_el2 & HCR_DC) {
                    /*
                     * HCR.DC forces the first stage attributes to
                     *  Normal Non-Shareable,
                     *  Inner Write-Back Read-Allocate Write-Allocate,
                     *  Outer Write-Back Read-Allocate Write-Allocate.
                     */
                    cacheattrs->attrs = 0xff;
                    cacheattrs->shareability = 0;
                }
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade an non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }
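    /*
     * Illustrative example (not in the original source): with
     * FCSEIDR = 0x06000000, a VA of 0x00001000 in the low 32MB is
     * remapped to the modified VA 0x06001000 before the table walk.
     */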
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
/* Walk the page table and (if the mapping exists) add the page
 * to the TLB. Return false on success, or true on failure. Populate
 * fsr with ARM DFSR/IFSR fault register format value on failure.
 */
bool arm_tlb_fill(CPUState *cs, vaddr address,
                  MMUAccessType access_type, int mmu_idx,
                  ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, address, access_type,
                        core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
                        &attrs, &prot, &page_size, fi, NULL);
    if (!ret) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
    }

    return ret;
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    uint32_t mask;
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        mask = 0;
        if ((reg & 1) && el) {
            mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
        }
        if (!(reg & 4)) {
            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        }
        /* EPSR reads as zero */
        return xpsr_read(env) & mask;
    case 20: /* CONTROL */
        return env->v7m.control[env->v7m.secure];
    case 0x94: /* CONTROL_NS */
        /* We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS];
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                                       " register %d\n", reg);
        return 0;
    }
}
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /* We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    if (arm_current_el(env) == 0 && reg > 7) {
        /* only xPSR sub-fields may be written by unprivileged */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            }
            return;
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
            uint32_t limit;

            if (!env->v7m.secure) {
                return;
            }

            limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];

            if (val < limit) {
                CPUState *cs = CPU(arm_env_get_cpu(env));

                cpu_restore_state(cs, GETPC(), true);
                raise_exception(env, EXCP_STKOF, 0, 1);
            }

            if (is_psp) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        /* only APSR is actually writable */
        if (!(reg & 4)) {
            uint32_t apsrmask = 0;

            if (mask & 8) {
                apsrmask |= XPSR_NZCV | XPSR_Q;
            }
            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
                apsrmask |= XPSR_GE;
            }
            xpsr_write(env, val, apsrmask);
        }
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /* Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         * For v7M, we must just ignore explicit writes to SPSEL in handler
         * mode; for v8M the write is permitted but will have no effect.
         */
        if (arm_feature(env, ARM_FEATURE_V8) ||
            !arm_v7m_is_handler_mode(env)) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                                       " register %d\n", reg);
        return;
    }
}
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;
    bool is_subpage;

    /* Work out what the security state and privilege level we're
     * interested in is...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /* We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /* MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &is_subpage,
                          &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}
)(CPUARMState
*env
, uint64_t vaddr_in
)
12356 /* Implement DC ZVA, which zeroes a fixed-length block of memory.
12357 * Note that we do not implement the (architecturally mandated)
12358 * alignment fault for attempts to use this on Device memory
12359 * (which matches the usual QEMU behaviour of not implementing either
12360 * alignment faults or any memory attribute handling).
12363 ARMCPU
*cpu
= arm_env_get_cpu(env
);
12364 uint64_t blocklen
= 4 << cpu
->dcz_blocksize
;
12365 uint64_t vaddr
= vaddr_in
& ~(blocklen
- 1);
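    /*
     * Worked example (illustrative, not in the original source): a
     * dcz_blocksize of 4 gives blocklen = 4 << 4 = 64 bytes, so a
     * vaddr_in of 0x1234 is rounded down to the 64-byte-aligned
     * block base 0x1200.
     */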
#ifndef CONFIG_USER_ONLY
    {
        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[maxidx];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /* If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /* OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /* Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}
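/*
 * Worked example (illustrative, not in the original source):
 * add8_sat(0x70, 0x30) computes res = 0xa0; the result's sign differs
 * from 'a' while the operands' signs agree, so it saturates to 0x7f.
 */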
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a)
        res = 0xffff;
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a)
        res = 0xff;
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n) SARITH8(a, b, n, +)
#define SUB8(a, b, n) SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}
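/*
 * Worked example (illustrative, not in the original source): with GE
 * flags 0b0011 the mask is 0x0000ffff, so SEL takes the low two bytes
 * from 'a' and the high two bytes from 'b'.
 */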
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    uint32_t i, fpscr;

    fpscr = env->vfp.xregs[ARM_VFP_FPSCR]
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);

    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    /* FZ16 does not generate an input denormal exception.  */
    i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
          & ~float_flag_input_denormal);
    fpscr |= vfp_exceptbits_from_host(i);

    i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3];
    fpscr |= i ? FPCR_QC : 0;

    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR];

    /* When ARMv8.2-FP16 is not supported, FZ16 is RES0.  */
    if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) {
        val &= ~FPCR_FZ16;
    }

    /*
     * We don't implement trapped exception handling, so the
     * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!)
     *
     * If we exclude the exception flags, IOC|DZC|OFC|UFC|IXC|IDC
     * (which are stored in fp_status), and the other RES0 bits
     * in between, then we clear all of the low 16 bits.
     */
    env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xf7c80000;
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    /*
     * The bit we set within fpscr_q is arbitrary; the register as a
     * whole being zero/non-zero is what counts.
     */
    env->vfp.qc[0] = val & FPCR_QC;
    env->vfp.qc[1] = 0;
    env->vfp.qc[2] = 0;
    env->vfp.qc[3] = 0;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
        set_float_rounding_mode(i, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ16) {
        bool ftz_enabled = val & FPCR_FZ16;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ) {
        bool ftz_enabled = val & FPCR_FZ;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status);
    }
    if (changed & FPCR_DN) {
        bool dnan_enabled = val & FPCR_DN;
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status);
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
    }

    /* The exception flags are ORed together when we read fpscr so we
     * only need to preserve the current state in one of our
     * float_status values.
     */
    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.fp_status_f16);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
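/*
 * Example: a guest write of 0x00c00000 sets FPSCR.RMode (bits 23:22) to
 * 0b11 = FPROUNDING_ZERO, so both fp_status and fp_status_f16 switch to
 * float_round_to_zero via the switch above.
 */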
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP
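/*
 * Each VFP_BINOP(x) invocation expands to a pair of helpers, e.g.
 * VFP_BINOP(add) defines helper_vfp_adds and helper_vfp_addd.
 */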
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
static void softfloat_to_vfp_compare(CPUARMState *env, int cmp)
{
    uint32_t flags;

    switch (cmp) {
    case float_relation_equal:
        flags = 0x6;
        break;
    case float_relation_less:
        flags = 0x8;
        break;
    case float_relation_greater:
        flags = 0x2;
        break;
    case float_relation_unordered:
        flags = 0x3;
        break;
    default:
        g_assert_not_reached();
    }
    env->vfp.xregs[ARM_VFP_FPSCR] =
        deposit32(env->vfp.xregs[ARM_VFP_FPSCR], 28, 4, flags);
}

/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    softfloat_to_vfp_compare(env, \
        type ## _compare_quiet(a, b, &env->vfp.fp_status)); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    softfloat_to_vfp_compare(env, \
        type ## _compare(a, b, &env->vfp.fp_status)); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
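/*
 * The nibble written to FPSCR<31:28> (N,Z,C,V) follows the ARM FP compare
 * convention: equal -> 0110, less -> 1000, greater -> 0010 and
 * unordered -> 0011.
 */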
/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, ftype, fsz, sign)                           \
ftype HELPER(name)(uint32_t x, void *fpstp)                         \
{                                                                   \
    float_status *fpst = fpstp;                                     \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst);     \
}

#define CONV_FTOI(name, ftype, fsz, sign, round)                    \
sign##int32_t HELPER(name)(ftype x, void *fpstp)                    \
{                                                                   \
    float_status *fpst = fpstp;                                     \
    if (float##fsz##_is_any_nan(x)) {                               \
        float_raise(float_flag_invalid, fpst);                      \
        return 0;                                                   \
    }                                                               \
    return float##fsz##_to_##sign##int32##round(x, fpst);           \
}

#define FLOAT_CONVS(name, p, ftype, fsz, sign)            \
    CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign)        \
    CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, )        \
    CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, h, uint32_t, 16, )
FLOAT_CONVS(si, s, float32, 32, )
FLOAT_CONVS(si, d, float64, 64, )
FLOAT_CONVS(ui, h, uint32_t, 16, u)
FLOAT_CONVS(ui, s, float32, 32, u)
FLOAT_CONVS(ui, d, float64, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
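/*
 * Example expansion: FLOAT_CONVS(si, s, float32, 32, ) defines
 * helper_vfp_sitos (int32 -> float32), helper_vfp_tosis (float32 -> int32
 * with FPSCR rounding) and helper_vfp_tosizs (float32 -> int32, round to
 * zero).
 */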
/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    return float32_to_float64(x, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    return float64_to_float32(x, &env->vfp.fp_status);
}
/* VFP3 fixed point conversion.  */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                   \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift,  \
                                     void *fpstp)                      \
{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); }

#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ROUND, suff)   \
uint##isz##_t HELPER(vfp_to##name##p##suff)(float##fsz x, uint32_t shift, \
                                            void *fpst)                  \
{                                                                         \
    if (unlikely(float##fsz##_is_any_nan(x))) {                           \
        float_raise(float_flag_invalid, fpst);                            \
        return 0;                                                         \
    }                                                                     \
    return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst);       \
}

#define VFP_CONV_FIX(name, p, fsz, isz, itype)                         \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                           \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,                     \
                         float_round_to_zero, _round_to_zero)          \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,                     \
                         get_float_rounding_mode(fpst), )

#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype)                     \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                           \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,                     \
                         get_float_rounding_mode(fpst), )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)

#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
#undef VFP_CONV_FIX_A64
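/*
 * Example expansion: VFP_CONV_FIX(sl, s, 32, 32, int32) defines
 * helper_vfp_sltos (fixed -> float32), helper_vfp_tosls_round_to_zero
 * and helper_vfp_tosls (float32 -> fixed with explicit vs. FPSCR
 * rounding respectively).
 */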
uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return int32_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return uint32_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return int64_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return uint64_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int16_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint16_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}

uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int32_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint32_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}

uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int64_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint64_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}
/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
{
    float_status *fp_status = fpstp;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
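/*
 * Usage sketch (simplified to direct calls; the translators emit the
 * equivalent TCG): save the old mode, run the rounding-sensitive op,
 * then restore:
 *
 *     uint32_t prev = helper_set_rmode(float_round_to_zero, fpst);
 *     ... rounding-sensitive operation ...
 *     helper_set_rmode(prev, fpst);
 */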
/* Half precision conversions.  */
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing input denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_inputs_to_zero(fpst);
    set_flush_inputs_to_zero(false, fpst);
    float32 r = float16_to_float32(a, !ahp_mode, fpst);
    set_flush_inputs_to_zero(save, fpst);
    return r;
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing output denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_to_zero(fpst);
    set_flush_to_zero(false, fpst);
    float16 r = float32_to_float16(a, !ahp_mode, fpst);
    set_flush_to_zero(save, fpst);
    return r;
}

float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing input denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_inputs_to_zero(fpst);
    set_flush_inputs_to_zero(false, fpst);
    float64 r = float16_to_float64(a, !ahp_mode, fpst);
    set_flush_inputs_to_zero(save, fpst);
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing output denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_to_zero(fpst);
    set_flush_to_zero(false, fpst);
    float16 r = float64_to_float16(a, !ahp_mode, fpst);
    set_flush_to_zero(save, fpst);
    return r;
}
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
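/*
 * These are the Newton-Raphson step helpers for VRECPS and VRSQRTS:
 * recps computes 2 - a*b and rsqrts computes (3 - a*b) / 2, with the
 * explicit checks above supplying the architected results for the
 * infinity-times-zero corner cases.
 */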
/* NEON helpers.  */

/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)
#define float16_maxnorm make_float16(0x7bff)
#define float32_maxnorm make_float32(0x7f7fffff)
#define float64_maxnorm make_float64(0x7fefffffffffffffLL)

/* Reciprocal functions
 *
 * The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate
 */

/* See RecipEstimate()
 *
 * input is a 9 bit fixed point number
 * input range 256 .. 511 for a number from 0.5 <= x < 1.0.
 * result range 256 .. 511 for a number from 1.0 to 511/256.
 */

static int recip_estimate(int input)
{
    int a, b, r;
    assert(256 <= input && input < 512);
    a = (input * 2) + 1;
    b = (1 << 19) / a;
    r = (b + 1) >> 1;
    assert(256 <= r && r < 512);
    return r;
}
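/*
 * Worked example: input = 256 (representing 0.5) gives a = 513,
 * b = (1 << 19) / 513 = 1022 and r = 511, i.e. 511/256 ~= 2.0 as expected
 * for the reciprocal of 0.5.
 */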
/*
 * Common wrapper to call recip_estimate
 *
 * The parameters are exponent and 64 bit fraction (without implicit
 * bit) where the binary point is nominally at bit 52. Returns a
 * float64 which can then be rounded to the appropriate size by the
 * callee.
 */

static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
{
    uint32_t scaled, estimate;
    uint64_t result_frac;
    int result_exp;

    /* Handle sub-normals */
    if (*exp == 0) {
        if (extract64(frac, 51, 1) == 0) {
            *exp = -1;
            frac <<= 2;
        } else {
            frac <<= 1;
        }
    }

    /* scaled = UInt('1':fraction<51:44>) */
    scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    estimate = recip_estimate(scaled);

    result_exp = exp_off - *exp;
    result_frac = deposit64(0, 44, 8, estimate);
    if (result_exp == 0) {
        result_frac = deposit64(result_frac >> 1, 51, 1, 1);
    } else if (result_exp == -1) {
        result_frac = deposit64(result_frac >> 2, 50, 2, 1);
        result_exp = 0;
    }

    *exp = result_exp;

    return result_frac;
}
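/*
 * Note on exp_off: callers pass 2*bias - 1 for the format in question
 * (29 for float16, 253 for float32, 2045 for float64), which is the
 * exponent arithmetic FPRecipEstimate() prescribes for 1/x.
 */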
static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even: /* Round to Nearest */
        return true;
    case float_round_up: /* Round to +Inf */
        return !sign_bit;
    case float_round_down: /* Round to -Inf */
        return sign_bit;
    case float_round_to_zero: /* Round to Zero */
        return false;
    }

    g_assert_not_reached();
}
uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f16 = float16_squash_input_denormal(input, fpst);
    uint32_t f16_val = float16_val(f16);
    uint32_t f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(f16_val, 10, 5);
    uint32_t f16_frac = extract32(f16_val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float16_silence_nan(f16, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    } else if (float16_is_infinity(f16)) {
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, fpst);
        return float16_set_sign(float16_infinity, float16_is_neg(f16));
    } else if (float16_abs(f16) < (1 << 8)) {
        /* Abs(value) < 2.0^-16 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f16_sign)) {
            return float16_set_sign(float16_infinity, f16_sign);
        } else {
            return float16_set_sign(float16_maxnorm, f16_sign);
        }
    } else if (f16_exp >= 29 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    }

    f64_frac = call_recip_estimate(&f16_exp, 29,
                                   ((uint64_t) f16_frac) << (52 - 10));

    /* result = sign : result_exp<4:0> : fraction<51:42> */
    f16_val = deposit32(0, 15, 1, f16_sign);
    f16_val = deposit32(f16_val, 10, 5, f16_exp);
    f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10));
    return make_float16(f16_val);
}
float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    bool f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_silence_nan(f32, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_abs(f32) < (1ULL << 21)) {
        /* Abs(value) < 2.0^-128 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sign)) {
            return float32_set_sign(float32_infinity, f32_sign);
        } else {
            return float32_set_sign(float32_maxnorm, f32_sign);
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }

    f64_frac = call_recip_estimate(&f32_exp, 253,
                                   ((uint64_t) f32_frac) << (52 - 23));

    /* result = sign : result_exp<7:0> : fraction<51:29> */
    f32_val = deposit32(0, 31, 1, f32_sign);
    f32_val = deposit32(f32_val, 23, 8, f32_exp);
    f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23));
    return make_float32(f32_val);
}
float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(f64_val, 52, 11);
    uint64_t f64_frac = extract64(f64_val, 0, 52);

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_silence_nan(f64, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Abs(value) < 2.0^-1024 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sign)) {
            return float64_set_sign(float64_infinity, f64_sign);
        } else {
            return float64_set_sign(float64_maxnorm, f64_sign);
        }
    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac);

    /* result = sign : result_exp<10:0> : fraction<51:0>; */
    f64_val = deposit64(0, 63, 1, f64_sign);
    f64_val = deposit64(f64_val, 52, 11, f64_exp);
    f64_val = deposit64(f64_val, 0, 52, f64_frac);
    return make_float64(f64_val);
}
/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */

static int do_recip_sqrt_estimate(int a)
{
    int b, estimate;

    assert(128 <= a && a < 512);
    if (a < 256) {
        a = a * 2 + 1;
    } else {
        a = (a >> 1) << 1;
        a = (a + 1) * 2;
    }
    b = 512;
    while (a * (b + 1) * (b + 1) < (1 << 28)) {
        b += 1;
    }
    estimate = (b + 1) / 2;
    assert(256 <= estimate && estimate < 512);

    return estimate;
}

static uint64_t recip_sqrt_estimate(int *exp, int exp_off, uint64_t frac)
{
    int estimate;
    uint32_t scaled;

    if (*exp == 0) {
        while (extract64(frac, 51, 1) == 0) {
            frac = frac << 1;
            *exp -= 1;
        }
        frac = extract64(frac, 0, 51) << 1;
    }

    if (*exp & 1) {
        /* scaled = UInt('01':fraction<51:45>) */
        scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
    } else {
        /* scaled = UInt('1':fraction<51:44>) */
        scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    }
    estimate = do_recip_sqrt_estimate(scaled);

    *exp = (exp_off - *exp) / 2;
    return extract64(estimate, 0, 8) << 44;
}
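/*
 * Note on exp_off here: callers pass 3*bias - 1 (44 for float16, 380 for
 * float32, 3068 for float64); combined with the parity test on *exp above,
 * this implements the halved, parity-preserving exponent arithmetic of
 * FPRSqrtEstimate().
 */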
uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
{
    float_status *s = fpstp;
    float16 f16 = float16_squash_input_denormal(input, s);
    uint16_t val = float16_val(f16);
    bool f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(val, 10, 5);
    uint16_t f16_frac = extract32(val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, s)) {
            float_raise(float_flag_invalid, s);
            nan = float16_silence_nan(f16, s);
        }
        if (s->default_nan_mode) {
            nan = float16_default_nan(s);
        }
        return nan;
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, s);
        return float16_set_sign(float16_infinity, f16_sign);
    } else if (f16_sign) {
        float_raise(float_flag_invalid, s);
        return float16_default_nan(s);
    } else if (float16_is_infinity(f16)) {
        return float16_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f16_frac) << (52 - 10);

    f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
    val = deposit32(0, 15, 1, f16_sign);
    val = deposit32(val, 10, 5, f16_exp);
    val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float16(val);
}
float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f32_frac) << 29;

    f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(15) */
    val = deposit32(0, 31, 1, f32_sign);
    val = deposit32(val, 23, 8, f32_exp);
    val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float32(val);
}
float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(val, 52, 11);
    uint64_t f64_frac = extract64(val, 0, 52);

    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, s)) {
            float_raise(float_flag_invalid, s);
            nan = float64_silence_nan(f64, s);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan(s);
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        float_raise(float_flag_invalid, s);
        return float64_default_nan(s);
    } else if (float64_is_infinity(f64)) {
        return float64_zero;
    }

    f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(44) */
    val = deposit64(0, 61, 1, f64_sign);
    val = deposit64(val, 52, 11, f64_exp);
    val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float64(val);
}
uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
{
    /* float_status *s = fpstp; */
    int input, estimate;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    input = extract32(a, 23, 9);
    estimate = recip_estimate(input);

    return deposit32(0, (32 - 9), 9, estimate);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
{
    int estimate;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));

    return deposit32(0, 23, 9, estimate);
}
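/*
 * The u32 variants treat the input as a 0.32 fixed-point fraction: inputs
 * below 0.5 (below 0.25 for rsqrte) have no representable estimate and
 * return all-ones; otherwise bits <31:23> index the same 9-bit estimate
 * functions used by the floating-point helpers above.
 */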
/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}
/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for TIEAWAY and ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
/*
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
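/*
 * These back the guest CRC32/CRC32C instructions; a guest accumulates a
 * checksum by feeding each helper's result back in as acc for the next
 * chunk of data.
 */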
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    int fpen;

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}
/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_current_el(env) != 0;

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}
ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    int el;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    el = arm_current_el(env);
    if (el < 2 && arm_is_secure_below_el3(env)) {
        return ARMMMUIdx_S1SE0 + el;
    }
    return ARMMMUIdx_S12NSE0 + el;
}
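/*
 * For example, non-secure EL1 yields ARMMMUIdx_S12NSE0 + 1
 * (i.e. ARMMMUIdx_S12NSE1), while secure EL0 yields ARMMMUIdx_S1SE0.
 */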
int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    return arm_to_core_mmu_idx(arm_mmu_idx(env));
}

#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    int current_el = arm_current_el(env);
    int fp_el = fp_exception_el(env, current_el);
    uint32_t flags = 0;

    if (is_a64(env)) {
        ARMCPU *cpu = arm_env_get_cpu(env);
        uint64_t sctlr;

        *pc = env->pc;
        flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);

        /* Get control bits for tagged addresses.  */
        {
            ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
            ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
            int tbii, tbid;

            /* FIXME: ARMv8.1-VHE S2 translation regime.  */
            if (regime_el(env, stage1) < 2) {
                ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
                tbid = (p1.tbi << 1) | p0.tbi;
                tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
            } else {
                tbid = p0.tbi;
                tbii = tbid & !p0.tbid;
            }

            flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
            flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
        }

        if (cpu_isar_feature(aa64_sve, cpu)) {
            int sve_el = sve_exception_el(env, current_el);
            uint32_t zcr_len;

            /* If SVE is disabled, but FP is enabled,
             * then the effective len is 0.
             */
            if (sve_el != 0 && fp_el == 0) {
                zcr_len = 0;
            } else {
                zcr_len = sve_zcr_len_for_el(env, current_el);
            }
            flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
            flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
        }

        if (current_el == 0) {
            /* FIXME: ARMv8.1-VHE S2 translation regime.  */
            sctlr = env->cp15.sctlr_el[1];
        } else {
            sctlr = env->cp15.sctlr_el[current_el];
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            /*
             * In order to save space in flags, we record only whether
             * pauth is "inactive", meaning all insns are implemented as
             * a nop, or "active" when some action must be performed.
             * The decision of which action to take is left to a helper.
             */
            if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
                flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
            }
        }

        if (cpu_isar_feature(aa64_bti, cpu)) {
            /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
            if (sctlr & (current_el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
                flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
            }
            flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
        }
    } else {
        *pc = env->regs[15];
        flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
        flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len);
        flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride);
        flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
        flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
        }
        flags = FIELD_DP32(flags, TBFLAG_A32, XSCALE_CPAR, env->cp15.c15_cpar);
    }

    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }
    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);

    if (arm_v7m_is_handler_mode(env)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
    }

    /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is
     * suppressing them because the requested execution priority is less
     * than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_M) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
    }

    *pflags = flags;
    *cs_base = 0;
}
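/*
 * Note: the value stored through *pflags ends up in tb->flags and is part
 * of the translation-block lookup key, so every bit folded in above forces
 * separate translated code for each distinct combination of CPU state.
 */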
#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= arm_env_get_cpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
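/*
 * Example: narrowing to vq = 3 keeps the low 48 predicate bits of p[0]
 * (16 predicate bits per 128-bit vector quadword) and clears p[1..3]
 * entirely.
 */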
/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
#endif