#include "qemu/osdep.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/kvm.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size, uint32_t *fsr,
                          ARMMMUFaultInfo *fi);

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr, uint32_t *fsr,
                               ARMMMUFaultInfo *fi);
#endif

/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRE   0x1

static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

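/* Illustrative note (sketch, not part of the upstream file): the GDB
 * register numbering used by the two helpers above is nregs 64-bit D
 * registers (16, or 32 with VFP3), then for NEON cores 16 quadword aliases
 * of those, then the 32-bit FPSID/FPSCR/FPEXC control registers; the return
 * value is the size in bytes produced or consumed, with 0 meaning
 * "not one of ours".
 */
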
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        stfq_le_p(buf, env->vfp.regs[reg * 2]);
        stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
        return 16;
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        env->vfp.regs[reg * 2] = ldfq_le_p(buf);
        env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
        return 16;
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

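/* Illustrative sketch (not part of the upstream file): the dispatch above
 * means a regdef is raw-accessible if it is constant, provides explicit raw
 * accessors, or has a plain fieldoffset.  Hypothetical examples:
 *
 *   { .name = "STATE_ONLY", .fieldoffset = offsetof(CPUARMState, ...) }
 *       -> raw_read()/raw_write() hit the field directly
 *   { .name = "SIDE_EFFECTS", .readfn = f, .writefn = g,
 *     .raw_readfn = raw_read, .raw_writefn = raw_write }
 *       -> side effects are skipped for migration-style accesses
 */
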
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

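/* Illustrative sketch (not part of the upstream file): the two
 * g_list_foreach() passes above implement a count-then-populate idiom --
 * count_cpreg() sizes the arrays, add_cpreg_to_list() fills them, and the
 * assert checks that both passes filtered the same set.  A hypothetical
 * caller exercising the resulting lists as a migration round-trip:
 */
static inline bool G_GNUC_UNUSED cpreg_list_roundtrip_sketch(ARMCPU *cpu)
{
    /* env -> (index, value) arrays, then arrays -> env with readback check */
    return write_cpustate_to_list(cpu) && write_list_to_cpustate(cpu);
}
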
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR_EL2.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

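/* Illustrative note (sketch, not part of the upstream file): the two checks
 * above encode the trap priority for this register group -- the EL2 trap bit
 * is only consulted from below EL2 in Non-secure state, while the EL3 trap
 * bit applies everywhere below EL3.  access_tdra(), access_tda() and
 * access_tpm() below repeat the same ladder with their respective MDCR bits.
 */
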
/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR(NS)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR(S)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}

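/* Illustrative worked example (sketch, not part of the upstream file): on a
 * pre-v8 core with VFP3 and NEON, mask ends up as
 * (1 << 31) | (1 << 30) | (0xf << 20), i.e. ASEDIS, D32DIS and the
 * cp10/cp11 access fields; all other CPACR bits are discarded as RAZ/WI.
 * Without NEON, or without VFP3, the corresponding RAO/WI bit is forced to
 * 1 before the mask is applied.
 */
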
static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

#ifndef CONFIG_USER_ONLY

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not support checking PMCCFILTR_EL0 register */

    if (!(env->cp15.c9_pmcr & PMCRE)) {
        return false;
    }

    return true;
}

void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

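/* Illustrative note (sketch, not part of the upstream file): while the
 * counter is enabled, c15_ccnt stores the cycle counter as a delta against
 * the free-running tick count rather than as an absolute value, so no
 * periodic timer is needed to keep it running:
 *
 *     write:  c15_ccnt = total_ticks - value
 *     read:   result   = total_ticks - c15_ccnt   ( == value )
 *
 * pmccntr_sync() applies the same transform, which is its own inverse, to
 * switch between the two representations around PMCR/PMCCFILTR updates.
 */
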
#else /* CONFIG_USER_ONLY */

void pmccntr_sync(CPUARMState *env)
{
}

#endif

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0x7E000000;
    pmccntr_sync(env);
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        pmccfiltr_write(env, ri, value);
    }
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
     * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        return env->cp15.pmccfiltr_el0;
    } else {
        return 0;
    }
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= (1 << 31);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pminten &= ~value;
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        ret |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        ret |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}

static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}

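/* Illustrative note (assumption: GTIMER_SCALE, defined in cpu.h, is the
 * generic timer period in nanoseconds, 16ns upstream): dividing the ns
 * clock by GTIMER_SCALE yields a 62.5MHz count, which is exactly the
 * CNTFRQ reset value ((1000 * 1000 * 1000) / GTIMER_SCALE) advertised in
 * generic_timer_cp_reginfo[] below.
 */
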
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}

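/* Illustrative worked example (hypothetical numbers, sketch only): with
 * offset == 0, cval == 1000 and a current count of 999, istatus is 0 and
 * the timer is re-armed for tick 1000; once count - offset >= cval,
 * istatus becomes 1, the output is raised if unmasked, and the next event
 * of interest is the count wrapping to zero, hence nexttick = UINT64_MAX,
 * clamped to what a QEMUTimer can represent.
 */
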
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

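/* Illustrative note (sketch, not part of the upstream file): TVAL is a
 * signed 32-bit downcounter view of the same deadline, so the two
 * functions above are inverses of each other:
 *
 *     read:   tval = (uint32_t)(cval - (count - offset))
 *     write:  cval = (count - offset) + sextract64(tval, 0, 32)
 *
 * Writing back the value just read therefore leaves the deadline unchanged.
 */
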
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}

static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL(S)",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL(S)",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
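/* The CNTFRQ_EL0 reset value above is (10^9 / GTIMER_SCALE); with the
 * GTIMER_SCALE of 16 used by this code that works out to 62,500,000, i.e.
 * the emulated system counter ticks at 62.5 MHz.
 */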
#else

/* In user-mode none of the generic timer registers are accessible,
 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
 * so instead just don't register any of them.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    REGINFO_SENTINEL
};

#endif
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}
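/* The masks above drop bits that are UNK/SBZP in the 32-bit PAR format for
 * the relevant architecture revision: the v7 mask 0xfffff6ff clears bits
 * [11] and [8], while the older mask 0xfffff1ff clears bits [11:9]. With
 * LPAE the full 64-bit value is kept as written.
 */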
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    uint32_t fsr;
    bool ret;
    uint64_t par64;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx,
                        &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
    if (extended_addresses_enabled(env)) {
        /* fsr is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
         * Convert it to a 64-bit PAR.
         */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            /* We don't set the ATTR or SH fields in the PAR. */
        } else {
            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
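/* For reference, the long-descriptor PAR value assembled above uses: bit 0
 * for F (fault), bits [6:1] for the FS fault status code on a fault, bit 9
 * for NS, bit 11 always 1 to flag the LPAE format, and the page-aligned
 * output physical address in the upper bits. The short-descriptor variant
 * instead packs the 4-bit FSR plus FSR bits [10] and [12] into bits [6:1].
 */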
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
#endif
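/* Decode sketch for ats_write64() above: the outer switch on (ri->opc2 & 6)
 * separates the S1E1/S1E2/S1E3 group (0) from S1E0 (2), S12E1 (4) and
 * S12E0 (6); within the first group ri->opc1 (0, 4 or 6) picks the issuing
 * exception level. Bit 0 of opc2 only distinguishes the R from W flavours
 * and is folded into access_type instead.
 */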
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format. */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
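/* Worked example of the two transforms above: the extended format stores
 * each 2-bit AP field in its own 4-bit slot, so field n moves between bits
 * [2n+1:2n] (simple) and [4n+1:4n] (extended). For instance,
 * extended_mpu_ap_bits(0xd) == 0x31 and simple_mpu_ap_bits(0x31) == 0xd;
 * the two functions are exact inverses for valid inputs.
 */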
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}

static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* 64 bit accesses to the TTBRs can change the ASID and so we
     * must flush the TLB.
     */
    if (cpreg_field_is_64bit(ri)) {
        ARMCPU *cpu = arm_env_get_cpu(env);

        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0 |
                            ARMMMUIdxBit_S2NS);
        raw_write(env, ri, value);
    }
}
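/* VTTBR carries the VMID in its top bits; because QEMU's software TLB is
 * not tagged by VMID, any change of the register value has to be treated as
 * a possible VMID change and therefore flushes the NS EL1&0 and stage 2
 * MMU indexes wholesale, which is what the mmuidx-mask flush above does.
 */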
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}
static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}

static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
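/* MPIDR layout refresher for the code above: the Aff0/Aff1/Aff2 affinity
 * fields from cpu->mp_affinity sit in bits [7:0], [15:8] and [23:16], bit
 * 30 is the U "uniprocessor" flag and bit 31 reads as 1 whenever the MP
 * extensions are implemented; non-secure EL1 reads are redirected to
 * VMPIDR_EL2 when EL2 is present, mirroring midr_read() and VPIDR_EL2.
 */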
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0 |
                                ARMMMUIdxBit_S2NS);
        } else {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0);
        }
    }
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);
    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else if (has_el2) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0 |
                                            ARMMMUIdxBit_S2NS);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S1SE1 |
                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S12NSE1 |
                                 ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    bool sec = arm_is_secure_below_el3(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (sec) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S1SE1 |
                                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S12NSE1 |
                                                 ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
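/* A note on the address computation used by the by-VA invalidates above:
 * the TLBI payload carries VA[55:12] in register bits [43:0], so
 * sextract64(value << 12, 0, 56) first rebuilds the byte address and then
 * sign-extends from bit 55 to produce the canonical form that
 * tlb_flush_page_by_mmuidx() expects; the IPA variants extract only 48
 * bits because stage 2 input addresses are zero- rather than sign-extended.
 */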
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* We don't implement EL2, so the only control on DC ZVA is the
     * bit in the SCTLR which can prohibit access for EL0.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
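/* DCZID_EL0.BS is the log2 of the DC ZVA block size in 4-byte words, so a
 * dcz_blocksize of 4 advertises 2^4 words = 64-byte blocks; bit 4 (DZP) is
 * set above precisely when the access check says EL0 DC ZVA is prohibited.
 */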
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));
}

static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
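/* SDCR is the AArch32 view of (the low bits of) MDCR_EL3, which is why the
 * write above lands in env->cp15.mdcr_el3 after being masked down to the
 * architecturally valid bits.
 */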
3286 static const ARMCPRegInfo v8_cp_reginfo
[] = {
3287 /* Minimal set of EL0-visible registers. This will need to be expanded
3288 * significantly for system emulation of AArch64 CPUs.
3290 { .name
= "NZCV", .state
= ARM_CP_STATE_AA64
,
3291 .opc0
= 3, .opc1
= 3, .opc2
= 0, .crn
= 4, .crm
= 2,
3292 .access
= PL0_RW
, .type
= ARM_CP_NZCV
},
3293 { .name
= "DAIF", .state
= ARM_CP_STATE_AA64
,
3294 .opc0
= 3, .opc1
= 3, .opc2
= 1, .crn
= 4, .crm
= 2,
3295 .type
= ARM_CP_NO_RAW
,
3296 .access
= PL0_RW
, .accessfn
= aa64_daif_access
,
3297 .fieldoffset
= offsetof(CPUARMState
, daif
),
3298 .writefn
= aa64_daif_write
, .resetfn
= arm_cp_reset_ignore
},
3299 { .name
= "FPCR", .state
= ARM_CP_STATE_AA64
,
3300 .opc0
= 3, .opc1
= 3, .opc2
= 0, .crn
= 4, .crm
= 4,
3301 .access
= PL0_RW
, .readfn
= aa64_fpcr_read
, .writefn
= aa64_fpcr_write
},
3302 { .name
= "FPSR", .state
= ARM_CP_STATE_AA64
,
3303 .opc0
= 3, .opc1
= 3, .opc2
= 1, .crn
= 4, .crm
= 4,
3304 .access
= PL0_RW
, .readfn
= aa64_fpsr_read
, .writefn
= aa64_fpsr_write
},
3305 { .name
= "DCZID_EL0", .state
= ARM_CP_STATE_AA64
,
3306 .opc0
= 3, .opc1
= 3, .opc2
= 7, .crn
= 0, .crm
= 0,
3307 .access
= PL0_R
, .type
= ARM_CP_NO_RAW
,
3308 .readfn
= aa64_dczid_read
},
3309 { .name
= "DC_ZVA", .state
= ARM_CP_STATE_AA64
,
3310 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 4, .opc2
= 1,
3311 .access
= PL0_W
, .type
= ARM_CP_DC_ZVA
,
3312 #ifndef CONFIG_USER_ONLY
3313 /* Avoid overhead of an access check that always passes in user-mode */
3314 .accessfn
= aa64_zva_access
,
3317 { .name
= "CURRENTEL", .state
= ARM_CP_STATE_AA64
,
3318 .opc0
= 3, .opc1
= 0, .opc2
= 2, .crn
= 4, .crm
= 2,
3319 .access
= PL1_R
, .type
= ARM_CP_CURRENTEL
},
3320 /* Cache ops: all NOPs since we don't emulate caches */
3321 { .name
= "IC_IALLUIS", .state
= ARM_CP_STATE_AA64
,
3322 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 1, .opc2
= 0,
3323 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3324 { .name
= "IC_IALLU", .state
= ARM_CP_STATE_AA64
,
3325 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 5, .opc2
= 0,
3326 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3327 { .name
= "IC_IVAU", .state
= ARM_CP_STATE_AA64
,
3328 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 5, .opc2
= 1,
3329 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3330 .accessfn
= aa64_cacheop_access
},
3331 { .name
= "DC_IVAC", .state
= ARM_CP_STATE_AA64
,
3332 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 6, .opc2
= 1,
3333 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3334 { .name
= "DC_ISW", .state
= ARM_CP_STATE_AA64
,
3335 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 6, .opc2
= 2,
3336 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3337 { .name
= "DC_CVAC", .state
= ARM_CP_STATE_AA64
,
3338 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 10, .opc2
= 1,
3339 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3340 .accessfn
= aa64_cacheop_access
},
3341 { .name
= "DC_CSW", .state
= ARM_CP_STATE_AA64
,
3342 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 10, .opc2
= 2,
3343 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3344 { .name
= "DC_CVAU", .state
= ARM_CP_STATE_AA64
,
3345 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 11, .opc2
= 1,
3346 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3347 .accessfn
= aa64_cacheop_access
},
3348 { .name
= "DC_CIVAC", .state
= ARM_CP_STATE_AA64
,
3349 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 14, .opc2
= 1,
3350 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3351 .accessfn
= aa64_cacheop_access
},
3352 { .name
= "DC_CISW", .state
= ARM_CP_STATE_AA64
,
3353 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 14, .opc2
= 2,
3354 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3355 /* TLBI operations */
3356 { .name
= "TLBI_VMALLE1IS", .state
= ARM_CP_STATE_AA64
,
3357 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 0,
3358 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3359 .writefn
= tlbi_aa64_vmalle1is_write
},
3360 { .name
= "TLBI_VAE1IS", .state
= ARM_CP_STATE_AA64
,
3361 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 1,
3362 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3363 .writefn
= tlbi_aa64_vae1is_write
},
3364 { .name
= "TLBI_ASIDE1IS", .state
= ARM_CP_STATE_AA64
,
3365 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 2,
3366 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3367 .writefn
= tlbi_aa64_vmalle1is_write
},
3368 { .name
= "TLBI_VAAE1IS", .state
= ARM_CP_STATE_AA64
,
3369 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 3,
3370 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3371 .writefn
= tlbi_aa64_vae1is_write
},
3372 { .name
= "TLBI_VALE1IS", .state
= ARM_CP_STATE_AA64
,
3373 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 5,
3374 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3375 .writefn
= tlbi_aa64_vae1is_write
},
3376 { .name
= "TLBI_VAALE1IS", .state
= ARM_CP_STATE_AA64
,
3377 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 7,
3378 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3379 .writefn
= tlbi_aa64_vae1is_write
},
3380 { .name
= "TLBI_VMALLE1", .state
= ARM_CP_STATE_AA64
,
3381 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 0,
3382 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3383 .writefn
= tlbi_aa64_vmalle1_write
},
3384 { .name
= "TLBI_VAE1", .state
= ARM_CP_STATE_AA64
,
3385 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 1,
3386 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3387 .writefn
= tlbi_aa64_vae1_write
},
3388 { .name
= "TLBI_ASIDE1", .state
= ARM_CP_STATE_AA64
,
3389 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 2,
3390 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3391 .writefn
= tlbi_aa64_vmalle1_write
},
3392 { .name
= "TLBI_VAAE1", .state
= ARM_CP_STATE_AA64
,
3393 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 3,
3394 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3395 .writefn
= tlbi_aa64_vae1_write
},
3396 { .name
= "TLBI_VALE1", .state
= ARM_CP_STATE_AA64
,
3397 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 5,
3398 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3399 .writefn
= tlbi_aa64_vae1_write
},
3400 { .name
= "TLBI_VAALE1", .state
= ARM_CP_STATE_AA64
,
3401 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 7,
3402 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3403 .writefn
= tlbi_aa64_vae1_write
},
3404 { .name
= "TLBI_IPAS2E1IS", .state
= ARM_CP_STATE_AA64
,
3405 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 0, .opc2
= 1,
3406 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3407 .writefn
= tlbi_aa64_ipas2e1is_write
},
3408 { .name
= "TLBI_IPAS2LE1IS", .state
= ARM_CP_STATE_AA64
,
3409 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 0, .opc2
= 5,
3410 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3411 .writefn
= tlbi_aa64_ipas2e1is_write
},
3412 { .name
= "TLBI_ALLE1IS", .state
= ARM_CP_STATE_AA64
,
3413 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 3, .opc2
= 4,
3414 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3415 .writefn
= tlbi_aa64_alle1is_write
},
3416 { .name
= "TLBI_VMALLS12E1IS", .state
= ARM_CP_STATE_AA64
,
3417 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 3, .opc2
= 6,
3418 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3419 .writefn
= tlbi_aa64_alle1is_write
},
3420 { .name
= "TLBI_IPAS2E1", .state
= ARM_CP_STATE_AA64
,
3421 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 4, .opc2
= 1,
3422 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3423 .writefn
= tlbi_aa64_ipas2e1_write
},
3424 { .name
= "TLBI_IPAS2LE1", .state
= ARM_CP_STATE_AA64
,
3425 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 4, .opc2
= 5,
3426 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3427 .writefn
= tlbi_aa64_ipas2e1_write
},
3428 { .name
= "TLBI_ALLE1", .state
= ARM_CP_STATE_AA64
,
3429 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 7, .opc2
= 4,
3430 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3431 .writefn
= tlbi_aa64_alle1_write
},
3432 { .name
= "TLBI_VMALLS12E1", .state
= ARM_CP_STATE_AA64
,
3433 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 7, .opc2
= 6,
3434 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3435 .writefn
= tlbi_aa64_alle1is_write
},
3436 #ifndef CONFIG_USER_ONLY
3437 /* 64 bit address translation operations */
3438 { .name
= "AT_S1E1R", .state
= ARM_CP_STATE_AA64
,
3439 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 8, .opc2
= 0,
3440 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3441 { .name
= "AT_S1E1W", .state
= ARM_CP_STATE_AA64
,
3442 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 8, .opc2
= 1,
3443 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3444 { .name
= "AT_S1E0R", .state
= ARM_CP_STATE_AA64
,
3445 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 8, .opc2
= 2,
3446 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3447 { .name
= "AT_S1E0W", .state
= ARM_CP_STATE_AA64
,
3448 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 8, .opc2
= 3,
3449 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3450 { .name
= "AT_S12E1R", .state
= ARM_CP_STATE_AA64
,
3451 .opc0
= 1, .opc1
= 4, .crn
= 7, .crm
= 8, .opc2
= 4,
3452 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3453 { .name
= "AT_S12E1W", .state
= ARM_CP_STATE_AA64
,
3454 .opc0
= 1, .opc1
= 4, .crn
= 7, .crm
= 8, .opc2
= 5,
3455 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3456 { .name
= "AT_S12E0R", .state
= ARM_CP_STATE_AA64
,
3457 .opc0
= 1, .opc1
= 4, .crn
= 7, .crm
= 8, .opc2
= 6,
3458 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3459 { .name
= "AT_S12E0W", .state
= ARM_CP_STATE_AA64
,
3460 .opc0
= 1, .opc1
= 4, .crn
= 7, .crm
= 8, .opc2
= 7,
3461 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3462 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
3463 { .name
= "AT_S1E3R", .state
= ARM_CP_STATE_AA64
,
3464 .opc0
= 1, .opc1
= 6, .crn
= 7, .crm
= 8, .opc2
= 0,
3465 .access
= PL3_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3466 { .name
= "AT_S1E3W", .state
= ARM_CP_STATE_AA64
,
3467 .opc0
= 1, .opc1
= 6, .crn
= 7, .crm
= 8, .opc2
= 1,
3468 .access
= PL3_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3469 { .name
= "PAR_EL1", .state
= ARM_CP_STATE_AA64
,
3470 .type
= ARM_CP_ALIAS
,
3471 .opc0
= 3, .opc1
= 0, .crn
= 7, .crm
= 4, .opc2
= 0,
3472 .access
= PL1_RW
, .resetvalue
= 0,
3473 .fieldoffset
= offsetof(CPUARMState
, cp15
.par_el
[1]),
3474 .writefn
= par_write
},
3476 /* TLB invalidate last level of translation table walk */
3477 { .name
= "TLBIMVALIS", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 5,
3478 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimva_is_write
},
3479 { .name
= "TLBIMVAALIS", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 7,
3480 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
,
3481 .writefn
= tlbimvaa_is_write
},
3482 { .name
= "TLBIMVAL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 5,
3483 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimva_write
},
3484 { .name
= "TLBIMVAAL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 7,
3485 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimvaa_write
},
3486 { .name
= "TLBIMVALH", .cp
= 15, .opc1
= 4, .crn
= 8, .crm
= 7, .opc2
= 5,
3487 .type
= ARM_CP_NO_RAW
, .access
= PL2_W
,
3488 .writefn
= tlbimva_hyp_write
},
3489 { .name
= "TLBIMVALHIS",
3490 .cp
= 15, .opc1
= 4, .crn
= 8, .crm
= 3, .opc2
= 5,
3491 .type
= ARM_CP_NO_RAW
, .access
= PL2_W
,
3492 .writefn
= tlbimva_hyp_is_write
},
3493 { .name
= "TLBIIPAS2",
3494 .cp
= 15, .opc1
= 4, .crn
= 8, .crm
= 4, .opc2
= 1,
3495 .type
= ARM_CP_NO_RAW
, .access
= PL2_W
,
3496 .writefn
= tlbiipas2_write
},
3497 { .name
= "TLBIIPAS2IS",
3498 .cp
= 15, .opc1
= 4, .crn
= 8, .crm
= 0, .opc2
= 1,
3499 .type
= ARM_CP_NO_RAW
, .access
= PL2_W
,
3500 .writefn
= tlbiipas2_is_write
},
3501 { .name
= "TLBIIPAS2L",
3502 .cp
= 15, .opc1
= 4, .crn
= 8, .crm
= 4, .opc2
= 5,
3503 .type
= ARM_CP_NO_RAW
, .access
= PL2_W
,
3504 .writefn
= tlbiipas2_write
},
3505 { .name
= "TLBIIPAS2LIS",
3506 .cp
= 15, .opc1
= 4, .crn
= 8, .crm
= 0, .opc2
= 5,
3507 .type
= ARM_CP_NO_RAW
, .access
= PL2_W
,
3508 .writefn
= tlbiipas2_is_write
},
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};
/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC Disables stage1 and enables stage2 translation
     */
    if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
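
/* A minimal sketch (not part of the original file): the RES0 masking that
 * hcr_write() applies, split out as a pure function. HCR_MASK, HCR_HCD,
 * HCR_TSC and the feature tests are the real ones used above; the helper
 * itself is hypothetical and exists only to show which bits survive a write.
 */
static inline uint64_t hcr_writable_mask(CPUARMState *env, ARMCPU *cpu)
{
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;   /* HCD is RES0 when EL3 is implemented */
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        valid_mask &= ~HCR_TSC;   /* TSC is RES0 without an SMC conduit */
    }
    return valid_mask;
}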
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay.
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
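
/* Summary of the access matrix nsacr_access() implements (informational
 * comment only, added for clarity):
 *
 *   caller state             read              write
 *   EL3                      OK                OK
 *   Secure EL1 (below EL3)   trap to EL3       trap to EL3
 *   NS EL1 / NS EL2          OK                UNDEF (uncategorized)
 */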
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
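
/* Illustrative sketch (hypothetical helper, not the file's actual
 * registration path): the shape of a CTR_EL0 reginfo entry that routes
 * EL0 reads through ctr_el0_access() above. The real CTR_EL0 entry is
 * registered elsewhere with the per-CPU cache type value.
 */
static inline void define_ctr_el0_example(ARMCPU *cpu, uint64_t ctr_value)
{
    ARMCPRegInfo ctr = {
        .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
        .opc0 = 3, .opc1 = 3, .crn = 0, .crm = 0, .opc2 = 1,
        .access = PL0_R, .accessfn = ctr_el0_access,
        .type = ARM_CP_CONST, .resetvalue = ctr_value,
    };
    define_one_arm_cp_reg(cpu, &ctr);
}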
static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
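
/* Worked example (illustrative only): an AArch32 guest writing the
 * architected lock key 0xC5ACCE55 to OSLAR yields oslock == 1, so
 * deposit32() above sets bit 1 of OSLSR_EL1 (the OSLK flag); any other
 * AArch32 value clears it. In AArch64 state only bit 0 of the written
 * value is considered.
 */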
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
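
/* A minimal sketch (hypothetical helper, not called by the code above) of
 * the BAS decode at the end of hw_watchpoint_update(): the first run of 1s
 * in BAS gives a byte offset (ctz32) and a length (cto32 of the shifted
 * value). For bas = 0x3c (0b00111100) this yields offset 2 and length 4,
 * i.e. the watchpoint covers wvr+2 .. wvr+5.
 */
static inline void wp_bas_to_range(int bas, int *offset, int *len)
{
    *offset = ctz32(bas);           /* position of the first watched byte */
    *len = cto32(bas >> *offset);   /* number of contiguous watched bytes */
}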
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
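
/* Worked example (illustrative only) of the canonicalisation above, as a
 * hypothetical pure function: sextract64(v, 0, 49) propagates bit 48 into
 * bits [63:49] and the final mask clears the RES0 bits [1:0]. So for
 * v = 0x000100000000000fULL (bit 48 set), the stored value is
 * 0xffff00000000000cULL.
 */
static inline uint64_t dbgwvr_canonicalize(uint64_t v)
{
    return sextract64(v, 0, 49) & ~3ULL;
}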
static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
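
/* Worked example (illustrative only) for the address-match cases above:
 * with bvr = 0x8004 and BAS = 0b1100, addr canonicalises to 0x8004 and is
 * then bumped by 2, so the breakpoint fires only for the 16-bit
 * instruction at 0x8006; BAS = 0b0011 or 0b1111 would leave it at 0x8004.
 */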
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
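
/* A minimal sketch (hypothetical helper) of the BAS mirroring above: after
 * it, BAS[1] always equals BAS[0] and BAS[3] always equals BAS[2] (BAS
 * occupies bits [8:5]), so the only observable BAS patterns are 0b0000,
 * 0b0011, 0b1100 and 0b1111: exactly the four cases handled in
 * hw_breakpoint_update().
 */
static inline uint64_t dbgbcr_canonicalize_bas(uint64_t v)
{
    v = deposit64(v, 6, 1, extract64(v, 5, 1)); /* BAS[1] := BAS[0] */
    v = deposit64(v, 8, 1, extract64(v, 7, 1)); /* BAS[3] := BAS[2] */
    return v;
}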
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
     * of the debug registers such as number of breakpoints;
     * check that if they both exist then they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
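
/* Worked example (illustrative only): a CPU model whose DBGDIDR is
 * 0x3516d000 decodes above to brps = 5, wrps = 3 and ctx_cmps = 1, so
 * define_debug_regs() registers 6 breakpoint and 4 watchpoint register
 * pairs, with 2 context comparators implied.
 */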
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr1 },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr4 },
            /* 7 is as yet unallocated and must RAZ */
            { .name = "ID_ISAR7_RESERVED", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement only the cycle
         * count register.
         */
#ifndef CONFIG_USER_ONLY
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW,
            .type = ARM_CP_IO | ARM_CP_ALIAS,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = cpu->midr & 0xff000000,
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
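        /* Worked example (illustrative only): for an ARM Ltd. core with
         * MIDR 0x410fd070, the PMCR_EL0 reset value above is
         * 0x410fd070 & 0xff000000 == 0x41000000, i.e. the IMPLEMENTER
         * field is mirrored from the MIDR and every other PMCR field
         * resets to zero.
         */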
#endif
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64pfr0 },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64pfr1 },
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
        /* Pre-v8 MIDR space.
         * Note that the MIDR isn't a simple constant register because
         * of the TI925 behaviour where writes to another register can
         * cause the MIDR value to change.
         *
         * Unimplemented registers in the c15 0 0 0 space default to
         * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
         * and friends override accordingly.
         */
        { .name = "MIDR",
          .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
          .access = PL1_R, .resetvalue = cpu->midr,
          .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
          .readfn = midr_read,
          .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
          .type = ARM_CP_OVERRIDE },
        /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
        { .name = "DUMMY",
          .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        { .name = "DUMMY",
          .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        { .name = "DUMMY",
          .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        { .name = "DUMMY",
          .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        { .name = "DUMMY",
          .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        REGINFO_SENTINEL
    };
    ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
        { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
          .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
          .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
          .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
          .readfn = midr_read },
        /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
        { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
          .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
          .access = PL1_R, .resetvalue = cpu->midr },
        { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
          .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
          .access = PL1_R, .resetvalue = cpu->midr },
        { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
          .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
        REGINFO_SENTINEL
    };
    ARMCPRegInfo id_cp_reginfo[] = {
        /* These are common to v8 and pre-v8 */
        { .name = "CTR",
          .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
        { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
          .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
          .access = PL0_R, .accessfn = ctr_el0_access,
          .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
        /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
        { .name = "TCMTR",
          .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        REGINFO_SENTINEL
    };
    /* TLBTR is specific to VMSA */
    ARMCPRegInfo id_tlbtr_reginfo = {
        .name = "TLBTR",
        .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
        .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
    };
    /* MPUIR is specific to PMSA V6+ */
    ARMCPRegInfo id_mpuir_reginfo = {
        .name = "MPUIR",
        .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
        .access = PL1_R, .type = ARM_CP_CONST,
        .resetvalue = cpu->pmsav7_dregion << 8
    };
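    /* MPUIR bits [15:8] are the DREGION field (the number of implemented
     * MPU data regions), which is why the reset value above is
     * pmsav7_dregion shifted left by eight bits.
     */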
    ARMCPRegInfo crn0_wi_reginfo = {
        .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
        .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
        .type = ARM_CP_NOP | ARM_CP_OVERRIDE
    };
    if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
        arm_feature(env, ARM_FEATURE_STRONGARM)) {
        ARMCPRegInfo *r;
        /* Register the blanket "writes ignored" value first to cover the
         * whole space. Then update the specific ID registers to allow write
         * access, so that they ignore writes rather than causing them to
         * UNDEF.
         */
        define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
        for (r = id_pre_v8_midr_cp_reginfo;
             r->type != ARM_CP_SENTINEL; r++) {
            r->access = PL1_RW;
        }
        for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
            r->access = PL1_RW;
        }
        id_tlbtr_reginfo.access = PL1_RW;
        id_mpuir_reginfo.access = PL1_RW;
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
    }
    define_arm_cp_regs(cpu, id_cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_PMSA)) {
        define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .type = ARM_CP_CONST,
              .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
}
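/* The register counts passed above follow from the gdb XML descriptions:
 * the AArch64 view is 32 V registers plus FPSR and FPCR (34); Neon is
 * 32 D registers, 16 Q aliases and three status registers (51); VFP3 is
 * 32 D registers plus the three status registers (35); plain VFP is 16 D
 * registers plus the three (19).
 */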
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}
static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n",
                      name);
    g_free(name);
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);

#ifdef CONFIG_KVM
    /* The 'host' CPU type is dynamically registered only if KVM is
     * enabled, so we have to special-case it here:
     */
    (*cpu_fprintf)(f, "  host (only available in KVM mode)\n");
#endif
}
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    /* Reset the secure state to the specific incoming state.  This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank.  This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2);
                            break;
                        default:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of AArch32 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2);
                    }
                }
            }
        }
    }
}
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->cp15.hcr_el2 & HCR_TGE) &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            !arm_is_secure_below_el3(env)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
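/* For reference, the CPSR layout assembled above is (ARMv7-A/R):
 *   31 30 29 28 27  26:25   24  23:20  19:16   15:10  9 8 7 6 5  4:0
 *    N  Z  C  V  Q  IT[1:0]  J  RES0  GE[3:0] IT[7:2] E A I F T M[4:0]
 * QEMU caches NZCV, Q, GE, the IT bits and T outside uncached_cpsr and
 * ORs them back in here.
 */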
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}
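/* Worked example: sxtb16(0x00810042) sign-extends the low byte of each
 * halfword, giving 0xff810042; uxtb16 of the same value zero-extends
 * instead, giving 0x00810042.
 */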
int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    if (num == INT_MIN && den == -1)
        return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
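/* These match the ARM ARM semantics for SDIV/UDIV: division by zero
 * returns zero rather than trapping, and the one signed overflow case
 * (INT_MIN / -1) returns INT_MIN.
 */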
#if defined(CONFIG_USER_ONLY)

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
    return 0;
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contains a target of EL1.  This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *      BIT  IRQ      IMO      Non-secure         Secure
 *      EL3  FIQ  RW  FMO   EL0 EL1 EL2 EL3    EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
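/* Example read of the table: a physical IRQ taken from NS EL0 on a CPU
 * with a 64-bit EL3, SCR.IRQ = 0, SCR.RW = 1 and HCR.IMO = 0 indexes
 * target_el_table[1][0][1][0][0][0], which is 1: the IRQ targets EL1.
 */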
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    int rw;
    int scr;
    int hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    int is64 = arm_feature(env, ARM_FEATURE_AARCH64);

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
        break;
    };

    /* If HCR.TGE is set then HCR is treated as being 1 */
    hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}
static void v7m_push(CPUARMState *env, uint32_t val)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    env->regs[13] -= 4;
    stl_phys(cs->as, env->regs[13], val);
}
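/* M profile stacks are full-descending: SP is decremented first and the
 * value is then stored at the new SP address.
 */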
/* Return true if we're using the process stack pointer (not the MSP) */
static bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/* Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /* Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}
/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /* All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    if (dest >= 0xff000000) {
        /* This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
}
static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /* Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
static uint32_t arm_v7m_load_vector(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    hwaddr vec = env->v7m.vecbase[env->v7m.secure] + env->v7m.exception * 4;
    uint32_t addr;

    addr = address_space_ldl(cs->as, vec,
                             MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        /* Architecturally this should cause a HardFault setting HSFR.VECTTBL,
         * which would then be immediately followed by our failing to load
         * the entry vector for that HardFault, which is a Lockup case.
         * Since we don't model Lockup, we just report this guest error
         * via cpu_abort().
         */
        cpu_abort(cs, "Failed to read from exception vector table "
                  "entry %08x\n", (unsigned)vec);
    }
    return addr;
}
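/* The vector fetch address is simply the VTOR value for the current
 * security bank plus 4 bytes per exception number; e.g. with VTOR = 0
 * and exception 3 (HardFault) the handler entry is loaded from
 * address 0x0000000c.
 */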
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr)
{
    /* Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;

    armv7m_nvic_acknowledge_irq(env->nvic);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    addr = arm_v7m_load_vector(cpu);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}

static void v7m_push_stack(ARMCPU *cpu)
{
    /* Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     */
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);

    /* Align stack pointer if the guest wants that */
    if ((env->regs[13] & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        env->regs[13] -= 4;
        xpsr |= XPSR_SPREALIGN;
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
}
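/* The resulting basic exception frame, from lowest address upwards:
 *   SP+0x00 r0,  +0x04 r1, +0x08 r2, +0x0c r3,
 *   SP+0x10 r12, +0x14 lr, +0x18 return address, +0x1c xPSR
 * This is the layout the pops in do_v7m_exception_exit() below rely on.
 */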
static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t excret;
    uint32_t xpsr;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;

    /* We can only get here from an EXCP_EXCEPTION_EXIT, and
     * gen_bx_excret() enforces the architectural rule
     * that jumps to magic addresses don't have magic behaviour unless
     * we're in Handler mode (compare pseudocode BXWritePC()).
     */
    assert(arm_v7m_is_handler_mode(env));

    /* In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = 1;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /* Auto-clear FAULTMASK on return from other than NMI.
         * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
         * value of the ES bit in the exception return value indicates
         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            exc_secure = excret & R_V7M_EXCRET_ES_MASK;
            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
                env->v7m.faultmask[exc_secure] = 0;
            }
        } else {
            env->v7m.faultmask[M_REG_NS] = 0;
        }
    }

    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
                                     exc_secure)) {
    case -1:
        /* attempt to exit an exception that isn't active */
        ufault = true;
        break;
    case 0:
        /* still an irq active now */
        break;
    case 1:
        /* we returned to base exception level, no nesting.
         * (In the pseudocode this is written using "NestedActivation != 1"
         * where we have 'rettobase == false'.)
         */
        rettobase = true;
        break;
    default:
        g_assert_not_reached();
    }

    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        (excret & R_V7M_EXCRET_S_MASK);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
             * we choose to take the UsageFault.
             */
            if ((excret & R_V7M_EXCRET_S_MASK) ||
                (excret & R_V7M_EXCRET_ES_MASK) ||
                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
                ufault = true;
            }
        }
        if (excret & R_V7M_EXCRET_RES0_MASK) {
            ufault = true;
        }
    } else {
        /* For v7M we only recognize certain combinations of the low bits */
        switch (excret & 0xf) {
        case 1: /* Return to Handler */
            break;
        case 13: /* Return to Thread using Process stack */
        case 9: /* Return to Thread using Main stack */
            /* We only need to check NONBASETHRDENA for v7M, because in
             * v8M this bit does not exist (it is RES1).
             */
            if (!rettobase &&
                !(env->v7m.ccr[env->v7m.secure] &
                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
                ufault = true;
            }
            break;
        default:
            ufault = true;
        }
    }

    if (sfault) {
        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        v7m_exception_taken(cpu, excret);
        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                      "stackframe: failed EXC_RETURN.ES validity check\n");
        return;
    }

    if (ufault) {
        /* Bad exception return: instead of popping the exception
         * stack, directly take a usage fault on the current stack.
         */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        v7m_exception_taken(cpu, excret);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                      "stackframe: failed exception return integrity check\n");
        return;
    }

    /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
     * Handler mode (and will be until we write the new XPSR.Interrupt
     * field) this does not switch around the current stack pointer.
     */
    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);

    switch_v7m_security_state(env, return_to_secure);

    {
        /* The stack pointer we should be reading the exception frame from
         * depends on bits in the magic exception return type value (and
         * for v8M isn't necessarily the stack pointer we will eventually
         * end up resuming execution with). Get a pointer to the location
         * in the CPU state struct where the SP we need is currently being
         * stored; we will use and modify it in place.
         * We use this limited C variable scope so we don't accidentally
         * use 'frame_sp_p' after we do something that makes it invalid.
         */
        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
                                              return_to_secure,
                                              !return_to_handler,
                                              return_to_sp_process);
        uint32_t frameptr = *frame_sp_p;

        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "M profile exception return with non-8-aligned SP "
                          "for destination state is UNPREDICTABLE\n");
        }

        /* Do we need to pop callee-saved registers? */
        if (return_to_secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
            uint32_t expected_sig = 0xfefa125b;
            uint32_t actual_sig = ldl_phys(cs->as, frameptr);

            if (expected_sig != actual_sig) {
                /* Take a SecureFault on the current stack */
                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                v7m_exception_taken(cpu, excret);
                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                              "stackframe: failed exception return integrity "
                              "signature check\n");
                return;
            }

            env->regs[4] = ldl_phys(cs->as, frameptr + 0x8);
            env->regs[5] = ldl_phys(cs->as, frameptr + 0xc);
            env->regs[6] = ldl_phys(cs->as, frameptr + 0x10);
            env->regs[7] = ldl_phys(cs->as, frameptr + 0x14);
            env->regs[8] = ldl_phys(cs->as, frameptr + 0x18);
            env->regs[9] = ldl_phys(cs->as, frameptr + 0x1c);
            env->regs[10] = ldl_phys(cs->as, frameptr + 0x20);
            env->regs[11] = ldl_phys(cs->as, frameptr + 0x24);

            frameptr += 0x28;
        }

        /* Pop registers. TODO: make these accesses use the correct
         * attributes and address space (S/NS, priv/unpriv) and handle
         * memory transaction failures.
         */
        env->regs[0] = ldl_phys(cs->as, frameptr);
        env->regs[1] = ldl_phys(cs->as, frameptr + 0x4);
        env->regs[2] = ldl_phys(cs->as, frameptr + 0x8);
        env->regs[3] = ldl_phys(cs->as, frameptr + 0xc);
        env->regs[12] = ldl_phys(cs->as, frameptr + 0x10);
        env->regs[14] = ldl_phys(cs->as, frameptr + 0x14);
        env->regs[15] = ldl_phys(cs->as, frameptr + 0x18);

        /* Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
         * assume the r15 in the stack frame should be a Thumb-style "lsbit
         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
         * complain about the badly behaved guest.
         */
        if (env->regs[15] & 1) {
            env->regs[15] &= ~1U;
            if (!arm_feature(env, ARM_FEATURE_V8)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "M profile return from interrupt with misaligned "
                              "PC is UNPREDICTABLE on v7M\n");
            }
        }

        xpsr = ldl_phys(cs->as, frameptr + 0x1c);

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* For v8M we have to check whether the xPSR exception field
             * matches the EXCRET value for return to handler/thread
             * before we commit to changing the SP and xPSR.
             */
            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
            if (return_to_handler != will_be_handler) {
                /* Take an INVPC UsageFault on the current stack.
                 * By this point we will have switched to the security state
                 * for the background state, so this UsageFault will target
                 * that state.
                 */
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                        env->v7m.secure);
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
                v7m_exception_taken(cpu, excret);
                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                              "stackframe: failed exception return integrity "
                              "check\n");
                return;
            }
        }

        /* Commit to consuming the stack frame */
        frameptr += 0x20;
        /* Undo stack alignment (the SPREALIGN bit indicates that the original
         * pre-exception SP was not 8-aligned and we added a padding word to
         * align it, so we undo this by ORing in the bit that increases it
         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
         * would work too but a logical OR is how the pseudocode specifies it.)
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;
    }
    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
    xpsr_write(env, xpsr, ~XPSR_SPREALIGN);

    /* The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /* Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        v7m_push_stack(cpu);
        v7m_exception_taken(cpu, excret);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
static void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;

    arm_log_exception(cs->exception_index);

    /* For exceptions we just mark as pending on the NVIC, and let that
     * handle it.
     */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
        break;
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction. */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /* Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         */
        switch (env->exception.fsr & 0xf) {
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /* All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_BKPT:
        if (semihosting_enabled()) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                qemu_log_mask(CPU_LOG_INT,
                              "...handling as semihosting call 0x%x\n",
                              env->regs[0]);
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(cpu);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }

    lr = R_V7M_EXCRET_RES1_MASK |
        R_V7M_EXCRET_S_MASK |
        R_V7M_EXCRET_DCRS_MASK |
        R_V7M_EXCRET_FTYPE_MASK |
        R_V7M_EXCRET_ES_MASK;
    if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
        lr |= R_V7M_EXCRET_SPSEL_MASK;
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr);
    qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
}
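/* For reference: with the FTYPE/S/ES bits set as above (the v8M Security
 * Extension view of EXC_RETURN), the classic values this construction
 * produces are 0xfffffff1 (return to Handler mode, main stack),
 * 0xfffffff9 (Thread mode, main stack) and 0xfffffffd (Thread mode,
 * process stack).
 */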
/* Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set. This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /* Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
     * mode, then we can copy from r8-r14. Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
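/* For reference, the mapping implemented above (and inverted below) is:
 *   x8-x12  <-> r8-r12 (usr)         x13/x14 <-> SP/LR (usr)
 *   x15     <-> SP (hyp)             x16/x17 <-> LR/SP (irq)
 *   x18/x19 <-> LR/SP (svc)          x20/x21 <-> LR/SP (abt)
 *   x22/x23 <-> LR/SP (und)          x24-x30 <-> r8-r14 (fiq)
 * with the live r13/r14 used instead of the bank when we are in that mode.
 */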
/* Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set. This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /* Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14. Otherwise, we copy the x register to the banked r13 and r14
     * of the mode.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /* HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
     * mode, then we can copy to r8-r14. Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    /* TODO: Vectored interrupt controller. */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction. */
        offset = 0;
        break;
    case EXCP_BKPT:
        env->exception.fsr = 2;
        /* Fall through to prefetch abort. */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    switch_mode (env, new_mode);
    /* For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits. */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set. */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    env->daif |= mask;
    /* this is a lie, as there was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4
     */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
}
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);

    if (arm_current_el(env) < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
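/* For reference: the offsets applied above follow the AArch64 vector table
 * layout, where the base is vbar_el[new_el] and 0x000/0x200/0x400/0x600
 * select "current EL with SP0" / "current EL with SPx" / "lower EL using
 * AArch64" / "lower EL using AArch32", with the IRQ and FIQ vectors at
 * +0x80 and +0x100 within each block.
 */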
static inline bool check_for_semihosting(CPUState *cs)
{
    /* Check whether this exception is a semihosting call; if so
     * then handle it and return true; otherwise return false.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            /* This is always the 64-bit semihosting exception.
             * The "is this usermode" and "is semihosting enabled"
             * checks have been done at translate time.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...handling as semihosting call 0x%" PRIx64 "\n",
                          env->xregs[0]);
            env->xregs[0] = do_arm_semihosting(env);
            return true;
        }
        return false;
    } else {
        uint32_t imm;

        /* Only intercept calls from privileged modes, to provide some
         * semblance of security.
         */
        if (cs->exception_index != EXCP_SEMIHOST &&
            (!semihosting_enabled() ||
             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
            return false;
        }

        switch (cs->exception_index) {
        case EXCP_SEMIHOST:
            /* This is always a semihosting call; the "is this usermode"
             * and "is semihosting enabled" checks have been done at
             * translate time.
             */
            break;
        case EXCP_SWI:
            /* Check for semihosting interrupt. */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    break;
                }
            } else {
                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
                    & 0xffffff;
                if (imm == 0x123456) {
                    break;
                }
            }
            return false;
        case EXCP_BKPT:
            /* See if this is a semihosting syscall. */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    env->regs[15] += 2;
                    break;
                }
            }
            return false;
        default:
            return false;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return true;
    }
}
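/* For reference, the trap instructions recognized above are the usual ARM
 * semihosting conventions: Thumb "svc 0xab" or "bkpt 0xab", and A32
 * "svc 0x123456".
 */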
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      env->exception.syndrome >> ARM_EL_EC_SHIFT,
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /* Semihosting semantics depend on the register width of the
     * code that caused the exception, not the target exception level,
     * so must be handled here.
     */
    if (check_for_semihosting(cs)) {
        return;
    }

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_S1E2:
        return 2;
    case ARMMMUIdx_S1E3:
        return 3;
    case ARMMMUIdx_S1SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MNegPri:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSNegPri:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx == ARMMMUIdx_MNegPri ||
                mmu_idx == ARMMMUIdx_MSNegPri;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_S2NS) {
        return (env->cp15.hcr_el2 & HCR_VM) == 0;
    }
    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
    }
    return mmu_idx;
}
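/* For example, ARMMMUIdx_S12NSE1 becomes ARMMMUIdx_S1NSE1 here; this relies
 * on the stage 1 indexes being laid out at a fixed offset from the combined
 * stage 1+2 indexes in the ARMMMUIdx enumeration.
 */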
/* Returns TBI0 value for current regime el */
uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    TCR *tcr;
    uint32_t el;

    /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
     * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
     */
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    tcr = regime_tcr(env, mmu_idx);
    el = regime_el(env, mmu_idx);

    if (el > 1) {
        return extract64(tcr->raw_tcr, 20, 1);
    } else {
        return extract64(tcr->raw_tcr, 37, 1);
    }
}
/* Returns TBI1 value for current regime el */
uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    TCR *tcr;
    uint32_t el;

    /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
     * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
     */
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    tcr = regime_tcr(env, mmu_idx);
    el = regime_el(env, mmu_idx);

    if (el > 1) {
        return 0;
    } else {
        return extract64(tcr->raw_tcr, 38, 1);
    }
}
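/* For reference: TCR_EL1 controls two VA ranges and so has separate
 * TBI0/TBI1 bits (37 and 38), while TCR_EL2 and TCR_EL3 control a single
 * range with one TBI bit (20), which is why tbi1 reads as 0 there.
 */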
/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}
/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_MUser:
        return true;
    default:
        return false;
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved. */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
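/* For reference, the simple AP encoding decoded above is:
 *   0 = privileged read/write, 1 = read/write at any privilege,
 *   2 = privileged read-only,  3 = read-only at any privilege.
 */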
static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}
/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
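/* As a worked example: for a 32-bit VA 0x12345678 with TTBCR.N = 0,
 * VA[31:20] = 0x123 indexes the level 1 table, and the expression above
 * ORs in (0x12345678 >> 18) & 0x3ffc = 0x48c, i.e. 0x123 * 4 bytes.
 */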
/* Translate a S1 pagetable walk through S2 if needed. */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               uint32_t *fsr,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fsr, fi);
        if (ret) {
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
/* All loads done in the course of a page table walk go through here.
 * TODO: rather than ignoring errors from physical memory reads (which
 * are external aborts in ARM terminology) we should propagate this
 * error out so that we can turn it into a Data Abort if this walk
 * was being done for a CPU load/store or an address translation instruction
 * (but not if it was for a debug access).
 */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, uint32_t *fsr,
                            ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    AddressSpace *as;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        return address_space_ldl_be(as, addr, attrs, NULL);
    } else {
        return address_space_ldl_le(as, addr, attrs, NULL);
    }
}
static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, uint32_t *fsr,
                            ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    AddressSpace *as;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        return address_space_ldq_be(as, addr, attrs, NULL);
    } else {
        return address_space_ldq_le(as, addr, attrs, NULL);
    }
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size, uint32_t *fsr,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        code = 5;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fsr, fi);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault. */
        code = 5;
        goto do_fault;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2) {
            code = 9; /* Section domain fault. */
        } else {
            code = 11; /* Page domain fault. */
        }
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section. */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry. */
        if (type == 1) {
            /* Coarse pagetable. */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fsr, fi);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            code = 7;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            abort();
        }
        code = 15;
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault. */
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    *fsr = code | (domain << 4);
    return true;
}
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, uint32_t *fsr,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        code = 5;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fsr, fi);
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        code = 5;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section. */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        if (type != 1) {
            code = 9; /* Section domain fault. */
        } else {
            code = 11; /* Page domain fault. */
        }
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection. */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        code = 13;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fsr, fi);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            code = 7;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            abort();
        }
        code = 15;
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit. */
            if ((ap & 1) == 0) {
                /* Access flag fault. */
                code = (code == 15) ? 6 : 3;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault. */
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    *fsr = code | (domain << 4);
    return true;
}
/* Fault type for long-descriptor MMU fault reporting; this corresponds
 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
 */
typedef enum {
    translation_fault = 1,
    access_fault = 2,
    permission_fault = 3,
} MMUFaultType;
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed. */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages. */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages. */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages. */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks. */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that. */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr, uint32_t *fsr,
                               ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    MMUFaultType fault_type = translation_fault;
    uint32_t level;
    uint32_t epd = 0;
    int32_t t0sz, t1sz;
    uint32_t tg;
    uint64_t ttbr;
    int ttbr_select;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride = 9;
    int32_t addrsize;
    int inputsize;
    int32_t tbi = 0;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid = true;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        addrsize = 64;
        if (el > 1) {
            if (mmu_idx != ARMMMUIdx_S2NS) {
                tbi = extract64(tcr->raw_tcr, 20, 1);
            }
        } else {
            if (extract64(address, 55, 1)) {
                tbi = extract64(tcr->raw_tcr, 38, 1);
            } else {
                tbi = extract64(tcr->raw_tcr, 37, 1);
            }
        }
        tbi *= 8;

        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
         * invalid.
         */
        if (el > 1) {
            ttbr1_valid = false;
        }
    } else {
        addrsize = 32;
        /* There is no TTBR1 for EL2 */
        if (el == 2) {
            ttbr1_valid = false;
        }
    }

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    if (aarch64) {
        /* AArch64 translation. */
        t0sz = extract32(tcr->raw_tcr, 0, 6);
        t0sz = MIN(t0sz, 39);
        t0sz = MAX(t0sz, 16);
    } else if (mmu_idx != ARMMMUIdx_S2NS) {
        /* AArch32 stage 1 translation. */
        t0sz = extract32(tcr->raw_tcr, 0, 3);
    } else {
        /* AArch32 stage 2 translation. */
        bool sext = extract32(tcr->raw_tcr, 4, 1);
        bool sign = extract32(tcr->raw_tcr, 3, 1);
        /* Address size is 40-bit for a stage 2 translation,
         * and t0sz can be negative (from -8 to 7),
         * so we need to adjust it to use the TTBR selecting logic below.
         */
        addrsize = 40;
        t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8;

        /* If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
    }
    t1sz = extract32(tcr->raw_tcr, 16, 6);
    if (aarch64) {
        t1sz = MIN(t1sz, 39);
        t1sz = MAX(t1sz, 16);
    }
    if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (ttbr1_valid && t1sz &&
               !extract64(~address, addrsize - t1sz, t1sz - tbi)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz && ttbr1_valid) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = translation_fault;
        goto do_fault;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = regime_ttbr(env, mmu_idx, 0);
        epd = extract32(tcr->raw_tcr, 7, 1);
        inputsize = addrsize - t0sz;

        tg = extract32(tcr->raw_tcr, 14, 2);
        if (tg == 1) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 2) { /* 16KB pages */
            stride = 11;
        }
    } else {
        /* We should only be here if TTBR1 is valid */
        assert(ttbr1_valid);

        ttbr = regime_ttbr(env, mmu_idx, 1);
        epd = extract32(tcr->raw_tcr, 23, 1);
        inputsize = addrsize - t1sz;

        tg = extract32(tcr->raw_tcr, 30, 2);
        if (tg == 3) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 1) { /* 16KB pages */
            stride = 11;
        }
    }

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
8343 * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
8345 uint32_t sl0
= extract32(tcr
->raw_tcr
, 6, 2);
8346 uint32_t startlevel
;
8349 if (!aarch64
|| stride
== 9) {
8350 /* AArch32 or 4KB pages */
8351 startlevel
= 2 - sl0
;
8353 /* 16KB or 64KB pages */
8354 startlevel
= 3 - sl0
;
8357 /* Check that the starting level is valid. */
8358 ok
= check_s2_mmu_setup(cpu
, aarch64
, startlevel
,
8361 fault_type
= translation_fault
;
8367 indexmask_grainsize
= (1ULL << (stride
+ 3)) - 1;
8368 indexmask
= (1ULL << (inputsize
- (stride
* (4 - level
)))) - 1;
8370 /* Now we can extract the actual base address from the TTBR */
8371 descaddr
= extract64(ttbr
, 0, 48);
8372 descaddr
&= ~indexmask
;
8374 /* The address field in the descriptor goes up to bit 39 for ARMv7
8375 * but up to bit 47 for ARMv8, but we use the descaddrmask
8376 * up to bit 39 for AArch32, because we don't need other bits in that case
8377 * to construct next descriptor address (anyway they should be all zeroes).
8379 descaddrmask
= ((1ull << (aarch64
? 48 : 40)) - 1) &
8380 ~indexmask_grainsize
;
8382 /* Secure accesses start with the page table in secure memory and
8383 * can be downgraded to non-secure at any step. Non-secure accesses
8384 * remain non-secure. We implement this by just ORing in the NSTable/NS
8385 * bits at each step.
8387 tableattrs
= regime_is_secure(env
, mmu_idx
) ? 0 : (1 << 4);
8389 uint64_t descriptor
;
8392 descaddr
|= (address
>> (stride
* (4 - level
))) & indexmask
;
8394 nstable
= extract32(tableattrs
, 4, 1);
8395 descriptor
= arm_ldq_ptw(cs
, descaddr
, !nstable
, mmu_idx
, fsr
, fi
);
8400 if (!(descriptor
& 1) ||
8401 (!(descriptor
& 2) && (level
== 3))) {
8402 /* Invalid, or the Reserved level 3 encoding */
8405 descaddr
= descriptor
& descaddrmask
;
8407 if ((descriptor
& 2) && (level
< 3)) {
8408 /* Table entry. The top five bits are attributes which may
8409 * propagate down through lower levels of the table (and
8410 * which are all arranged so that 0 means "no effect", so
8411 * we can gather them up by ORing in the bits at each level).
8413 tableattrs
|= extract64(descriptor
, 59, 5);
8415 indexmask
= indexmask_grainsize
;
8418 /* Block entry at level 1 or 2, or page entry at level 3.
8419 * These are basically the same thing, although the number
8420 * of bits we pull in from the vaddr varies.
8422 page_size
= (1ULL << ((stride
* (4 - level
)) + 3));
8423 descaddr
|= (address
& (page_size
- 1));
8424 /* Extract attributes from the descriptor */
8425 attrs
= extract64(descriptor
, 2, 10)
8426 | (extract64(descriptor
, 52, 12) << 10);
8428 if (mmu_idx
== ARMMMUIdx_S2NS
) {
8429 /* Stage 2 table descriptors do not include any attribute fields */
8432 /* Merge in attributes from table descriptors */
8433 attrs
|= extract32(tableattrs
, 0, 2) << 11; /* XN, PXN */
8434 attrs
|= extract32(tableattrs
, 3, 1) << 5; /* APTable[1] => AP[2] */
8435 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
8436 * means "force PL1 access only", which means forcing AP[1] to 0.
8438 if (extract32(tableattrs
, 2, 1)) {
8441 attrs
|= nstable
<< 3; /* NS */
8444 /* Here descaddr is the final physical address, and attributes
8447 fault_type
= access_fault
;
8448 if ((attrs
& (1 << 8)) == 0) {
8453 ap
= extract32(attrs
, 4, 2);
8454 xn
= extract32(attrs
, 12, 1);
8456 if (mmu_idx
== ARMMMUIdx_S2NS
) {
8458 *prot
= get_S2prot(env
, ap
, xn
);
8460 ns
= extract32(attrs
, 3, 1);
8461 pxn
= extract32(attrs
, 11, 1);
8462 *prot
= get_S1prot(env
, mmu_idx
, aarch64
, ap
, ns
, xn
, pxn
);
8465 fault_type
= permission_fault
;
8466 if (!(*prot
& (1 << access_type
))) {
8471 /* The NS bit will (as required by the architecture) have no effect if
8472 * the CPU doesn't support TZ or this is a non-secure translation
8473 * regime, because the attribute will already be non-secure.
8475 txattrs
->secure
= false;
8477 *phys_ptr
= descaddr
;
8478 *page_size_ptr
= page_size
;
8482 /* Long-descriptor format IFSR/DFSR value */
8483 *fsr
= (1 << 9) | (fault_type
<< 2) | level
;
8484 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
8485 fi
->stage2
= fi
->s1ptw
|| (mmu_idx
== ARMMMUIdx_S2NS
);
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot, uint32_t *fsr)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }

            if (rsize < TARGET_PAGE_BITS) {
                qemu_log_mask(LOG_UNIMP,
                              "DRSR[%d]: No support for MPU (sub)region "
                              "alignment of %" PRIu32 " bits. Minimum is %d\n",
                              n, rsize, TARGET_PAGE_BITS);
                continue;
            }
            if (srdis) {
                continue;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                *fsr = 0;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    *fsr = 0x00d; /* Permission fault */
    return !(*prot & (1 << access_type));
}
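/* For reference, DRSR.Rsize encodes a region of 2^(Rsize+1) bytes, as
 * computed above: Rsize = 11 gives a 4KB region, and Rsize = 31 covers
 * the entire 4GB address space.
 */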
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot, uint32_t *fsr)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;

    *phys_ptr = address;
    *prot = 0;

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
        hit = true;
    } else {
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                continue;
            }

            if (hit) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                *fsr = 0x00d; /* permission fault */
                return true;
            }

            matchregion = n;
            hit = true;

            if (base & ~TARGET_PAGE_MASK) {
                qemu_log_mask(LOG_UNIMP,
                              "MPU_RBAR[%d]: No support for MPU region base"
                              "address of 0x%" PRIx32 ". Minimum alignment is "
                              "%d\n",
                              n, base, TARGET_PAGE_BITS);
                continue;
            }
            if ((limit + 1) & ~TARGET_PAGE_MASK) {
                qemu_log_mask(LOG_UNIMP,
                              "MPU_RBAR[%d]: No support for MPU region limit"
                              "address of 0x%" PRIx32 ". Minimum alignment is "
                              "%d\n",
                              n, limit, TARGET_PAGE_BITS);
                continue;
            }
        }
    }

    if (!hit) {
        /* background fault */
        *fsr = 0;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
    }

    *fsr = 0x00d; /* Permission fault */
    return !(*prot & (1 << access_type));
}
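/* For reference: since RBAR/RLAR hold bits [31:5] of the base and limit,
 * regions have 32-byte granularity; e.g. an RBAR base of 0x20000040 with
 * RLAR limit bits of 0x200000e0 describes 0x20000040..0x200000ff inclusive.
 */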
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot, uint32_t *fsr)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        *fsr = 2;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        *fsr = 1;
        return true;
    case 1:
        if (is_user) {
            *fsr = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            *fsr = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        *fsr = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
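/* For reference, the PMSAv5 region base registers encode the region size
 * in bits [5:1]: the mask computed above is (1 << (field + 1)) - 1, so a
 * field of 11 gives a 4KB region and 31 covers the whole 4GB space (the
 * two-step shift avoids an undefined 1 << 32).
 */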
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fsr: set to the DFSR/IFSR value on failure
 * @fi: set to fault info if the translation fails
 */
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size, uint32_t *fsr,
                          ARMMMUFaultInfo *fi)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fsr, fi);

            /* If S1 fails or S2 is disabled, return early.  */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation.  */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fsr, fi);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms.  */
            *prot &= s2_prot;
            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fsr);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fsr);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fsr);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
                                  attrs, prot, page_size, fsr, fi);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
                                attrs, prot, page_size, fsr, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr,
                                prot, page_size, fsr, fi);
    }
}
/* Walk the page table and (if the mapping exists) add the page
 * to the TLB. Return false on success, or true on failure. Populate
 * fsr with ARM DFSR/IFSR fault register format value on failure.
 */
bool arm_tlb_fill(CPUState *cs, vaddr address,
                  MMUAccessType access_type, int mmu_idx, uint32_t *fsr,
                  ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, address, access_type,
                        core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
                        &attrs, &prot, &page_size, fsr, fi);
    if (!ret) {
        /* Map a single [sub]page.  */
        phys_addr &= TARGET_PAGE_MASK;
        address &= TARGET_PAGE_MASK;
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
    }

    return ret;
}

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint32_t fsr;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fsr, &fi);

    if (ret) {
        return -1;
    }
    return phys_addr;
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    uint32_t mask;
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        mask = 0;
        if ((reg & 1) && el) {
            mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
        }
        if (!(reg & 4)) {
            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        }
        /* EPSR reads as zero */
        return xpsr_read(env) & mask;
    case 20: /* CONTROL */
        return env->v7m.control[env->v7m.secure];
    case 0x94: /* CONTROL_NS */
        /* We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS];
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) ?
            env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) ?
            env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                      " register %d\n", reg);
        return 0;
    }
}
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /* We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    if (arm_current_el(env) == 0 && reg > 7) {
        /* only xPSR sub-fields may be written by unprivileged */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        /* only APSR is actually writable */
        if (!(reg & 4)) {
            uint32_t apsrmask = 0;

            if (mask & 8) {
                apsrmask |= XPSR_NZCV | XPSR_Q;
            }
            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
                apsrmask |= XPSR_GE;
            }
            xpsr_write(env, val, apsrmask);
        }
        break;
    case 8: /* MSP */
        if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /* Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         */
        if (!arm_v7m_is_handler_mode(env)) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
        env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                      " register %d\n", reg);
        return;
    }
}
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /* Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[maxidx];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /* If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /* OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /* Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
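/* Worked example of the overflow test above: add16_sat(0x7fff, 1) wraps
 * to 0x8000, so the result sign differs from the first operand while the
 * operand signs agree, and the sum saturates to 0x7fff. Likewise
 * add16_sat(0x8000, 0xffff), i.e. -32768 + -1, saturates to 0x8000.
 */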
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)

#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
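/* The ARITH_GE expansions above also produce the CPSR.GE bits later
 * consumed by SEL: for UADD16 a carry out of a halfword lane
 * ((sum >> 16) == 1) sets both GE bits for that lane, and for USUB16
 * the absence of a borrow ((sum >> 16) == 0) does the same.
 */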
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}

uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}

void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) {
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    }
    if (changed & (1 << 25))
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP

float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}

/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, fsz, sign) \
float##fsz HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

#define CONV_FTOI(name, fsz, sign, round) \
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS

/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float64_maybe_silence_nan(r, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float32_maybe_silence_nan(r, &env->vfp.fp_status);
}
/* VFP3 fixed point conversion.  */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = itype##_to_##float##fsz(x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
}

/* Notice that we want only input-denormal exception flags from the
 * scalbn operation: the other possible flags (overflow+inexact if
 * we overflow to infinity, output-denormal) aren't correct for the
 * complete scale-and-convert operation.
 */
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
                                             uint32_t shift, \
                                             void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    int old_exc_flags = get_float_exception_flags(fpst); \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    old_exc_flags |= get_float_exception_flags(fpst) \
        & float_flag_input_denormal; \
    set_float_exception_flags(old_exc_flags, fpst); \
    return float##fsz##_to_##itype##round(tmp, fpst); \
}

#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)

#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
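/* For instance, VFP_CONV_FIX(sl, d, 64, 64, int32) above expands to a
 * helper vfp_sltod() converting signed 32-bit fixed-point to double:
 * a worked example is x = 0x00018000 with shift = 16 (Q16.16), which
 * yields 98304 * 2^-16 = 1.5.
 */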
/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
/* Half precision conversions.  */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float32 r = float16_to_float32(make_float16(a), ieee, s);
    if (ieee) {
        return float32_maybe_silence_nan(r, s);
    }
    return r;
}

static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float32_to_float16(a, ieee, s);
    if (ieee) {
        r = float16_maybe_silence_nan(r, s);
    }
    return float16_val(r);
}

float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}

float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
    if (ieee) {
        return float64_maybe_silence_nan(r, &env->vfp.fp_status);
    }
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
    if (ieee) {
        r = float16_maybe_silence_nan(r, &env->vfp.fp_status);
    }
    return float16_val(r);
}
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
/* NEON helpers.  */

/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)
#define float32_maxnorm make_float32(0x7f7fffff)
#define float64_maxnorm make_float64(0x7fefffffffffffffLL)

/* Reciprocal functions
 *
 * The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM, see FPRecipEstimate()
 */

static float64 recip_estimate(float64 a, float_status *real_fp_status)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = *real_fp_status;
    float_status *s = &dummy_status;
    /* q = (int)(a * 512.0) */
    float64 q = float64_mul(float64_512, a, s);
    int64_t q_int = float64_to_int64_round_to_zero(q, s);

    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
    q = int64_to_float64(q_int, s);
    q = float64_add(q, float64_half, s);
    q = float64_div(q, float64_512, s);
    q = float64_div(float64_one, q, s);

    /* s = (int)(256.0 * r + 0.5) */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0 */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
/* Common wrapper to call recip_estimate */
static float64 call_recip_estimate(float64 num, int off, float_status *fpst)
{
    uint64_t val64 = float64_val(num);
    uint64_t frac = extract64(val64, 0, 52);
    int64_t exp = extract64(val64, 52, 11);
    uint64_t sbit;
    float64 scaled, estimate;

    /* Generate the scaled number for the estimate function */
    if (exp == 0) {
        if (extract64(frac, 51, 1) == 0) {
            exp = -1;
            frac = extract64(frac, 0, 50) << 2;
        } else {
            frac = extract64(frac, 0, 51) << 1;
        }
    }

    /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */
    scaled = make_float64((0x3feULL << 52)
                          | extract64(frac, 44, 8) << 44);

    estimate = recip_estimate(scaled, fpst);

    /* Build new result */
    val64 = float64_val(estimate);
    sbit = 0x8000000000000000ULL & val64;
    exp = off - exp;
    frac = extract64(val64, 0, 52);

    if (exp == 0) {
        frac = 1ULL << 51 | extract64(frac, 1, 51);
    } else if (exp == -1) {
        frac = 1ULL << 50 | extract64(frac, 2, 50);
        exp = 0;
    }

    return make_float64(sbit | (exp << 52) | frac);
}

static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even: /* Round to Nearest */
        return true;
    case float_round_up: /* Round to +Inf */
        return !sign_bit;
    case float_round_down: /* Round to -Inf */
        return sign_bit;
    case float_round_to_zero: /* Round to Zero */
        return false;
    }

    g_assert_not_reached();
}
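/* round_to_inf() implements the overflow policy for these estimates:
 * when the true result is too large to represent, nearest-even rounding
 * returns infinity, round-to-zero returns the largest finite value
 * (maxnorm), and the directed modes choose between the two based on the
 * sign of the result.
 */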
float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    uint32_t f32_sbit = 0x80000000ULL & f32_val;
    int32_t f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    float64 f64, r64;
    uint64_t r64_val;
    int64_t r64_exp;
    uint64_t r64_frac;

    /* Deal with any special cases */
    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_maybe_silence_nan(f32, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) {
        /* Abs(value) < 2.0^-128 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sbit)) {
            return float32_set_sign(float32_infinity, float32_is_neg(f32));
        } else {
            return float32_set_sign(float32_maxnorm, float32_is_neg(f32));
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }

    f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29);
    r64 = call_recip_estimate(f64, 253, fpst);
    r64_val = float64_val(r64);
    r64_exp = extract64(r64_val, 52, 11);
    r64_frac = extract64(r64_val, 0, 52);

    /* result = sign : result_exp<7:0> : fraction<51:29>; */
    return make_float32(f32_sbit |
                        (r64_exp & 0xff) << 23 |
                        extract64(r64_frac, 29, 24));
}

float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    uint64_t f64_sbit = 0x8000000000000000ULL & f64_val;
    int64_t f64_exp = extract64(f64_val, 52, 11);
    float64 r64;
    uint64_t r64_val;
    int64_t r64_exp;
    uint64_t r64_frac;

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_maybe_silence_nan(f64, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Abs(value) < 2.0^-1024 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sbit)) {
            return float64_set_sign(float64_infinity, float64_is_neg(f64));
        } else {
            return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
        }
    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    r64 = call_recip_estimate(f64, 2045, fpst);
    r64_val = float64_val(r64);
    r64_exp = extract64(r64_val, 52, 11);
    r64_frac = extract64(r64_val, 0, 52);

    /* result = sign : result_exp<10:0> : fraction<51:0> */
    return make_float64(f64_sbit |
                        ((r64_exp & 0x7ff) << 52) |
                        r64_frac);
}
/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = *real_fp_status;
    float_status *s = &dummy_status;
    float64 q;
    int64_t q_int;

    if (float64_lt(a, float64_half, s)) {
        /* range 0.25 <= a < 0.5 */

        /* a in units of 1/512 rounded down */
        /* q0 = (int)(a * 512.0);  */
        q = float64_mul(float64_512, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_512, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    } else {
        /* range 0.5 <= a < 1.0 */

        /* a in units of 1/256 rounded down */
        /* q1 = (int)(a * 256.0); */
        q = float64_mul(float64_256, a, s);
        int64_t q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_256, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    }
    /* r in units of 1/256 rounded to nearest */
    /* s = (int)(256.0 * r + 0.5); */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0;*/
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}

float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sbit = 0x80000000 & val;
    int32_t f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;
    uint64_t val64;
    int result_exp;
    float64 f64;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_maybe_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f32_frac) << 29;
    if (f32_exp == 0) {
        while (extract64(f64_frac, 51, 1) == 0) {
            f64_frac = f64_frac << 1;
            f32_exp = f32_exp - 1;
        }
        f64_frac = extract64(f64_frac, 0, 51) << 1;
    }

    if (extract64(f32_exp, 0, 1) == 0) {
        f64 = make_float64(((uint64_t) f32_sbit) << 32
                           | (0x3feULL << 52)
                           | f64_frac);
    } else {
        f64 = make_float64(((uint64_t) f32_sbit) << 32
                           | (0x3fdULL << 52)
                           | f64_frac);
    }

    result_exp = (380 - f32_exp) / 2;

    f64 = recip_sqrt_estimate(f64, s);

    val64 = float64_val(f64);

    val = ((result_exp & 0xff) << 23)
        | ((val64 >> 29) & 0x7fffff);
    return make_float32(val);
}

float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    uint64_t f64_sbit = 0x8000000000000000ULL & val;
    int64_t f64_exp = extract64(val, 52, 11);
    uint64_t f64_frac = extract64(val, 0, 52);
    int64_t result_exp;
    uint64_t result_frac;

    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, s)) {
            float_raise(float_flag_invalid, s);
            nan = float64_maybe_silence_nan(f64, s);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan(s);
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        float_raise(float_flag_invalid, s);
        return float64_default_nan(s);
    } else if (float64_is_infinity(f64)) {
        return float64_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    if (f64_exp == 0) {
        while (extract64(f64_frac, 51, 1) == 0) {
            f64_frac = f64_frac << 1;
            f64_exp = f64_exp - 1;
        }
        f64_frac = extract64(f64_frac, 0, 51) << 1;
    }

    if (extract64(f64_exp, 0, 1) == 0) {
        f64 = make_float64(f64_sbit
                           | (0x3feULL << 52)
                           | f64_frac);
    } else {
        f64 = make_float64(f64_sbit
                           | (0x3fdULL << 52)
                           | f64_frac);
    }

    result_exp = (3068 - f64_exp) / 2;

    f64 = recip_sqrt_estimate(f64, s);

    result_frac = extract64(float64_val(f64), 0, 52);

    return make_float64(f64_sbit |
                        ((result_exp & 0x7ff) << 52) |
                        result_frac);
}
uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(a & 0x7fffffff) << 21));

    f64 = recip_estimate(f64, s);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    if (a & 0x80000000) {
        f64 = make_float64((0x3feULL << 52)
                           | ((uint64_t)(a & 0x7fffffff) << 21));
    } else { /* bits 31-30 == '01' */
        f64 = make_float64((0x3fdULL << 52)
                           | ((uint64_t)(a & 0x3fffffff) << 22));
    }

    f64 = recip_sqrt_estimate(f64, fpst);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}
/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    new_flags = get_float_exception_flags(fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for TIEAWAY and ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
/* The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}