target/arm: Fix sve_zcr_len_for_el for VHE mode running
/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */
#ifndef CONFIG_USER_ONLY

static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool s1_is_el0,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));
#endif

static void switch_mode(CPUARMState *env, int mode);
static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
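
/*
 * Illustrative sketch (not part of the upstream code): how a caller can
 * use the readback convention described above to detect a write that was
 * silently ignored. write_cpustate_to_list() and write_list_to_cpustate()
 * below use the same pattern for the migration and KVM sync paths.
 */
static inline bool raw_write_verified(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t v)
{
    write_raw_cp_reg(env, ri, v);
    /* Constant or read-only registers will fail this readback check */
    return read_raw_cp_reg(env, ri) == v;
}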
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}
/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW. */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR. */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
}
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}
static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;
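
/*
 * Illustrative sketch (not an upstream event): a hypothetical fixed-rate
 * event would plug into this scheme as
 *
 *     { .number = <event id>,
 *       .supported = event_always_supported,
 *       .get_count = <function returning the free-running total so far>,
 *       .ns_per_count = <function converting a count to nanoseconds>,
 *     }
 *
 * The real definitions live in pm_events[] below; pmu_init() consumes
 * them to populate PMCEID[01] and supported_event_map[].
 */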
static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return icount_enabled() == 1; /* Precise instruction counting */
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return icount_to_ns((int64_t)icount);
}
#endif
static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}
static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];
/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
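
/*
 * Worked example of the PMCEID mapping above: CPU_CYCLES (0x011) has bit 5
 * (0x20) clear, so it sets bit 17 of PMCEID0; STALL_FRONTEND (0x023) has
 * bit 5 set, so it sets bit 3 (0x23 & 0x1f) of PMCEID1.
 */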
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    uint8_t hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
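
/*
 * Worked example for the filter evaluation above (illustrative): at
 * non-secure EL0 on an EL3-capable CPU, 'filtered' is (u != nsu), so
 * PMXEVTYPER.U == 1 with NSU == 0 stops the counter from counting at EL0,
 * while setting NSU == U lets it count there again.
 */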
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
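
/*
 * Numeric sketch of the delta bookkeeping above (illustrative, PMCRD clear):
 * suppose the raw cycle source reads 1000 when the counter is enabled with
 * a guest value of 0; pmccntr_op_finish() stores delta = 1000 - 0 = 1000.
 * If the guest reads PMCCNTR when the raw count is 1500, pmccntr_op_start()
 * computes 1500 - 1000 = 500 as the guest-visible count.
 */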
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}
void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}
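
/*
 * Sketch (illustration only) of the bracketing idiom the callers above rely
 * on: code that mutates PMU configuration does so between a pmu_op_start()
 * and pmu_op_finish() pair, e.g.
 *
 *     pmu_op_start(env);    // fold raw counts into guest-visible state
 *     ... modify PMCR, event types, or counter values ...
 *     pmu_op_finish(env);   // recompute deltas and the overflow timer
 *
 * pmcr_write() below is a real instance of this pattern.
 */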
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}
static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}
static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}
static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}

static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Only bits for implemented counters (and the cycle counter's C bit)
     * can be changed.
     */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
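
/*
 * Worked example of the masking above (illustrative): a write of
 * 0xffff0021 stores 0xffff0020, because bits [4:0] are forced to zero
 * while the remaining bits are preserved.
 */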
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (ri->state == ARM_CP_STATE_AA64) {
        if (arm_feature(env, ARM_FEATURE_AARCH64) &&
            !cpu_isar_feature(aa64_aa32_el1, cpu)) {
            value |= SCR_FW | SCR_AW;   /* these two bits are RES1. */
        }
        valid_mask &= ~SCR_NET;

        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= SCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= SCR_API | SCR_APK;
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            valid_mask |= SCR_EEL2;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= SCR_ATA;
        }
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * scr_write will set the RES1 bits on an AArch64-only CPU.
     * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
     */
    scr_write(env, ri, 0);
}
static CPAccessResult access_aa64_tid2(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    bool el1 = arm_current_el(env) == 1;
    uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}

static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1987 .accessfn = pmreg_access,
1988 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
1989 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
1990 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
1991 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1992 .accessfn = pmreg_access,
1993 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
1994 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
1995 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1996 .accessfn = pmreg_access_xevcntr,
1997 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
1998 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
1999 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2000 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2001 .accessfn = pmreg_access_xevcntr,
2002 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2003 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2004 .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2005 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2006 .resetvalue = 0,
2007 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2008 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2009 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2010 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2011 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2012 .resetvalue = 0,
2013 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2014 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2015 .access = PL1_RW, .accessfn = access_tpm,
2016 .type = ARM_CP_ALIAS | ARM_CP_IO,
2017 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2018 .resetvalue = 0,
2019 .writefn = pmintenset_write, .raw_writefn = raw_write },
2020 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2021 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2022 .access = PL1_RW, .accessfn = access_tpm,
2023 .type = ARM_CP_IO,
2024 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2025 .writefn = pmintenset_write, .raw_writefn = raw_write,
2026 .resetvalue = 0x0 },
2027 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2028 .access = PL1_RW, .accessfn = access_tpm,
2029 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2030 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2031 .writefn = pmintenclr_write, },
2032 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2033 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2034 .access = PL1_RW, .accessfn = access_tpm,
2035 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2036 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2037 .writefn = pmintenclr_write },
2038 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2039 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2040 .access = PL1_R,
2041 .accessfn = access_aa64_tid2,
2042 .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2043 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2044 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2045 .access = PL1_RW,
2046 .accessfn = access_aa64_tid2,
2047 .writefn = csselr_write, .resetvalue = 0,
2048 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2049 offsetof(CPUARMState, cp15.csselr_ns) } },
2050 /* Auxiliary ID register: this actually has an IMPDEF value but for now
2051 * just RAZ for all cores:
2052 */
2053 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2054 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2055 .access = PL1_R, .type = ARM_CP_CONST,
2056 .accessfn = access_aa64_tid1,
2057 .resetvalue = 0 },
2058 /* Auxiliary fault status registers: these also are IMPDEF, and we
2059 * choose to RAZ/WI for all cores.
2060 */
2061 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2062 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2063 .access = PL1_RW, .accessfn = access_tvm_trvm,
2064 .type = ARM_CP_CONST, .resetvalue = 0 },
2065 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2066 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2067 .access = PL1_RW, .accessfn = access_tvm_trvm,
2068 .type = ARM_CP_CONST, .resetvalue = 0 },
2069 /* MAIR can just read-as-written because we don't implement caches
2070 * and so don't need to care about memory attributes.
2071 */
2072 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2073 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2074 .access = PL1_RW, .accessfn = access_tvm_trvm,
2075 .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2076 .resetvalue = 0 },
2077 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2078 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2079 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2080 .resetvalue = 0 },
2081 /* For non-long-descriptor page tables these are PRRR and NMRR;
2082 * regardless they still act as reads-as-written for QEMU.
2083 */
2084 /* MAIR0/1 are defined separately from their 64-bit counterpart so
2085 * that each bank can be assigned the correct fieldoffset, with the
2086 * endianness handled in the field definitions.
2087 */
2088 { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2089 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2090 .access = PL1_RW, .accessfn = access_tvm_trvm,
2091 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2092 offsetof(CPUARMState, cp15.mair0_ns) },
2093 .resetfn = arm_cp_reset_ignore },
2094 { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2095 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
2096 .access = PL1_RW, .accessfn = access_tvm_trvm,
2097 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2098 offsetof(CPUARMState, cp15.mair1_ns) },
2099 .resetfn = arm_cp_reset_ignore },
2100 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2101 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2102 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2103 /* 32 bit ITLB invalidates */
2104 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2105 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2106 .writefn = tlbiall_write },
2107 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2108 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2109 .writefn = tlbimva_write },
2110 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2111 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2112 .writefn = tlbiasid_write },
2113 /* 32 bit DTLB invalidates */
2114 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2115 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2116 .writefn = tlbiall_write },
2117 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2118 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2119 .writefn = tlbimva_write },
2120 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2121 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2122 .writefn = tlbiasid_write },
2123 /* 32 bit TLB invalidates */
2124 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2125 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2126 .writefn = tlbiall_write },
2127 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2128 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2129 .writefn = tlbimva_write },
2130 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2131 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2132 .writefn = tlbiasid_write },
2133 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2134 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2135 .writefn = tlbimvaa_write },
2136 REGINFO_SENTINEL
2139 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2140 /* 32 bit TLB invalidates, Inner Shareable */
2141 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2142 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2143 .writefn = tlbiall_is_write },
2144 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2145 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2146 .writefn = tlbimva_is_write },
2147 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2148 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2149 .writefn = tlbiasid_is_write },
2150 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2151 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2152 .writefn = tlbimvaa_is_write },
2153 REGINFO_SENTINEL
2156 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2157 /* PMOVSSET is not implemented in v7 before v7ve */
2158 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2159 .access = PL0_RW, .accessfn = pmreg_access,
2160 .type = ARM_CP_ALIAS | ARM_CP_IO,
2161 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2162 .writefn = pmovsset_write,
2163 .raw_writefn = raw_write },
2164 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2165 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2166 .access = PL0_RW, .accessfn = pmreg_access,
2167 .type = ARM_CP_ALIAS | ARM_CP_IO,
2168 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2169 .writefn = pmovsset_write,
2170 .raw_writefn = raw_write },
2171 REGINFO_SENTINEL
2174 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2175 uint64_t value)
2176 {
2177 value &= 1;
2178 env->teecr = value;
2179 }
2180
2181 static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2182 bool isread)
2183 {
2184 /*
2185 * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
2186 * at all, so we don't need to check whether we're v8A.
2187 */
2188 if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
2189 (env->cp15.hstr_el2 & HSTR_TTEE)) {
2190 return CP_ACCESS_TRAP_EL2;
2191 }
2192 return CP_ACCESS_OK;
2193 }
2194
2195 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2196 bool isread)
2197 {
2198 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2199 return CP_ACCESS_TRAP;
2200 }
2201 return teecr_access(env, ri, isread);
2202 }
2203
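/*
 * Illustrative note (a sketch, not part of the original file): TEEHBR
 * access composes two traps in priority order -- an EL0 access with
 * TEECR bit 0 (XED, the bit kept by teecr_write above) set is trapped
 * first, and only then is the HSTR_EL2.TTEE hypervisor trap from
 * teecr_access() considered.
 */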
2204 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2205 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2206 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2207 .resetvalue = 0,
2208 .writefn = teecr_write, .accessfn = teecr_access },
2209 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2210 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2211 .accessfn = teehbr_access, .resetvalue = 0 },
2212 REGINFO_SENTINEL
2215 static const ARMCPRegInfo v6k_cp_reginfo[] = {
2216 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2217 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2218 .access = PL0_RW,
2219 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2220 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2221 .access = PL0_RW,
2222 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2223 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2224 .resetfn = arm_cp_reset_ignore },
2225 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2226 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2227 .access = PL0_R|PL1_W,
2228 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2229 .resetvalue = 0},
2230 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2231 .access = PL0_R|PL1_W,
2232 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2233 offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2234 .resetfn = arm_cp_reset_ignore },
2235 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2236 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2237 .access = PL1_RW,
2238 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2239 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2240 .access = PL1_RW,
2241 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2242 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2243 .resetvalue = 0 },
2244 REGINFO_SENTINEL
2247 #ifndef CONFIG_USER_ONLY
2249 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2250 bool isread)
2251 {
2252 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2253 * Writable only at the highest implemented exception level.
2254 */
2255 int el = arm_current_el(env);
2256 uint64_t hcr;
2257 uint32_t cntkctl;
2258
2259 switch (el) {
2260 case 0:
2261 hcr = arm_hcr_el2_eff(env);
2262 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2263 cntkctl = env->cp15.cnthctl_el2;
2264 } else {
2265 cntkctl = env->cp15.c14_cntkctl;
2267 if (!extract32(cntkctl, 0, 2)) {
2268 return CP_ACCESS_TRAP;
2270 break;
2271 case 1:
2272 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2273 arm_is_secure_below_el3(env)) {
2274 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2275 return CP_ACCESS_TRAP_UNCATEGORIZED;
2277 break;
2278 case 2:
2279 case 3:
2280 break;
2283 if (!isread && el < arm_highest_el(env)) {
2284 return CP_ACCESS_TRAP_UNCATEGORIZED;
2287 return CP_ACCESS_OK;
2290 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2291 bool isread)
2293 unsigned int cur_el = arm_current_el(env);
2294 bool has_el2 = arm_is_el2_enabled(env);
2295 uint64_t hcr = arm_hcr_el2_eff(env);
2297 switch (cur_el) {
2298 case 0:
2299 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2300 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2301 return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2302 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2305 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2306 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2307 return CP_ACCESS_TRAP;
2310 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
2311 if (hcr & HCR_E2H) {
2312 if (timeridx == GTIMER_PHYS &&
2313 !extract32(env->cp15.cnthctl_el2, 10, 1)) {
2314 return CP_ACCESS_TRAP_EL2;
2316 } else {
2317 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2318 if (has_el2 && timeridx == GTIMER_PHYS &&
2319 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2320 return CP_ACCESS_TRAP_EL2;
2323 break;
2325 case 1:
2326 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
2327 if (has_el2 && timeridx == GTIMER_PHYS &&
2328 (hcr & HCR_E2H
2329 ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2330 : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2331 return CP_ACCESS_TRAP_EL2;
2333 break;
2335 return CP_ACCESS_OK;
2338 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2339 bool isread)
2341 unsigned int cur_el = arm_current_el(env);
2342 bool has_el2 = arm_is_el2_enabled(env);
2343 uint64_t hcr = arm_hcr_el2_eff(env);
2345 switch (cur_el) {
2346 case 0:
2347 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2348 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2349 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2350 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2351 }
2352
2353 /*
2354 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2355 * EL0 if EL0[PV]TEN is zero.
2356 */
2357 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2358 return CP_ACCESS_TRAP;
2359 }
2360 /* fall through */
2362 case 1:
2363 if (has_el2 && timeridx == GTIMER_PHYS) {
2364 if (hcr & HCR_E2H) {
2365 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2366 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2367 return CP_ACCESS_TRAP_EL2;
2369 } else {
2370 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2371 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2372 return CP_ACCESS_TRAP_EL2;
2376 break;
2378 return CP_ACCESS_OK;
2381 static CPAccessResult gt_pct_access(CPUARMState *env,
2382 const ARMCPRegInfo *ri,
2383 bool isread)
2385 return gt_counter_access(env, GTIMER_PHYS, isread);
2388 static CPAccessResult gt_vct_access(CPUARMState *env,
2389 const ARMCPRegInfo *ri,
2390 bool isread)
2392 return gt_counter_access(env, GTIMER_VIRT, isread);
2395 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2396 bool isread)
2398 return gt_timer_access(env, GTIMER_PHYS, isread);
2401 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2402 bool isread)
2404 return gt_timer_access(env, GTIMER_VIRT, isread);
2407 static CPAccessResult gt_stimer_access(CPUARMState *env,
2408 const ARMCPRegInfo *ri,
2409 bool isread)
2411 /* The AArch64 register view of the secure physical timer is
2412 * always accessible from EL3, and configurably accessible from
2413 * Secure EL1.
2414 */
2415 switch (arm_current_el(env)) {
2416 case 1:
2417 if (!arm_is_secure(env)) {
2418 return CP_ACCESS_TRAP;
2420 if (!(env->cp15.scr_el3 & SCR_ST)) {
2421 return CP_ACCESS_TRAP_EL3;
2423 return CP_ACCESS_OK;
2424 case 0:
2425 case 2:
2426 return CP_ACCESS_TRAP;
2427 case 3:
2428 return CP_ACCESS_OK;
2429 default:
2430 g_assert_not_reached();
2434 static uint64_t gt_get_countervalue(CPUARMState *env)
2435 {
2436 ARMCPU *cpu = env_archcpu(env);
2437
2438 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
2439 }
2440
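/*
 * Illustrative sketch (not part of the original file; the helper name
 * and period_ns parameter are invented for illustration): the counter
 * value is simply the virtual clock divided by the tick period. At the
 * default 62.5 MHz frequency the period is 16 ns, so 1 ms of guest
 * time corresponds to 1000000 / 16 = 62500 ticks.
 */
static inline uint64_t example_ticks_from_ns(uint64_t now_ns,
                                             uint64_t period_ns)
{
    return now_ns / period_ns; /* integer division floors partial ticks */
}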
2441 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2443 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2445 if (gt->ctl & 1) {
2446 /* Timer enabled: calculate and set current ISTATUS, irq, and
2447 * reset timer to when ISTATUS next has to change
2448 */
2449 uint64_t offset = timeridx == GTIMER_VIRT ?
2450 cpu->env.cp15.cntvoff_el2 : 0;
2451 uint64_t count = gt_get_countervalue(&cpu->env);
2452 /* Note that this must be unsigned 64 bit arithmetic: */
2453 int istatus = count - offset >= gt->cval;
2454 uint64_t nexttick;
2455 int irqstate;
2457 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2459 irqstate = (istatus && !(gt->ctl & 2));
2460 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2462 if (istatus) {
2463 /* Next transition is when count rolls back over to zero */
2464 nexttick = UINT64_MAX;
2465 } else {
2466 /* Next transition is when we hit cval */
2467 nexttick = gt->cval + offset;
2469 /* Note that the desired next expiry time might be beyond the
2470 * signed-64-bit range of a QEMUTimer -- in this case we just
2471 * set the timer for as far in the future as possible. When the
2472 * timer expires we will reset the timer for any remaining period.
2473 */
2474 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
2475 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2476 } else {
2477 timer_mod(cpu->gt_timer[timeridx], nexttick);
2479 trace_arm_gt_recalc(timeridx, irqstate, nexttick);
2480 } else {
2481 /* Timer disabled: ISTATUS and timer output always clear */
2482 gt->ctl &= ~4;
2483 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
2484 timer_del(cpu->gt_timer[timeridx]);
2485 trace_arm_gt_recalc_disabled(timeridx);
2486 }
2487 }
2488
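/*
 * Illustrative sketch (not part of the original file; the helper name
 * is invented): the ISTATUS condition in gt_recalc_timer() relies on
 * unsigned 64-bit wraparound. E.g. with count = 5 and offset = 10,
 * "count - offset" wraps to UINT64_MAX - 4, so a cval of
 * UINT64_MAX - 4 reads as already expired.
 */
static inline bool example_gt_istatus(uint64_t count, uint64_t offset,
                                      uint64_t cval)
{
    return count - offset >= cval; /* unsigned wraparound is intentional */
}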
2489 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2490 int timeridx)
2492 ARMCPU *cpu = env_archcpu(env);
2494 timer_del(cpu->gt_timer[timeridx]);
2497 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2499 return gt_get_countervalue(env);
2502 static uint64_t gt_virt_cnt_offset(CPUARMState *env)
2504 uint64_t hcr;
2506 switch (arm_current_el(env)) {
2507 case 2:
2508 hcr = arm_hcr_el2_eff(env);
2509 if (hcr & HCR_E2H) {
2510 return 0;
2512 break;
2513 case 0:
2514 hcr = arm_hcr_el2_eff(env);
2515 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2516 return 0;
2518 break;
2519 }
2520
2521 return env->cp15.cntvoff_el2;
2522 }
2523
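/*
 * Illustrative summary (a sketch, not part of the original file): the
 * virtual counter reads as CNTPCT - CNTVOFF_EL2, except where the
 * offset is architecturally treated as zero:
 *
 *   at EL2 with HCR_EL2.E2H == 1           -> offset 0
 *   at EL0 with HCR_EL2.<E2H,TGE> == '11'  -> offset 0 (EL0 acts as EL2&0)
 *   otherwise                              -> offset CNTVOFF_EL2
 */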
2524 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2526 return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
2529 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2530 int timeridx,
2531 uint64_t value)
2533 trace_arm_gt_cval_write(timeridx, value);
2534 env->cp15.c14_timer[timeridx].cval = value;
2535 gt_recalc_timer(env_archcpu(env), timeridx);
2538 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2539 int timeridx)
2541 uint64_t offset = 0;
2543 switch (timeridx) {
2544 case GTIMER_VIRT:
2545 case GTIMER_HYPVIRT:
2546 offset = gt_virt_cnt_offset(env);
2547 break;
2550 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2551 (gt_get_countervalue(env) - offset));
2552 }
2553
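/*
 * Illustrative derivation (a sketch, not part of the original file):
 * TVAL is a signed 32-bit downcounting view of CVAL. Writing TVAL = v
 * and reading it back with the counter unchanged returns v:
 *
 *   gt_tval_write:  cval = (count - offset) + sext32(v)
 *   gt_tval_read:   tval = (uint32_t)(cval - (count - offset)) == (uint32_t)v
 *
 * A negative v places cval in the past, so ISTATUS is set immediately.
 */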
2554 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2555 int timeridx,
2556 uint64_t value)
2558 uint64_t offset = 0;
2560 switch (timeridx) {
2561 case GTIMER_VIRT:
2562 case GTIMER_HYPVIRT:
2563 offset = gt_virt_cnt_offset(env);
2564 break;
2567 trace_arm_gt_tval_write(timeridx, value);
2568 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2569 sextract64(value, 0, 32);
2570 gt_recalc_timer(env_archcpu(env), timeridx);
2573 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2574 int timeridx,
2575 uint64_t value)
2577 ARMCPU *cpu = env_archcpu(env);
2578 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2580 trace_arm_gt_ctl_write(timeridx, value);
2581 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2582 if ((oldval ^ value) & 1) {
2583 /* Enable toggled */
2584 gt_recalc_timer(cpu, timeridx);
2585 } else if ((oldval ^ value) & 2) {
2586 /* IMASK toggled: don't need to recalculate,
2587 * just set the interrupt line based on ISTATUS
2588 */
2589 int irqstate = (oldval & 4) && !(value & 2);
2591 trace_arm_gt_imask_toggle(timeridx, irqstate);
2592 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2593 }
2594 }
2595
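/*
 * Illustrative note (a sketch, not part of the original file):
 * CNT*_CTL is ENABLE (bit 0), IMASK (bit 1) and the read-only ISTATUS
 * (bit 2). deposit64(oldval, 0, 2, value) copies only bits [1:0] from
 * the written value, so a guest write can never forge ISTATUS:
 *
 *   oldval = 0b100 (ISTATUS set), value = 0b011
 *   deposit64(oldval, 0, 2, value) == 0b111
 */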
2596 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2598 gt_timer_reset(env, ri, GTIMER_PHYS);
2601 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2602 uint64_t value)
2604 gt_cval_write(env, ri, GTIMER_PHYS, value);
2607 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2609 return gt_tval_read(env, ri, GTIMER_PHYS);
2612 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2613 uint64_t value)
2615 gt_tval_write(env, ri, GTIMER_PHYS, value);
2618 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2619 uint64_t value)
2621 gt_ctl_write(env, ri, GTIMER_PHYS, value);
2624 static int gt_phys_redir_timeridx(CPUARMState *env)
2626 switch (arm_mmu_idx(env)) {
2627 case ARMMMUIdx_E20_0:
2628 case ARMMMUIdx_E20_2:
2629 case ARMMMUIdx_E20_2_PAN:
2630 case ARMMMUIdx_SE20_0:
2631 case ARMMMUIdx_SE20_2:
2632 case ARMMMUIdx_SE20_2_PAN:
2633 return GTIMER_HYP;
2634 default:
2635 return GTIMER_PHYS;
2639 static int gt_virt_redir_timeridx(CPUARMState *env)
2641 switch (arm_mmu_idx(env)) {
2642 case ARMMMUIdx_E20_0:
2643 case ARMMMUIdx_E20_2:
2644 case ARMMMUIdx_E20_2_PAN:
2645 case ARMMMUIdx_SE20_0:
2646 case ARMMMUIdx_SE20_2:
2647 case ARMMMUIdx_SE20_2_PAN:
2648 return GTIMER_HYPVIRT;
2649 default:
2650 return GTIMER_VIRT;
2651 }
2652 }
2653
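/*
 * Illustrative summary (a sketch, not part of the original file):
 * under VHE (HCR_EL2.E2H == 1) the EL2&0 translation regimes redirect
 * the EL0 timer registers, so a CNTP_* access made from an
 * ARMMMUIdx_E20_* or ARMMMUIdx_SE20_* context operates on the EL2
 * physical timer (GTIMER_HYP) rather than GTIMER_PHYS, and CNTV_*
 * likewise maps to GTIMER_HYPVIRT.
 */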
2654 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2655 const ARMCPRegInfo *ri)
2657 int timeridx = gt_phys_redir_timeridx(env);
2658 return env->cp15.c14_timer[timeridx].cval;
2661 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2662 uint64_t value)
2664 int timeridx = gt_phys_redir_timeridx(env);
2665 gt_cval_write(env, ri, timeridx, value);
2668 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2669 const ARMCPRegInfo *ri)
2671 int timeridx = gt_phys_redir_timeridx(env);
2672 return gt_tval_read(env, ri, timeridx);
2675 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2676 uint64_t value)
2678 int timeridx = gt_phys_redir_timeridx(env);
2679 gt_tval_write(env, ri, timeridx, value);
2682 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2683 const ARMCPRegInfo *ri)
2685 int timeridx = gt_phys_redir_timeridx(env);
2686 return env->cp15.c14_timer[timeridx].ctl;
2689 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2690 uint64_t value)
2692 int timeridx = gt_phys_redir_timeridx(env);
2693 gt_ctl_write(env, ri, timeridx, value);
2696 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2698 gt_timer_reset(env, ri, GTIMER_VIRT);
2701 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2702 uint64_t value)
2704 gt_cval_write(env, ri, GTIMER_VIRT, value);
2707 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2709 return gt_tval_read(env, ri, GTIMER_VIRT);
2712 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2713 uint64_t value)
2715 gt_tval_write(env, ri, GTIMER_VIRT, value);
2718 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2719 uint64_t value)
2721 gt_ctl_write(env, ri, GTIMER_VIRT, value);
2724 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2725 uint64_t value)
2727 ARMCPU *cpu = env_archcpu(env);
2729 trace_arm_gt_cntvoff_write(value);
2730 raw_write(env, ri, value);
2731 gt_recalc_timer(cpu, GTIMER_VIRT);
2734 static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
2735 const ARMCPRegInfo *ri)
2737 int timeridx = gt_virt_redir_timeridx(env);
2738 return env->cp15.c14_timer[timeridx].cval;
2741 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2742 uint64_t value)
2744 int timeridx = gt_virt_redir_timeridx(env);
2745 gt_cval_write(env, ri, timeridx, value);
2748 static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
2749 const ARMCPRegInfo *ri)
2751 int timeridx = gt_virt_redir_timeridx(env);
2752 return gt_tval_read(env, ri, timeridx);
2755 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2756 uint64_t value)
2758 int timeridx = gt_virt_redir_timeridx(env);
2759 gt_tval_write(env, ri, timeridx, value);
2762 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
2763 const ARMCPRegInfo *ri)
2765 int timeridx = gt_virt_redir_timeridx(env);
2766 return env->cp15.c14_timer[timeridx].ctl;
2769 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2770 uint64_t value)
2772 int timeridx = gt_virt_redir_timeridx(env);
2773 gt_ctl_write(env, ri, timeridx, value);
2776 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2778 gt_timer_reset(env, ri, GTIMER_HYP);
2781 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2782 uint64_t value)
2784 gt_cval_write(env, ri, GTIMER_HYP, value);
2787 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2789 return gt_tval_read(env, ri, GTIMER_HYP);
2792 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2793 uint64_t value)
2795 gt_tval_write(env, ri, GTIMER_HYP, value);
2798 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2799 uint64_t value)
2801 gt_ctl_write(env, ri, GTIMER_HYP, value);
2804 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2806 gt_timer_reset(env, ri, GTIMER_SEC);
2809 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2810 uint64_t value)
2812 gt_cval_write(env, ri, GTIMER_SEC, value);
2815 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2817 return gt_tval_read(env, ri, GTIMER_SEC);
2820 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2821 uint64_t value)
2823 gt_tval_write(env, ri, GTIMER_SEC, value);
2826 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2827 uint64_t value)
2829 gt_ctl_write(env, ri, GTIMER_SEC, value);
2832 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2834 gt_timer_reset(env, ri, GTIMER_HYPVIRT);
2837 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2838 uint64_t value)
2840 gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
2843 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2845 return gt_tval_read(env, ri, GTIMER_HYPVIRT);
2848 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2849 uint64_t value)
2851 gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
2854 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2855 uint64_t value)
2857 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
2860 void arm_gt_ptimer_cb(void *opaque)
2862 ARMCPU *cpu = opaque;
2864 gt_recalc_timer(cpu, GTIMER_PHYS);
2867 void arm_gt_vtimer_cb(void *opaque)
2869 ARMCPU *cpu = opaque;
2871 gt_recalc_timer(cpu, GTIMER_VIRT);
2874 void arm_gt_htimer_cb(void *opaque)
2876 ARMCPU *cpu = opaque;
2878 gt_recalc_timer(cpu, GTIMER_HYP);
2881 void arm_gt_stimer_cb(void *opaque)
2883 ARMCPU *cpu = opaque;
2885 gt_recalc_timer(cpu, GTIMER_SEC);
2888 void arm_gt_hvtimer_cb(void *opaque)
2890 ARMCPU *cpu = opaque;
2892 gt_recalc_timer(cpu, GTIMER_HYPVIRT);
2895 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
2897 ARMCPU *cpu = env_archcpu(env);
2899 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
2902 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2903 /* Note that CNTFRQ is purely reads-as-written for the benefit
2904 * of software; writing it doesn't actually change the timer frequency.
2905 * Our reset value matches the fixed frequency we implement the timer at.
2906 */
2907 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
2908 .type = ARM_CP_ALIAS,
2909 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2910 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
2912 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2913 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2914 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2915 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2916 .resetfn = arm_gt_cntfrq_reset,
2918 /* overall control: mostly access permissions */
2919 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
2920 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
2921 .access = PL1_RW,
2922 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
2923 .resetvalue = 0,
2925 /* per-timer control */
2926 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2927 .secure = ARM_CP_SECSTATE_NS,
2928 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2929 .accessfn = gt_ptimer_access,
2930 .fieldoffset = offsetoflow32(CPUARMState,
2931 cp15.c14_timer[GTIMER_PHYS].ctl),
2932 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
2933 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
2935 { .name = "CNTP_CTL_S",
2936 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2937 .secure = ARM_CP_SECSTATE_S,
2938 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2939 .accessfn = gt_ptimer_access,
2940 .fieldoffset = offsetoflow32(CPUARMState,
2941 cp15.c14_timer[GTIMER_SEC].ctl),
2942 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2944 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
2945 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
2946 .type = ARM_CP_IO, .access = PL0_RW,
2947 .accessfn = gt_ptimer_access,
2948 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
2949 .resetvalue = 0,
2950 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
2951 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
2953 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
2954 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2955 .accessfn = gt_vtimer_access,
2956 .fieldoffset = offsetoflow32(CPUARMState,
2957 cp15.c14_timer[GTIMER_VIRT].ctl),
2958 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
2959 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
2961 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
2962 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
2963 .type = ARM_CP_IO, .access = PL0_RW,
2964 .accessfn = gt_vtimer_access,
2965 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
2966 .resetvalue = 0,
2967 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
2968 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
2970 /* TimerValue views: a 32 bit downcounting view of the underlying state */
2971 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2972 .secure = ARM_CP_SECSTATE_NS,
2973 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2974 .accessfn = gt_ptimer_access,
2975 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
2977 { .name = "CNTP_TVAL_S",
2978 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2979 .secure = ARM_CP_SECSTATE_S,
2980 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2981 .accessfn = gt_ptimer_access,
2982 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
2984 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2985 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
2986 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2987 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
2988 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
2990 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
2991 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2992 .accessfn = gt_vtimer_access,
2993 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
2995 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2996 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
2997 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2998 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
2999 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3001 /* The counter itself */
3002 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
3003 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3004 .accessfn = gt_pct_access,
3005 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
3007 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
3008 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
3009 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3010 .accessfn = gt_pct_access, .readfn = gt_cnt_read,
3012 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
3013 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3014 .accessfn = gt_vct_access,
3015 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
3017 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3018 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3019 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3020 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
3022 /* Comparison value, indicating when the timer goes off */
3023 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
3024 .secure = ARM_CP_SECSTATE_NS,
3025 .access = PL0_RW,
3026 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3027 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3028 .accessfn = gt_ptimer_access,
3029 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3030 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3032 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
3033 .secure = ARM_CP_SECSTATE_S,
3034 .access = PL0_RW,
3035 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3036 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3037 .accessfn = gt_ptimer_access,
3038 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3040 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3041 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
3042 .access = PL0_RW,
3043 .type = ARM_CP_IO,
3044 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3045 .resetvalue = 0, .accessfn = gt_ptimer_access,
3046 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3047 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3049 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
3050 .access = PL0_RW,
3051 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3052 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3053 .accessfn = gt_vtimer_access,
3054 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3055 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3057 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3058 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
3059 .access = PL0_RW,
3060 .type = ARM_CP_IO,
3061 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3062 .resetvalue = 0, .accessfn = gt_vtimer_access,
3063 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3064 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3066 /* Secure timer -- this is actually restricted to only EL3
3067 * and configurably Secure-EL1 via the accessfn.
3068 */
3069 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
3070 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
3071 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
3072 .accessfn = gt_stimer_access,
3073 .readfn = gt_sec_tval_read,
3074 .writefn = gt_sec_tval_write,
3075 .resetfn = gt_sec_timer_reset,
3077 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
3078 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
3079 .type = ARM_CP_IO, .access = PL1_RW,
3080 .accessfn = gt_stimer_access,
3081 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
3082 .resetvalue = 0,
3083 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3085 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
3086 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
3087 .type = ARM_CP_IO, .access = PL1_RW,
3088 .accessfn = gt_stimer_access,
3089 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3090 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3092 REGINFO_SENTINEL
3095 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
3096 bool isread)
3098 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
3099 return CP_ACCESS_TRAP;
3101 return CP_ACCESS_OK;
3104 #else
3106 /* In user-mode most of the generic timer registers are inaccessible;
3107 * however, modern kernels (4.12+) allow access to cntvct_el0.
3108 */
3109
3110 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
3111 {
3112 ARMCPU *cpu = env_archcpu(env);
3113
3114 /* Currently we have no support for QEMUTimer in linux-user, so we
3115 * can't call gt_get_countervalue(env); instead we directly
3116 * call the lower level functions.
3117 */
3118 return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
3119 }
3120
3121 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3122 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3123 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3124 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
3125 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3126 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
3128 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3129 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3130 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3131 .readfn = gt_virt_cnt_read,
3133 REGINFO_SENTINEL
3136 #endif
3138 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3140 if (arm_feature(env, ARM_FEATURE_LPAE)) {
3141 raw_write(env, ri, value);
3142 } else if (arm_feature(env, ARM_FEATURE_V7)) {
3143 raw_write(env, ri, value & 0xfffff6ff);
3144 } else {
3145 raw_write(env, ri, value & 0xfffff1ff);
3149 #ifndef CONFIG_USER_ONLY
3150 /* get_phys_addr() isn't present for user-mode-only targets */
3152 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
3153 bool isread)
3155 if (ri->opc2 & 4) {
3156 /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in
3157 * Secure EL1 (which can only happen if EL3 is AArch64).
3158 * They are simply UNDEF if executed from NS EL1.
3159 * They function normally from EL2 or EL3.
3160 */
3161 if (arm_current_el(env) == 1) {
3162 if (arm_is_secure_below_el3(env)) {
3163 if (env->cp15.scr_el3 & SCR_EEL2) {
3164 return CP_ACCESS_TRAP_UNCATEGORIZED_EL2;
3165 }
3166 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
3167 }
3168 return CP_ACCESS_TRAP_UNCATEGORIZED;
3169 }
3170 }
3171 return CP_ACCESS_OK;
3172 }
3173
3174 #ifdef CONFIG_TCG
3175 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
3176 MMUAccessType access_type, ARMMMUIdx mmu_idx)
3178 hwaddr phys_addr;
3179 target_ulong page_size;
3180 int prot;
3181 bool ret;
3182 uint64_t par64;
3183 bool format64 = false;
3184 MemTxAttrs attrs = {};
3185 ARMMMUFaultInfo fi = {};
3186 ARMCacheAttrs cacheattrs = {};
3188 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
3189 &prot, &page_size, &fi, &cacheattrs);
3191 if (ret) {
3192 /*
3193 * Some kinds of translation fault must cause exceptions rather
3194 * than being reported in the PAR.
3195 */
3196 int current_el = arm_current_el(env);
3197 int target_el;
3198 uint32_t syn, fsr, fsc;
3199 bool take_exc = false;
3201 if (fi.s1ptw && current_el == 1
3202 && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
3203 /*
3204 * Synchronous stage 2 fault on an access made as part of the
3205 * translation table walk for AT S1E0* or AT S1E1* insn
3206 * executed from NS EL1. If this is a synchronous external abort
3207 * and SCR_EL3.EA == 1, then we take a synchronous external abort
3208 * to EL3. Otherwise the fault is taken as an exception to EL2,
3209 * and HPFAR_EL2 holds the faulting IPA.
3210 */
3211 if (fi.type == ARMFault_SyncExternalOnWalk &&
3212 (env->cp15.scr_el3 & SCR_EA)) {
3213 target_el = 3;
3214 } else {
3215 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
3216 if (arm_is_secure_below_el3(env) && fi.s1ns) {
3217 env->cp15.hpfar_el2 |= HPFAR_NS;
3219 target_el = 2;
3221 take_exc = true;
3222 } else if (fi.type == ARMFault_SyncExternalOnWalk) {
3223 /*
3224 * Synchronous external aborts during a translation table walk
3225 * are taken as Data Abort exceptions.
3226 */
3227 if (fi.stage2) {
3228 if (current_el == 3) {
3229 target_el = 3;
3230 } else {
3231 target_el = 2;
3233 } else {
3234 target_el = exception_target_el(env);
3236 take_exc = true;
3239 if (take_exc) {
3240 /* Construct FSR and FSC using same logic as arm_deliver_fault() */
3241 if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
3242 arm_s1_regime_using_lpae_format(env, mmu_idx)) {
3243 fsr = arm_fi_to_lfsc(&fi);
3244 fsc = extract32(fsr, 0, 6);
3245 } else {
3246 fsr = arm_fi_to_sfsc(&fi);
3247 fsc = 0x3f;
3248 }
3249 /*
3250 * Report exception with ESR indicating a fault due to a
3251 * translation table walk for a cache maintenance instruction.
3252 */
3253 syn = syn_data_abort_no_iss(current_el == target_el, 0,
3254 fi.ea, 1, fi.s1ptw, 1, fsc);
3255 env->exception.vaddress = value;
3256 env->exception.fsr = fsr;
3257 raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
3261 if (is_a64(env)) {
3262 format64 = true;
3263 } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
3264 /*
3265 * ATS1Cxx:
3266 * * TTBCR.EAE determines whether the result is returned using the
3267 * 32-bit or the 64-bit PAR format
3268 * * Instructions executed in Hyp mode always use the 64bit format
3269 *
3270 * ATS1S2NSOxx uses the 64bit format if any of the following is true:
3271 * * The Non-secure TTBCR.EAE bit is set to 1
3272 * * The implementation includes EL2, and the value of HCR.VM is 1
3273 *
3274 * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
3275 *
3276 * ATS1Hx always uses the 64bit format.
3277 */
3278 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
3280 if (arm_feature(env, ARM_FEATURE_EL2)) {
3281 if (mmu_idx == ARMMMUIdx_E10_0 ||
3282 mmu_idx == ARMMMUIdx_E10_1 ||
3283 mmu_idx == ARMMMUIdx_E10_1_PAN) {
3284 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
3285 } else {
3286 format64 |= arm_current_el(env) == 2;
3291 if (format64) {
3292 /* Create a 64-bit PAR */
3293 par64 = (1 << 11); /* LPAE bit always set */
3294 if (!ret) {
3295 par64 |= phys_addr & ~0xfffULL;
3296 if (!attrs.secure) {
3297 par64 |= (1 << 9); /* NS */
3299 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
3300 par64 |= cacheattrs.shareability << 7; /* SH */
3301 } else {
3302 uint32_t fsr = arm_fi_to_lfsc(&fi);
3304 par64 |= 1; /* F */
3305 par64 |= (fsr & 0x3f) << 1; /* FS */
3306 if (fi.stage2) {
3307 par64 |= (1 << 9); /* S */
3309 if (fi.s1ptw) {
3310 par64 |= (1 << 8); /* PTW */
3313 } else {
3314 /* fsr is a DFSR/IFSR value for the short descriptor
3315 * translation table format (with WnR always clear).
3316 * Convert it to a 32-bit PAR.
3317 */
3318 if (!ret) {
3319 /* We do not set any attribute bits in the PAR */
3320 if (page_size == (1 << 24)
3321 && arm_feature(env, ARM_FEATURE_V7)) {
3322 par64 = (phys_addr & 0xff000000) | (1 << 1);
3323 } else {
3324 par64 = phys_addr & 0xfffff000;
3326 if (!attrs.secure) {
3327 par64 |= (1 << 9); /* NS */
3329 } else {
3330 uint32_t fsr = arm_fi_to_sfsc(&fi);
3332 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
3333 ((fsr & 0xf) << 1) | 1;
3336 return par64;
3337 }
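/*
 * Illustrative summary (a sketch, not part of the original file):
 * layout of the 64-bit PAR assembled above, for a successful
 * translation:
 *
 *   [63:56] ATTR  memory attributes (cacheattrs.attrs)
 *   [47:12] PA    physical address
 *   [11]    LPAE  always 1 in the 64-bit format
 *   [9]     NS    non-secure
 *   [8:7]   SH    shareability (cacheattrs.shareability)
 *   [0]     F     0 (no fault)
 *
 * On a fault, F is 1, bits [6:1] hold the long-descriptor FSC, and
 * S (bit 9) / PTW (bit 8) flag stage-2 involvement.
 */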
3338 #endif /* CONFIG_TCG */
3340 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3342 #ifdef CONFIG_TCG
3343 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3344 uint64_t par64;
3345 ARMMMUIdx mmu_idx;
3346 int el = arm_current_el(env);
3347 bool secure = arm_is_secure_below_el3(env);
3349 switch (ri->opc2 & 6) {
3350 case 0:
3351 /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
3352 switch (el) {
3353 case 3:
3354 mmu_idx = ARMMMUIdx_SE3;
3355 break;
3356 case 2:
3357 g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */
3358 /* fall through */
3359 case 1:
3360 if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
3361 mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
3362 : ARMMMUIdx_Stage1_E1_PAN);
3363 } else {
3364 mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
3366 break;
3367 default:
3368 g_assert_not_reached();
3370 break;
3371 case 2:
3372 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3373 switch (el) {
3374 case 3:
3375 mmu_idx = ARMMMUIdx_SE10_0;
3376 break;
3377 case 2:
3378 g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */
3379 mmu_idx = ARMMMUIdx_Stage1_E0;
3380 break;
3381 case 1:
3382 mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
3383 break;
3384 default:
3385 g_assert_not_reached();
3387 break;
3388 case 4:
3389 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
3390 mmu_idx = ARMMMUIdx_E10_1;
3391 break;
3392 case 6:
3393 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
3394 mmu_idx = ARMMMUIdx_E10_0;
3395 break;
3396 default:
3397 g_assert_not_reached();
3400 par64 = do_ats_write(env, value, access_type, mmu_idx);
3402 A32_BANKED_CURRENT_REG_SET(env, par, par64);
3403 #else
3404 /* Handled by hardware accelerator. */
3405 g_assert_not_reached();
3406 #endif /* CONFIG_TCG */
3409 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3410 uint64_t value)
3412 #ifdef CONFIG_TCG
3413 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3414 uint64_t par64;
3416 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);
3418 A32_BANKED_CURRENT_REG_SET(env, par, par64);
3419 #else
3420 /* Handled by hardware accelerator. */
3421 g_assert_not_reached();
3422 #endif /* CONFIG_TCG */
3425 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3426 bool isread)
3428 if (arm_current_el(env) == 3 &&
3429 !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
3430 return CP_ACCESS_TRAP;
3432 return CP_ACCESS_OK;
3435 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
3436 uint64_t value)
3438 #ifdef CONFIG_TCG
3439 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3440 ARMMMUIdx mmu_idx;
3441 int secure = arm_is_secure_below_el3(env);
3443 switch (ri->opc2 & 6) {
3444 case 0:
3445 switch (ri->opc1) {
3446 case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
3447 if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
3448 mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
3449 : ARMMMUIdx_Stage1_E1_PAN);
3450 } else {
3451 mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
3453 break;
3454 case 4: /* AT S1E2R, AT S1E2W */
3455 mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
3456 break;
3457 case 6: /* AT S1E3R, AT S1E3W */
3458 mmu_idx = ARMMMUIdx_SE3;
3459 break;
3460 default:
3461 g_assert_not_reached();
3463 break;
3464 case 2: /* AT S1E0R, AT S1E0W */
3465 mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
3466 break;
3467 case 4: /* AT S12E1R, AT S12E1W */
3468 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
3469 break;
3470 case 6: /* AT S12E0R, AT S12E0W */
3471 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
3472 break;
3473 default:
3474 g_assert_not_reached();
3477 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
3478 #else
3479 /* Handled by hardware accelerator. */
3480 g_assert_not_reached();
3481 #endif /* CONFIG_TCG */
3483 #endif
3485 static const ARMCPRegInfo vapa_cp_reginfo[] = {
3486 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
3487 .access = PL1_RW, .resetvalue = 0,
3488 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
3489 offsetoflow32(CPUARMState, cp15.par_ns) },
3490 .writefn = par_write },
3491 #ifndef CONFIG_USER_ONLY
3492 /* This underdecoding is safe because the reginfo is NO_RAW. */
3493 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
3494 .access = PL1_W, .accessfn = ats_access,
3495 .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
3496 #endif
3497 REGINFO_SENTINEL
3500 /* Return basic MPU access permission bits. */
3501 static uint32_t simple_mpu_ap_bits(uint32_t val)
3503 uint32_t ret;
3504 uint32_t mask;
3505 int i;
3506 ret = 0;
3507 mask = 3;
3508 for (i = 0; i < 16; i += 2) {
3509 ret |= (val >> i) & mask;
3510 mask <<= 2;
3512 return ret;
3515 /* Pad basic MPU access permission bits to extended format. */
3516 static uint32_t extended_mpu_ap_bits(uint32_t val)
3518 uint32_t ret;
3519 uint32_t mask;
3520 int i;
3521 ret = 0;
3522 mask = 3;
3523 for (i = 0; i < 16; i += 2) {
3524 ret |= (val & mask) << i;
3525 mask <<= 2;
3527 return ret;
3528 }
3529
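/*
 * Illustrative example (a sketch, not part of the original file): the
 * two helpers above repack eight 2-bit AP fields between a 2-bit
 * stride ("simple") and a 4-bit stride ("extended"), and are inverses
 * for the bits they keep:
 *
 *   extended_mpu_ap_bits(0x0e) == 0x32   (0b1110 -> 0b110010)
 *   simple_mpu_ap_bits(0x32)   == 0x0e
 */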
3530 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3531 uint64_t value)
3533 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
3536 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3538 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
3541 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3542 uint64_t value)
3544 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
3547 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3549 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
3552 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3554 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3556 if (!u32p) {
3557 return 0;
3560 u32p += env->pmsav7.rnr[M_REG_NS];
3561 return *u32p;
3564 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
3565 uint64_t value)
3567 ARMCPU *cpu = env_archcpu(env);
3568 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3570 if (!u32p) {
3571 return;
3574 u32p += env->pmsav7.rnr[M_REG_NS];
3575 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3576 *u32p = value;
3579 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3580 uint64_t value)
3582 ARMCPU *cpu = env_archcpu(env);
3583 uint32_t nrgs = cpu->pmsav7_dregion;
3585 if (value >= nrgs) {
3586 qemu_log_mask(LOG_GUEST_ERROR,
3587 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
3588 " > %" PRIu32 "\n", (uint32_t)value, nrgs);
3589 return;
3592 raw_write(env, ri, value);
3595 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
3596 /* Reset for all these registers is handled in arm_cpu_reset(),
3597 * because the PMSAv7 is also used by M-profile CPUs, which do
3598 * not register cpregs but still need the state to be reset.
3599 */
3600 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
3601 .access = PL1_RW, .type = ARM_CP_NO_RAW,
3602 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
3603 .readfn = pmsav7_read, .writefn = pmsav7_write,
3604 .resetfn = arm_cp_reset_ignore },
3605 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
3606 .access = PL1_RW, .type = ARM_CP_NO_RAW,
3607 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
3608 .readfn = pmsav7_read, .writefn = pmsav7_write,
3609 .resetfn = arm_cp_reset_ignore },
3610 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
3611 .access = PL1_RW, .type = ARM_CP_NO_RAW,
3612 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
3613 .readfn = pmsav7_read, .writefn = pmsav7_write,
3614 .resetfn = arm_cp_reset_ignore },
3615 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
3616 .access = PL1_RW,
3617 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
3618 .writefn = pmsav7_rgnr_write,
3619 .resetfn = arm_cp_reset_ignore },
3620 REGINFO_SENTINEL
3623 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
3624 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
3625 .access = PL1_RW, .type = ARM_CP_ALIAS,
3626 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3627 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
3628 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
3629 .access = PL1_RW, .type = ARM_CP_ALIAS,
3630 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3631 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
3632 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
3633 .access = PL1_RW,
3634 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3635 .resetvalue = 0, },
3636 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
3637 .access = PL1_RW,
3638 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3639 .resetvalue = 0, },
3640 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
3641 .access = PL1_RW,
3642 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
3643 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
3644 .access = PL1_RW,
3645 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
3646 /* Protection region base and size registers */
3647 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
3648 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3649 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
3650 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
3651 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3652 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
3653 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
3654 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3655 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
3656 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
3657 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3658 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
3659 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
3660 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3661 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
3662 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
3663 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3664 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
3665 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
3666 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3667 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
3668 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
3669 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3670 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
3671 REGINFO_SENTINEL
3674 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
3675 uint64_t value)
3677 TCR *tcr = raw_ptr(env, ri);
3678 int maskshift = extract32(value, 0, 3);
3680 if (!arm_feature(env, ARM_FEATURE_V8)) {
3681 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
3682 /* Pre-ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
3683 * using Long-descriptor translation table format */
3684 value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
3685 } else if (arm_feature(env, ARM_FEATURE_EL3)) {
3686 /* In an implementation that includes the Security Extensions
3687 * TTBCR has additional fields PD0 [4] and PD1 [5] for
3688 * Short-descriptor translation table format.
3690 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
3691 } else {
3692 value &= TTBCR_N;
3696 /* Update the masks corresponding to the TCR bank being written.
3697 * Note that we always calculate mask and base_mask, but
3698 * they are only used for short-descriptor tables (ie if EAE is 0);
3699 * for long-descriptor tables the TCR fields are used differently
3700 * and the mask and base_mask values are meaningless.
3702 tcr->raw_tcr = value;
3703 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
3704 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
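/*
 * Illustrative values (not upstream code): for short-descriptor tables
 * with TTBCR.N = 2, maskshift = 2 and so:
 *   tcr->mask      = ~(0xffffffffu >> 2) = 0xc0000000 (the VA bits that,
 *                    when any is set, steer a translation to TTBR1)
 *   tcr->base_mask = ~(0x3fffu >> 2)     = 0xfffff000 (alignment mask for
 *                    the TTBR0 table base, here 4KB)
 */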
3707 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3708 uint64_t value)
3710 ARMCPU *cpu = env_archcpu(env);
3711 TCR *tcr = raw_ptr(env, ri);
3713 if (arm_feature(env, ARM_FEATURE_LPAE)) {
3714 /* With LPAE a TTBCR write can change the ASID
3715 * via the TTBCR.A1 bit, so do a TLB flush.
3717 tlb_flush(CPU(cpu));
3719 /* Preserve the high half of TCR_EL1, set via TTBCR2. */
3720 value = deposit64(tcr->raw_tcr, 0, 32, value);
3721 vmsa_ttbcr_raw_write(env, ri, value);
3724 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3726 TCR *tcr = raw_ptr(env, ri);
3728 /* Reset both the TCR and the masks corresponding to the bank of
3729 * the TCR being reset.
3731 tcr->raw_tcr = 0;
3732 tcr->mask = 0;
3733 tcr->base_mask = 0xffffc000u;
3736 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
3737 uint64_t value)
3739 ARMCPU *cpu = env_archcpu(env);
3740 TCR *tcr = raw_ptr(env, ri);
3742 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
3743 tlb_flush(CPU(cpu));
3744 tcr->raw_tcr = value;
3747 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3748 uint64_t value)
3750 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
3751 if (cpreg_field_is_64bit(ri) &&
3752 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
3753 ARMCPU *cpu = env_archcpu(env);
3754 tlb_flush(CPU(cpu));
3756 raw_write(env, ri, value);
3759 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3760 uint64_t value)
3763 * If we are running with the EL2&0 regime, then an ASID is active.
3764 * Flush if that might be changing. Note we're not checking
3765 * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
3766 * holds the active ASID, only checking the field that might.
3768 if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
3769 (arm_hcr_el2_eff(env) & HCR_E2H)) {
3770 uint16_t mask = ARMMMUIdxBit_E20_2 |
3771 ARMMMUIdxBit_E20_2_PAN |
3772 ARMMMUIdxBit_E20_0;
3774 if (arm_is_secure_below_el3(env)) {
3775 mask >>= ARM_MMU_IDX_A_NS;
3778 tlb_flush_by_mmuidx(env_cpu(env), mask);
3780 raw_write(env, ri, value);
3783 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3784 uint64_t value)
3786 ARMCPU *cpu = env_archcpu(env);
3787 CPUState *cs = CPU(cpu);
3790 * A change in the VMID used by the stage 2 page tables invalidates
3791 * the combined stage 1&2 TLBs (EL10_1 and EL10_0).
3793 if (raw_read(env, ri) != value) {
3794 uint16_t mask = ARMMMUIdxBit_E10_1 |
3795 ARMMMUIdxBit_E10_1_PAN |
3796 ARMMMUIdxBit_E10_0;
3798 if (arm_is_secure_below_el3(env)) {
3799 mask >>= ARM_MMU_IDX_A_NS;
3802 tlb_flush_by_mmuidx(cs, mask);
3803 raw_write(env, ri, value);
3807 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
3808 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
3809 .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
3810 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
3811 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
3812 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
3813 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
3814 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
3815 offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
3816 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
3817 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
3818 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
3819 offsetof(CPUARMState, cp15.dfar_ns) } },
3820 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
3821 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
3822 .access = PL1_RW, .accessfn = access_tvm_trvm,
3823 .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
3824 .resetvalue = 0, },
3825 REGINFO_SENTINEL
3828 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
3829 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
3830 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
3831 .access = PL1_RW, .accessfn = access_tvm_trvm,
3832 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
3833 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
3834 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
3835 .access = PL1_RW, .accessfn = access_tvm_trvm,
3836 .writefn = vmsa_ttbr_write, .resetvalue = 0,
3837 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3838 offsetof(CPUARMState, cp15.ttbr0_ns) } },
3839 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
3840 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
3841 .access = PL1_RW, .accessfn = access_tvm_trvm,
3842 .writefn = vmsa_ttbr_write, .resetvalue = 0,
3843 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3844 offsetof(CPUARMState, cp15.ttbr1_ns) } },
3845 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
3846 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
3847 .access = PL1_RW, .accessfn = access_tvm_trvm,
3848 .writefn = vmsa_tcr_el12_write,
3849 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
3850 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
3851 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
3852 .access = PL1_RW, .accessfn = access_tvm_trvm,
3853 .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
3854 .raw_writefn = vmsa_ttbcr_raw_write,
3855 /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. */
3856 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]),
3857 offsetof(CPUARMState, cp15.tcr_el[1])} },
3858 REGINFO_SENTINEL
3861 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
3862 * qemu tlbs nor adjusting cached masks.
3864 static const ARMCPRegInfo ttbcr2_reginfo = {
3865 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
3866 .access = PL1_RW, .accessfn = access_tvm_trvm,
3867 .type = ARM_CP_ALIAS,
3868 .bank_fieldoffsets = {
3869 offsetofhigh32(CPUARMState, cp15.tcr_el[3].raw_tcr),
3870 offsetofhigh32(CPUARMState, cp15.tcr_el[1].raw_tcr),
3874 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
3875 uint64_t value)
3877 env->cp15.c15_ticonfig = value & 0xe7;
3878 /* The OS_TYPE bit in this register changes the reported CPUID! */
3879 env->cp15.c0_cpuid = (value & (1 << 5)) ?
3880 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
3883 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
3884 uint64_t value)
3886 env->cp15.c15_threadid = value & 0xffff;
3889 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
3890 uint64_t value)
3892 /* Wait-for-interrupt (deprecated) */
3893 cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
3896 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
3897 uint64_t value)
3899 /* On OMAP there are registers indicating the max/min index of dcache lines
3900 * containing a dirty line; cache flush operations have to reset these.
3902 env->cp15.c15_i_max = 0x000;
3903 env->cp15.c15_i_min = 0xff0;
3906 static const ARMCPRegInfo omap_cp_reginfo[] = {
3907 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
3908 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
3909 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
3910 .resetvalue = 0, },
3911 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
3912 .access = PL1_RW, .type = ARM_CP_NOP },
3913 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
3914 .access = PL1_RW,
3915 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
3916 .writefn = omap_ticonfig_write },
3917 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
3918 .access = PL1_RW,
3919 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
3920 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
3921 .access = PL1_RW, .resetvalue = 0xff0,
3922 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
3923 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
3924 .access = PL1_RW,
3925 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
3926 .writefn = omap_threadid_write },
3927 { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
3928 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
3929 .type = ARM_CP_NO_RAW,
3930 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
3931 /* TODO: Peripheral port remap register:
3932 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
3933 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
3934 * when MMU is off.
3936 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
3937 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
3938 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
3939 .writefn = omap_cachemaint_write },
3940 { .name = "C9", .cp = 15, .crn = 9,
3941 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
3942 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
3943 REGINFO_SENTINEL
3946 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3947 uint64_t value)
3949 env->cp15.c15_cpar = value & 0x3fff;
3952 static const ARMCPRegInfo xscale_cp_reginfo[] = {
3953 { .name = "XSCALE_CPAR",
3954 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
3955 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
3956 .writefn = xscale_cpar_write, },
3957 { .name = "XSCALE_AUXCR",
3958 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
3959 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
3960 .resetvalue = 0, },
3961 /* XScale specific cache-lockdown: since we have no cache we NOP these
3962 * and hope the guest does not really rely on cache behaviour.
3964 { .name = "XSCALE_LOCK_ICACHE_LINE",
3965 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
3966 .access = PL1_W, .type = ARM_CP_NOP },
3967 { .name = "XSCALE_UNLOCK_ICACHE",
3968 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
3969 .access = PL1_W, .type = ARM_CP_NOP },
3970 { .name = "XSCALE_DCACHE_LOCK",
3971 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
3972 .access = PL1_RW, .type = ARM_CP_NOP },
3973 { .name = "XSCALE_UNLOCK_DCACHE",
3974 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
3975 .access = PL1_W, .type = ARM_CP_NOP },
3976 REGINFO_SENTINEL
3979 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
3980 /* RAZ/WI the whole crn=15 space, when we don't have a more specific
3981 * implementation of this implementation-defined space.
3982 * Ideally this should eventually disappear in favour of actually
3983 * implementing the correct behaviour for all cores.
3985 { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
3986 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3987 .access = PL1_RW,
3988 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
3989 .resetvalue = 0 },
3990 REGINFO_SENTINEL
3993 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
3994 /* Cache status: RAZ because we have no cache so it's always clean */
3995 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
3996 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3997 .resetvalue = 0 },
3998 REGINFO_SENTINEL
4001 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
4002 /* We never have a block transfer operation in progress */
4003 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
4004 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4005 .resetvalue = 0 },
4006 /* The cache ops themselves: these all NOP for QEMU */
4007 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
4008 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4009 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
4010 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4011 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
4012 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4013 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
4014 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4015 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
4016 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4017 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
4018 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4019 REGINFO_SENTINEL
4022 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
4023 /* The cache test-and-clean instructions always return (1 << 30)
4024 * to indicate that there are no dirty cache lines.
4026 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
4027 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4028 .resetvalue = (1 << 30) },
4029 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
4030 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4031 .resetvalue = (1 << 30) },
4032 REGINFO_SENTINEL
4035 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
4036 /* Ignore ReadBuffer accesses */
4037 { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
4038 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4039 .access = PL1_RW, .resetvalue = 0,
4040 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
4041 REGINFO_SENTINEL
4044 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4046 unsigned int cur_el = arm_current_el(env);
4048 if (arm_is_el2_enabled(env) && cur_el == 1) {
4049 return env->cp15.vpidr_el2;
4051 return raw_read(env, ri);
4054 static uint64_t mpidr_read_val(CPUARMState *env)
4056 ARMCPU *cpu = env_archcpu(env);
4057 uint64_t mpidr = cpu->mp_affinity;
4059 if (arm_feature(env, ARM_FEATURE_V7MP)) {
4060 mpidr |= (1U << 31);
4061 /* Cores which are uniprocessor (non-coherent)
4062 * but still implement the MP extensions set
4063 * bit 30. (For instance, Cortex-R5).
4065 if (cpu->mp_is_up) {
4066 mpidr |= (1u << 30);
4069 return mpidr;
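/*
 * Example (illustration only): a uniprocessor core that still implements
 * the MP extensions, such as a single Cortex-R5, reads back as
 * 0xc0000000 | mp_affinity, i.e. with M (bit 31) and U (bit 30) both set.
 */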
4072 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4074 unsigned int cur_el = arm_current_el(env);
4076 if (arm_is_el2_enabled(env) && cur_el == 1) {
4077 return env->cp15.vmpidr_el2;
4079 return mpidr_read_val(env);
4082 static const ARMCPRegInfo lpae_cp_reginfo[] = {
4083 /* NOP AMAIR0/1 */
4084 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
4085 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
4086 .access = PL1_RW, .accessfn = access_tvm_trvm,
4087 .type = ARM_CP_CONST, .resetvalue = 0 },
4088 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
4089 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
4090 .access = PL1_RW, .accessfn = access_tvm_trvm,
4091 .type = ARM_CP_CONST, .resetvalue = 0 },
4092 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
4093 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
4094 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
4095 offsetof(CPUARMState, cp15.par_ns)} },
4096 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
4097 .access = PL1_RW, .accessfn = access_tvm_trvm,
4098 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4099 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4100 offsetof(CPUARMState, cp15.ttbr0_ns) },
4101 .writefn = vmsa_ttbr_write, },
4102 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
4103 .access = PL1_RW, .accessfn = access_tvm_trvm,
4104 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4105 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4106 offsetof(CPUARMState, cp15.ttbr1_ns) },
4107 .writefn = vmsa_ttbr_write, },
4108 REGINFO_SENTINEL
4111 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4113 return vfp_get_fpcr(env);
4116 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4117 uint64_t value)
4119 vfp_set_fpcr(env, value);
4122 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4124 return vfp_get_fpsr(env);
4127 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4128 uint64_t value)
4130 vfp_set_fpsr(env, value);
4133 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
4134 bool isread)
4136 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
4137 return CP_ACCESS_TRAP;
4139 return CP_ACCESS_OK;
4142 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
4143 uint64_t value)
4145 env->daif = value & PSTATE_DAIF;
4148 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
4150 return env->pstate & PSTATE_PAN;
4153 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
4154 uint64_t value)
4156 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
4159 static const ARMCPRegInfo pan_reginfo = {
4160 .name = "PAN", .state = ARM_CP_STATE_AA64,
4161 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
4162 .type = ARM_CP_NO_RAW, .access = PL1_RW,
4163 .readfn = aa64_pan_read, .writefn = aa64_pan_write
4166 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
4168 return env->pstate & PSTATE_UAO;
4171 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
4172 uint64_t value)
4174 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
4177 static const ARMCPRegInfo uao_reginfo = {
4178 .name = "UAO", .state = ARM_CP_STATE_AA64,
4179 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
4180 .type = ARM_CP_NO_RAW, .access = PL1_RW,
4181 .readfn = aa64_uao_read, .writefn = aa64_uao_write
4184 static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
4186 return env->pstate & PSTATE_DIT;
4189 static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
4190 uint64_t value)
4192 env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
4195 static const ARMCPRegInfo dit_reginfo = {
4196 .name = "DIT", .state = ARM_CP_STATE_AA64,
4197 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
4198 .type = ARM_CP_NO_RAW, .access = PL0_RW,
4199 .readfn = aa64_dit_read, .writefn = aa64_dit_write
4202 static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
4204 return env->pstate & PSTATE_SSBS;
4207 static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
4208 uint64_t value)
4210 env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
4213 static const ARMCPRegInfo ssbs_reginfo = {
4214 .name = "SSBS", .state = ARM_CP_STATE_AA64,
4215 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
4216 .type = ARM_CP_NO_RAW, .access = PL0_RW,
4217 .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
4220 static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
4221 const ARMCPRegInfo *ri,
4222 bool isread)
4224 /* Cache invalidate/clean to Point of Coherency or Persistence... */
4225 switch (arm_current_el(env)) {
4226 case 0:
4227 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
4228 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4229 return CP_ACCESS_TRAP;
4231 /* fall through */
4232 case 1:
4233 /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */
4234 if (arm_hcr_el2_eff(env) & HCR_TPCP) {
4235 return CP_ACCESS_TRAP_EL2;
4237 break;
4239 return CP_ACCESS_OK;
4242 static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
4243 const ARMCPRegInfo *ri,
4244 bool isread)
4246 /* Cache invalidate/clean to Point of Unification... */
4247 switch (arm_current_el(env)) {
4248 case 0:
4249 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
4250 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4251 return CP_ACCESS_TRAP;
4253 /* fall through */
4254 case 1:
4255 /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */
4256 if (arm_hcr_el2_eff(env) & HCR_TPU) {
4257 return CP_ACCESS_TRAP_EL2;
4259 break;
4261 return CP_ACCESS_OK;
4264 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
4265 * Page D4-1736 (DDI0487A.b)
4268 static int vae1_tlbmask(CPUARMState *env)
4270 uint64_t hcr = arm_hcr_el2_eff(env);
4271 uint16_t mask;
4273 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4274 mask = ARMMMUIdxBit_E20_2 |
4275 ARMMMUIdxBit_E20_2_PAN |
4276 ARMMMUIdxBit_E20_0;
4277 } else {
4278 mask = ARMMMUIdxBit_E10_1 |
4279 ARMMMUIdxBit_E10_1_PAN |
4280 ARMMMUIdxBit_E10_0;
4283 if (arm_is_secure_below_el3(env)) {
4284 mask >>= ARM_MMU_IDX_A_NS;
4287 return mask;
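/*
 * For illustration (not upstream code): when HCR_EL2.{E2H,TGE} == {1,1},
 * a "TLBI VMALLE1" issued by a VHE host kernel targets the EL2&0 regime,
 * so the E20_* mmu indexes are flushed; otherwise the EL1&0 E10_* indexes
 * are, with both sets shifted to their SE* equivalents in Secure state.
 */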
4290 /* Return 56 if TBI is enabled, 64 otherwise. */
4291 static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
4292 uint64_t addr)
4294 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
4295 int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
4296 int select = extract64(addr, 55, 1);
4298 return (tbi >> select) & 1 ? 56 : 64;
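/*
 * Example (illustration only): with addr bit 55 set we are in the upper
 * (TTBR1) half of the address space, so select = 1 picks TCR.TBI1; if
 * that is set the top byte is ignored for translation and only VA bits
 * [55:0] need to match in the TLB, hence the 56 result.
 */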
4301 static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
4303 uint64_t hcr = arm_hcr_el2_eff(env);
4304 ARMMMUIdx mmu_idx;
4306 /* Only the regime of the mmu_idx below is significant. */
4307 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4308 mmu_idx = ARMMMUIdx_E20_0;
4309 } else {
4310 mmu_idx = ARMMMUIdx_E10_0;
4313 if (arm_is_secure_below_el3(env)) {
4314 mmu_idx &= ~ARM_MMU_IDX_A_NS;
4317 return tlbbits_for_regime(env, mmu_idx, addr);
4320 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4321 uint64_t value)
4323 CPUState *cs = env_cpu(env);
4324 int mask = vae1_tlbmask(env);
4326 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4329 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4330 uint64_t value)
4332 CPUState *cs = env_cpu(env);
4333 int mask = vae1_tlbmask(env);
4335 if (tlb_force_broadcast(env)) {
4336 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4337 } else {
4338 tlb_flush_by_mmuidx(cs, mask);
4342 static int alle1_tlbmask(CPUARMState *env)
4345 * Note that the 'ALL' scope must invalidate both stage 1 and
4346 * stage 2 translations, whereas most other scopes only invalidate
4347 * stage 1 translations.
4349 if (arm_is_secure_below_el3(env)) {
4350 return ARMMMUIdxBit_SE10_1 |
4351 ARMMMUIdxBit_SE10_1_PAN |
4352 ARMMMUIdxBit_SE10_0;
4353 } else {
4354 return ARMMMUIdxBit_E10_1 |
4355 ARMMMUIdxBit_E10_1_PAN |
4356 ARMMMUIdxBit_E10_0;
4360 static int e2_tlbmask(CPUARMState *env)
4362 if (arm_is_secure_below_el3(env)) {
4363 return ARMMMUIdxBit_SE20_0 |
4364 ARMMMUIdxBit_SE20_2 |
4365 ARMMMUIdxBit_SE20_2_PAN |
4366 ARMMMUIdxBit_SE2;
4367 } else {
4368 return ARMMMUIdxBit_E20_0 |
4369 ARMMMUIdxBit_E20_2 |
4370 ARMMMUIdxBit_E20_2_PAN |
4371 ARMMMUIdxBit_E2;
4375 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4376 uint64_t value)
4378 CPUState *cs = env_cpu(env);
4379 int mask = alle1_tlbmask(env);
4381 tlb_flush_by_mmuidx(cs, mask);
4384 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4385 uint64_t value)
4387 CPUState *cs = env_cpu(env);
4388 int mask = e2_tlbmask(env);
4390 tlb_flush_by_mmuidx(cs, mask);
4393 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4394 uint64_t value)
4396 ARMCPU *cpu = env_archcpu(env);
4397 CPUState *cs = CPU(cpu);
4399 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
4402 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4403 uint64_t value)
4405 CPUState *cs = env_cpu(env);
4406 int mask = alle1_tlbmask(env);
4408 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4411 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4412 uint64_t value)
4414 CPUState *cs = env_cpu(env);
4415 int mask = e2_tlbmask(env);
4417 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4420 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4421 uint64_t value)
4423 CPUState *cs = env_cpu(env);
4425 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
4428 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4429 uint64_t value)
4431 /* Invalidate by VA, EL2
4432 * Currently handles both VAE2 and VALE2, since we don't support
4433 * flush-last-level-only.
4435 CPUState *cs = env_cpu(env);
4436 int mask = e2_tlbmask(env);
4437 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4439 tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
4442 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4443 uint64_t value)
4445 /* Invalidate by VA, EL3
4446 * Currently handles both VAE3 and VALE3, since we don't support
4447 * flush-last-level-only.
4449 ARMCPU *cpu = env_archcpu(env);
4450 CPUState *cs = CPU(cpu);
4451 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4453 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
4456 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4457 uint64_t value)
4459 CPUState *cs = env_cpu(env);
4460 int mask = vae1_tlbmask(env);
4461 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4462 int bits = vae1_tlbbits(env, pageaddr);
4464 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4467 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4468 uint64_t value)
4470 /* Invalidate by VA, EL1&0 (AArch64 version).
4471 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
4472 * since we don't support flush-for-specific-ASID-only or
4473 * flush-last-level-only.
4475 CPUState *cs = env_cpu(env);
4476 int mask = vae1_tlbmask(env);
4477 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4478 int bits = vae1_tlbbits(env, pageaddr);
4480 if (tlb_force_broadcast(env)) {
4481 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4482 } else {
4483 tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
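/*
 * Illustration only: the TLBI VA payload carries VA[55:12] in value[43:0],
 * so "value << 12" re-aligns it to a page address and the 56-bit sign
 * extract propagates bit 55, keeping upper-half (TTBR1) addresses in
 * canonical negative form.
 */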
4487 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4488 uint64_t value)
4490 CPUState *cs = env_cpu(env);
4491 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4492 bool secure = arm_is_secure_below_el3(env);
4493 int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
4494 int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
4495 pageaddr);
4497 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4500 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4501 uint64_t value)
4503 CPUState *cs = env_cpu(env);
4504 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4505 int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
4507 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
4508 ARMMMUIdxBit_SE3, bits);
4511 #ifdef TARGET_AARCH64
4512 static uint64_t tlbi_aa64_range_get_length(CPUARMState *env,
4513 uint64_t value)
4515 unsigned int page_shift;
4516 unsigned int page_size_granule;
4517 uint64_t num;
4518 uint64_t scale;
4519 uint64_t exponent;
4520 uint64_t length;
4522 num = extract64(value, 39, 5);
4523 scale = extract64(value, 44, 2);
4524 page_size_granule = extract64(value, 46, 2);
4526 if (page_size_granule == 0) {
4527 qemu_log_mask(LOG_GUEST_ERROR, "Invalid page size granule %d\n",
4528 page_size_granule);
4529 return 0;
4532 page_shift = (page_size_granule - 1) * 2 + 12;
4534 exponent = (5 * scale) + 1;
4535 length = (num + 1) << (exponent + page_shift);
4537 return length;
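/*
 * Worked example (added for illustration): with a 4KB granule
 * (page_size_granule == 1, so page_shift == 12), NUM = 3 and SCALE = 1:
 *   exponent = 5 * 1 + 1 = 6
 *   length   = (3 + 1) << (6 + 12) = 1MiB
 * i.e. the range spans (NUM + 1) * 2^(5*SCALE + 1) = 256 granules.
 */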
4540 static uint64_t tlbi_aa64_range_get_base(CPUARMState *env, uint64_t value,
4541 bool two_ranges)
4543 /* TODO: ARMv8.7 FEAT_LPA2 */
4544 uint64_t pageaddr;
4546 if (two_ranges) {
4547 pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
4548 } else {
4549 pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
4552 return pageaddr;
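/*
 * Illustration only: BaseADDR occupies value[36:0] in units of the target
 * page size; for a two-range regime it is sign-extended so that TTBR1
 * (upper-half) base addresses come out negative, matching the VA layout.
 */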
4555 static void do_rvae_write(CPUARMState *env, uint64_t value,
4556 int idxmap, bool synced)
4558 ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
4559 bool two_ranges = regime_has_2_ranges(one_idx);
4560 uint64_t baseaddr, length;
4561 int bits;
4563 baseaddr = tlbi_aa64_range_get_base(env, value, two_ranges);
4564 length = tlbi_aa64_range_get_length(env, value);
4565 bits = tlbbits_for_regime(env, one_idx, baseaddr);
4567 if (synced) {
4568 tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
4569 baseaddr,
4570 length,
4571 idxmap,
4572 bits);
4573 } else {
4574 tlb_flush_range_by_mmuidx(env_cpu(env), baseaddr,
4575 length, idxmap, bits);
4579 static void tlbi_aa64_rvae1_write(CPUARMState *env,
4580 const ARMCPRegInfo *ri,
4581 uint64_t value)
4584 * Invalidate by VA range, EL1&0.
4585 * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
4586 * since we don't support flush-for-specific-ASID-only or
4587 * flush-last-level-only.
4590 do_rvae_write(env, value, vae1_tlbmask(env),
4591 tlb_force_broadcast(env));
4594 static void tlbi_aa64_rvae1is_write(CPUARMState *env,
4595 const ARMCPRegInfo *ri,
4596 uint64_t value)
4599 * Invalidate by VA range, Inner/Outer Shareable EL1&0.
4600 * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
4601 * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
4602 * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
4603 * shareable specific flushes.
4606 do_rvae_write(env, value, vae1_tlbmask(env), true);
4609 static int vae2_tlbmask(CPUARMState *env)
4611 return (arm_is_secure_below_el3(env)
4612 ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
4615 static void tlbi_aa64_rvae2_write(CPUARMState *env,
4616 const ARMCPRegInfo *ri,
4617 uint64_t value)
4620 * Invalidate by VA range, EL2.
4621 * Currently handles all of RVAE2 and RVALE2,
4622 * since we don't support flush-for-specific-ASID-only or
4623 * flush-last-level-only.
4626 do_rvae_write(env, value, vae2_tlbmask(env),
4627 tlb_force_broadcast(env));
4632 static void tlbi_aa64_rvae2is_write(CPUARMState *env,
4633 const ARMCPRegInfo *ri,
4634 uint64_t value)
4637 * Invalidate by VA range, Inner/Outer Shareable, EL2.
4638 * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
4639 * since we don't support flush-for-specific-ASID-only,
4640 * flush-last-level-only or inner/outer shareable specific flushes.
4643 do_rvae_write(env, value, vae2_tlbmask(env), true);
4647 static void tlbi_aa64_rvae3_write(CPUARMState *env,
4648 const ARMCPRegInfo *ri,
4649 uint64_t value)
4652 * Invalidate by VA range, EL3.
4653 * Currently handles all of RVAE3 and RVALE3,
4654 * since we don't support flush-for-specific-ASID-only or
4655 * flush-last-level-only.
4658 do_rvae_write(env, value, ARMMMUIdxBit_SE3,
4659 tlb_force_broadcast(env));
4662 static void tlbi_aa64_rvae3is_write(CPUARMState *env,
4663 const ARMCPRegInfo *ri,
4664 uint64_t value)
4667 * Invalidate by VA range, EL3, Inner/Outer Shareable.
4668 * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
4669 * since we don't support flush-for-specific-ASID-only,
4670 * flush-last-level-only or inner/outer shareable specific flushes.
4673 do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
4675 #endif
4677 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
4678 bool isread)
4680 int cur_el = arm_current_el(env);
4682 if (cur_el < 2) {
4683 uint64_t hcr = arm_hcr_el2_eff(env);
4685 if (cur_el == 0) {
4686 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4687 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
4688 return CP_ACCESS_TRAP_EL2;
4690 } else {
4691 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
4692 return CP_ACCESS_TRAP;
4694 if (hcr & HCR_TDZ) {
4695 return CP_ACCESS_TRAP_EL2;
4698 } else if (hcr & HCR_TDZ) {
4699 return CP_ACCESS_TRAP_EL2;
4702 return CP_ACCESS_OK;
4705 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
4707 ARMCPU *cpu = env_archcpu(env);
4708 int dzp_bit = 1 << 4;
4710 /* DZP indicates whether DC ZVA access is allowed */
4711 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
4712 dzp_bit = 0;
4714 return cpu->dcz_blocksize | dzp_bit;
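/*
 * Example (illustration only): DCZID_EL0.BS is log2 of the block size in
 * words, so dcz_blocksize = 4 advertises a 64-byte DC ZVA block
 * (2^4 words * 4 bytes); DZP (bit 4) is reported set only when the access
 * check above says DC ZVA would trap.
 */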
4717 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4718 bool isread)
4720 if (!(env->pstate & PSTATE_SP)) {
4721 /* Access to SP_EL0 is undefined if it's being used as
4722 * the stack pointer.
4724 return CP_ACCESS_TRAP_UNCATEGORIZED;
4726 return CP_ACCESS_OK;
4729 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
4731 return env->pstate & PSTATE_SP;
4734 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
4736 update_spsel(env, val);
4739 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4740 uint64_t value)
4742 ARMCPU *cpu = env_archcpu(env);
4744 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
4745 /* M bit is RAZ/WI for PMSA with no MPU implemented */
4746 value &= ~SCTLR_M;
4749 /* ??? Lots of these bits are not implemented. */
4751 if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
4752 if (ri->opc1 == 6) { /* SCTLR_EL3 */
4753 value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
4754 } else {
4755 value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
4756 SCTLR_ATA0 | SCTLR_ATA);
4760 if (raw_read(env, ri) == value) {
4761 /* Skip the TLB flush if nothing actually changed; Linux likes
4762 * to do a lot of pointless SCTLR writes.
4764 return;
4767 raw_write(env, ri, value);
4769 /* This may enable/disable the MMU, so do a TLB flush. */
4770 tlb_flush(CPU(cpu));
4772 if (ri->type & ARM_CP_SUPPRESS_TB_END) {
4774 * Normally we would always end the TB on an SCTLR write; see the
4775 * comment in ARMCPRegInfo sctlr initialization below for why Xscale
4776 * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
4777 * of hflags from the translator, so do it here.
4779 arm_rebuild_hflags(env);
4783 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
4784 bool isread)
4786 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
4787 return CP_ACCESS_TRAP_FP_EL2;
4789 if (env->cp15.cptr_el[3] & CPTR_TFP) {
4790 return CP_ACCESS_TRAP_FP_EL3;
4792 return CP_ACCESS_OK;
4795 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4796 uint64_t value)
4798 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
4801 static const ARMCPRegInfo v8_cp_reginfo[] = {
4802 /* Minimal set of EL0-visible registers. This will need to be expanded
4803 * significantly for system emulation of AArch64 CPUs.
4805 { .name = "NZCV", .state = ARM_CP_STATE_AA64,
4806 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
4807 .access = PL0_RW, .type = ARM_CP_NZCV },
4808 { .name = "DAIF", .state = ARM_CP_STATE_AA64,
4809 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
4810 .type = ARM_CP_NO_RAW,
4811 .access = PL0_RW, .accessfn = aa64_daif_access,
4812 .fieldoffset = offsetof(CPUARMState, daif),
4813 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
4814 { .name = "FPCR", .state = ARM_CP_STATE_AA64,
4815 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
4816 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4817 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
4818 { .name = "FPSR", .state = ARM_CP_STATE_AA64,
4819 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
4820 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4821 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
4822 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
4823 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
4824 .access = PL0_R, .type = ARM_CP_NO_RAW,
4825 .readfn = aa64_dczid_read },
4826 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
4827 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
4828 .access = PL0_W, .type = ARM_CP_DC_ZVA,
4829 #ifndef CONFIG_USER_ONLY
4830 /* Avoid overhead of an access check that always passes in user-mode */
4831 .accessfn = aa64_zva_access,
4832 #endif
4834 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
4835 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
4836 .access = PL1_R, .type = ARM_CP_CURRENTEL },
4837 /* Cache ops: all NOPs since we don't emulate caches */
4838 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
4839 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4840 .access = PL1_W, .type = ARM_CP_NOP,
4841 .accessfn = aa64_cacheop_pou_access },
4842 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
4843 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4844 .access = PL1_W, .type = ARM_CP_NOP,
4845 .accessfn = aa64_cacheop_pou_access },
4846 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
4847 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
4848 .access = PL0_W, .type = ARM_CP_NOP,
4849 .accessfn = aa64_cacheop_pou_access },
4850 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
4851 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
4852 .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
4853 .type = ARM_CP_NOP },
4854 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
4855 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
4856 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4857 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
4858 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
4859 .access = PL0_W, .type = ARM_CP_NOP,
4860 .accessfn = aa64_cacheop_poc_access },
4861 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
4862 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
4863 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4864 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
4865 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
4866 .access = PL0_W, .type = ARM_CP_NOP,
4867 .accessfn = aa64_cacheop_pou_access },
4868 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
4869 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
4870 .access = PL0_W, .type = ARM_CP_NOP,
4871 .accessfn = aa64_cacheop_poc_access },
4872 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
4873 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
4874 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4875 /* TLBI operations */
4876 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
4877 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
4878 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4879 .writefn = tlbi_aa64_vmalle1is_write },
4880 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
4881 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
4882 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4883 .writefn = tlbi_aa64_vae1is_write },
4884 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
4885 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
4886 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4887 .writefn = tlbi_aa64_vmalle1is_write },
4888 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
4889 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
4890 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4891 .writefn = tlbi_aa64_vae1is_write },
4892 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
4893 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
4894 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4895 .writefn = tlbi_aa64_vae1is_write },
4896 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
4897 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
4898 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4899 .writefn = tlbi_aa64_vae1is_write },
4900 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
4901 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
4902 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4903 .writefn = tlbi_aa64_vmalle1_write },
4904 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
4905 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
4906 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4907 .writefn = tlbi_aa64_vae1_write },
4908 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
4909 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
4910 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4911 .writefn = tlbi_aa64_vmalle1_write },
4912 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
4913 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
4914 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4915 .writefn = tlbi_aa64_vae1_write },
4916 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
4917 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
4918 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4919 .writefn = tlbi_aa64_vae1_write },
4920 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
4921 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
4922 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4923 .writefn = tlbi_aa64_vae1_write },
4924 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
4925 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
4926 .access = PL2_W, .type = ARM_CP_NOP },
4927 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
4928 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
4929 .access = PL2_W, .type = ARM_CP_NOP },
4930 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
4931 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4932 .access = PL2_W, .type = ARM_CP_NO_RAW,
4933 .writefn = tlbi_aa64_alle1is_write },
4934 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
4935 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
4936 .access = PL2_W, .type = ARM_CP_NO_RAW,
4937 .writefn = tlbi_aa64_alle1is_write },
4938 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
4939 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
4940 .access = PL2_W, .type = ARM_CP_NOP },
4941 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
4942 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
4943 .access = PL2_W, .type = ARM_CP_NOP },
4944 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
4945 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
4946 .access = PL2_W, .type = ARM_CP_NO_RAW,
4947 .writefn = tlbi_aa64_alle1_write },
4948 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
4949 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
4950 .access = PL2_W, .type = ARM_CP_NO_RAW,
4951 .writefn = tlbi_aa64_alle1is_write },
4952 #ifndef CONFIG_USER_ONLY
4953 /* 64 bit address translation operations */
4954 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
4955 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
4956 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4957 .writefn = ats_write64 },
4958 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
4959 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
4960 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4961 .writefn = ats_write64 },
4962 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
4963 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
4964 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4965 .writefn = ats_write64 },
4966 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
4967 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
4968 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4969 .writefn = ats_write64 },
4970 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
4971 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
4972 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4973 .writefn = ats_write64 },
4974 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
4975 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
4976 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4977 .writefn = ats_write64 },
4978 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
4979 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
4980 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4981 .writefn = ats_write64 },
4982 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
4983 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
4984 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4985 .writefn = ats_write64 },
4986 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
4987 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
4988 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
4989 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4990 .writefn = ats_write64 },
4991 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
4992 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
4993 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4994 .writefn = ats_write64 },
4995 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
4996 .type = ARM_CP_ALIAS,
4997 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
4998 .access = PL1_RW, .resetvalue = 0,
4999 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
5000 .writefn = par_write },
5001 #endif
5002 /* TLB invalidate last level of translation table walk */
5003 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
5004 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5005 .writefn = tlbimva_is_write },
5006 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
5007 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5008 .writefn = tlbimvaa_is_write },
5009 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
5010 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5011 .writefn = tlbimva_write },
5012 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
5013 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5014 .writefn = tlbimvaa_write },
5015 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
5016 .type = ARM_CP_NO_RAW, .access = PL2_W,
5017 .writefn = tlbimva_hyp_write },
5018 { .name = "TLBIMVALHIS",
5019 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
5020 .type = ARM_CP_NO_RAW, .access = PL2_W,
5021 .writefn = tlbimva_hyp_is_write },
5022 { .name = "TLBIIPAS2",
5023 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
5024 .type = ARM_CP_NOP, .access = PL2_W },
5025 { .name = "TLBIIPAS2IS",
5026 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
5027 .type = ARM_CP_NOP, .access = PL2_W },
5028 { .name = "TLBIIPAS2L",
5029 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
5030 .type = ARM_CP_NOP, .access = PL2_W },
5031 { .name = "TLBIIPAS2LIS",
5032 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
5033 .type = ARM_CP_NOP, .access = PL2_W },
5034 /* 32 bit cache operations */
5035 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
5036 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5037 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
5038 .type = ARM_CP_NOP, .access = PL1_W },
5039 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
5040 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5041 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
5042 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5043 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
5044 .type = ARM_CP_NOP, .access = PL1_W },
5045 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
5046 .type = ARM_CP_NOP, .access = PL1_W },
5047 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5048 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5049 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5050 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5051 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
5052 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5053 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
5054 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5055 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
5056 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5057 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
5058 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5059 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
5060 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5061 /* MMU Domain access control / MPU write buffer control */
5062 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
5063 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
5064 .writefn = dacr_write, .raw_writefn = raw_write,
5065 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
5066 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
5067 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
5068 .type = ARM_CP_ALIAS,
5069 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
5070 .access = PL1_RW,
5071 .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
5072 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
5073 .type = ARM_CP_ALIAS,
5074 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
5075 .access = PL1_RW,
5076 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
5077 /* We rely on the access checks not allowing the guest to write to the
5078 * state field when SPSel indicates that it's being used as the stack
5079 * pointer.
5081 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
5082 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
5083 .access = PL1_RW, .accessfn = sp_el0_access,
5084 .type = ARM_CP_ALIAS,
5085 .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
5086 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
5087 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
5088 .access = PL2_RW, .type = ARM_CP_ALIAS,
5089 .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
5090 { .name = "SPSel", .state = ARM_CP_STATE_AA64,
5091 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
5092 .type = ARM_CP_NO_RAW,
5093 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
5094 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
5095 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
5096 .type = ARM_CP_ALIAS,
5097 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
5098 .access = PL2_RW, .accessfn = fpexc32_access },
5099 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
5100 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
5101 .access = PL2_RW, .resetvalue = 0,
5102 .writefn = dacr_write, .raw_writefn = raw_write,
5103 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
5104 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
5105 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
5106 .access = PL2_RW, .resetvalue = 0,
5107 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
5108 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
5109 .type = ARM_CP_ALIAS,
5110 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
5111 .access = PL2_RW,
5112 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
5113 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
5114 .type = ARM_CP_ALIAS,
5115 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
5116 .access = PL2_RW,
5117 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
5118 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
5119 .type = ARM_CP_ALIAS,
5120 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
5121 .access = PL2_RW,
5122 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
5123 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
5124 .type = ARM_CP_ALIAS,
5125 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
5126 .access = PL2_RW,
5127 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
5128 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
5129 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
5130 .resetvalue = 0,
5131 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
5132 { .name = "SDCR", .type = ARM_CP_ALIAS,
5133 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
5134 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5135 .writefn = sdcr_write,
5136 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
5137 REGINFO_SENTINEL
5140 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
5141 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
5142 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
5143 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
5144 .access = PL2_RW,
5145 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
5146 { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
5147 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5148 .access = PL2_RW,
5149 .type = ARM_CP_CONST, .resetvalue = 0 },
5150 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
5151 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
5152 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5153 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
5154 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
5155 .access = PL2_RW,
5156 .type = ARM_CP_CONST, .resetvalue = 0 },
5157 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
5158 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
5159 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5160 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
5161 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
5162 .access = PL2_RW, .type = ARM_CP_CONST,
5163 .resetvalue = 0 },
5164 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
5165 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
5166 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5167 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
5168 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
5169 .access = PL2_RW, .type = ARM_CP_CONST,
5170 .resetvalue = 0 },
5171 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
5172 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
5173 .access = PL2_RW, .type = ARM_CP_CONST,
5174 .resetvalue = 0 },
5175 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
5176 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
5177 .access = PL2_RW, .type = ARM_CP_CONST,
5178 .resetvalue = 0 },
5179 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
5180 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
5181 .access = PL2_RW, .type = ARM_CP_CONST,
5182 .resetvalue = 0 },
5183 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
5184 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
5185 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5186 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
5187 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5188 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5189 .type = ARM_CP_CONST, .resetvalue = 0 },
5190 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
5191 .cp = 15, .opc1 = 6, .crm = 2,
5192 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5193 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
5194 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
5195 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
5196 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5197 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
5198 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
5199 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5200 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5201 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
5202 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5203 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
5204 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
5205 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5206 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
5207 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
5208 .resetvalue = 0 },
5209 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
5210 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
5211 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5212 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
5213 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
5214 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5215 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
5216 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
5217 .resetvalue = 0 },
5218 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5219 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
5220 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5221 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
5222 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
5223 .resetvalue = 0 },
5224 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
5225 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
5226 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5227 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
5228 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
5229 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5230 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
5231 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
5232 .access = PL2_RW, .accessfn = access_tda,
5233 .type = ARM_CP_CONST, .resetvalue = 0 },
5234 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
5235 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5236 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5237 .type = ARM_CP_CONST, .resetvalue = 0 },
5238 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
5239 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
5240 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5241 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
5242 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
5243 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5244 { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
5245 .type = ARM_CP_CONST,
5246 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
5247 .access = PL2_RW, .resetvalue = 0 },
5248 REGINFO_SENTINEL
5251 /* Ditto, but for registers which exist in ARMv8 but not v7 */
5252 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
5253 { .name = "HCR2", .state = ARM_CP_STATE_AA32,
5254 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
5255 .access = PL2_RW,
5256 .type = ARM_CP_CONST, .resetvalue = 0 },
5257 REGINFO_SENTINEL
5260 static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
5262 ARMCPU *cpu = env_archcpu(env);
5264 if (arm_feature(env, ARM_FEATURE_V8)) {
5265 valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */
5266 } else {
5267 valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */
5270 if (arm_feature(env, ARM_FEATURE_EL3)) {
5271 valid_mask &= ~HCR_HCD;
5272 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
5273 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
5274 * However, if we're using the SMC PSCI conduit then QEMU is
5275 * effectively acting like EL3 firmware and so the guest at
5276 * EL2 should retain the ability to prevent EL1 from being
5277 * able to make SMC calls into the ersatz firmware, so in
5278 * that case HCR.TSC should be read/write.
5280 valid_mask &= ~HCR_TSC;
5283 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5284 if (cpu_isar_feature(aa64_vh, cpu)) {
5285 valid_mask |= HCR_E2H;
5287 if (cpu_isar_feature(aa64_lor, cpu)) {
5288 valid_mask |= HCR_TLOR;
5290 if (cpu_isar_feature(aa64_pauth, cpu)) {
5291 valid_mask |= HCR_API | HCR_APK;
5293 if (cpu_isar_feature(aa64_mte, cpu)) {
5294 valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
5298 /* Clear RES0 bits. */
5299 value &= valid_mask;
5302 * These bits change the MMU setup:
5303 * HCR_VM enables stage 2 translation
5304 * HCR_PTW forbids certain page-table setups
5305 * HCR_DC disables stage1 and enables stage2 translation
5306 * HCR_DCT enables tagging on (disabled) stage1 translation
5308 if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT)) {
5309 tlb_flush(CPU(cpu));
5311 env->cp15.hcr_el2 = value;
5314 * Updates to VI and VF require us to update the status of
5315 * virtual interrupts, which are the logical OR of these bits
5316 * and the state of the input lines from the GIC. (This requires
5317 * that we have the iothread lock, which is done by marking the
5318 * reginfo structs as ARM_CP_IO.)
5319 * Note that if a write to HCR pends a VIRQ or VFIQ it is never
5320 * possible for it to be taken immediately, because VIRQ and
5321 * VFIQ are masked unless running at EL0 or EL1, and HCR
5322 * can only be written at EL2.
5324 g_assert(qemu_mutex_iothread_locked());
5325 arm_cpu_update_virq(cpu);
5326 arm_cpu_update_vfiq(cpu);
5329 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
5331 do_hcr_write(env, value, 0);
5334 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
5335 uint64_t value)
5337 /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
5338 value = deposit64(env->cp15.hcr_el2, 32, 32, value);
5339 do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
5342 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
5343 uint64_t value)
5345 /* Handle HCR write, i.e. write to low half of HCR_EL2 */
5346 value = deposit64(env->cp15.hcr_el2, 0, 32, value);
5347 do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
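/*
 * Worked example of the split AArch32 accessors above: hcr_writelow()
 * rebuilds the full 64-bit value with deposit64(), replacing only
 * bits [31:0], and passes MAKE_64BIT_MASK(32, 32) so that
 * do_hcr_write() treats the existing HCR2 half as valid and the
 * "value &= valid_mask" RES0-clearing step leaves it intact.
 * hcr_writehigh() is the mirror image for bits [63:32].
 */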
5351 * Return the effective value of HCR_EL2.
5352 * Bits that are not included here:
5353 * RW (read from SCR_EL3.RW as needed)
5355 uint64_t arm_hcr_el2_eff(CPUARMState *env)
5357 uint64_t ret = env->cp15.hcr_el2;
5359 if (!arm_is_el2_enabled(env)) {
5361 * "This register has no effect if EL2 is not enabled in the
5362 * current Security state". This is ARMv8.4-SecEL2 speak for
5363 * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
5365 * Prior to that, the language was "In an implementation that
5366 * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
5367 * as if this field is 0 for all purposes other than a direct
5368 * read or write access of HCR_EL2". With lots of enumeration
5369 * on a per-field basis. In current QEMU, this condition
5370 * is arm_is_secure_below_el3.
5372 * Since the v8.4 language applies to the entire register, and
5373 * appears to be backward compatible, use that.
5375 return 0;
5379 * For a cpu that supports both aarch64 and aarch32, we can set bits
5380 * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
5381 * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
5383 if (!arm_el_is_aa64(env, 2)) {
5384 uint64_t aa32_valid;
5387 * These bits are up-to-date as of ARMv8.6.
5388 * For HCR, it's easiest to list just the 2 bits that are invalid.
5389 * For HCR2, list those that are valid.
5391 aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
5392 aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
5393 HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
5394 ret &= aa32_valid;
5397 if (ret & HCR_TGE) {
5398 /* These bits are up-to-date as of ARMv8.6. */
5399 if (ret & HCR_E2H) {
5400 ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
5401 HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
5402 HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
5403 HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
5404 HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
5405 HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
5406 } else {
5407 ret |= HCR_FMO | HCR_IMO | HCR_AMO;
5409 ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
5410 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
5411 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
5412 HCR_TLOR);
5415 return ret;
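/*
 * Worked example for the TGE block above: if the stored value is
 * exactly HCR_TGE (E2H clear), the effective value returned here
 * additionally has FMO, IMO and AMO set, even though a direct read
 * of HCR_EL2 still returns only HCR_TGE.
 */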
5418 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5419 uint64_t value)
5422 * For A-profile AArch32 EL3, if NSACR.CP10
5423 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
5425 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
5426 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5427 value &= ~(0x3 << 10);
5428 value |= env->cp15.cptr_el[2] & (0x3 << 10);
5430 env->cp15.cptr_el[2] = value;
5433 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
5436 * For A-profile AArch32 EL3, if NSACR.CP10
5437 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
5439 uint64_t value = env->cp15.cptr_el[2];
5441 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
5442 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5443 value |= 0x3 << 10;
5445 return value;
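/*
 * Net effect of the two accessors above: for Non-secure accesses with
 * an AArch32 EL3 and NSACR.CP10 == 0, HCPTR.{TCP11,TCP10} behave as
 * RAO/WI -- cptr_el2_write() keeps the previously stored bits [11:10]
 * and cptr_el2_read() forces both bits to 1 on the way out.
 */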
5448 static const ARMCPRegInfo el2_cp_reginfo[] = {
5449 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
5450 .type = ARM_CP_IO,
5451 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5452 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
5453 .writefn = hcr_write },
5454 { .name = "HCR", .state = ARM_CP_STATE_AA32,
5455 .type = ARM_CP_ALIAS | ARM_CP_IO,
5456 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5457 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
5458 .writefn = hcr_writelow },
5459 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
5460 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
5461 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5462 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
5463 .type = ARM_CP_ALIAS,
5464 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
5465 .access = PL2_RW,
5466 .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
5467 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
5468 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
5469 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
5470 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
5471 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
5472 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
5473 { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
5474 .type = ARM_CP_ALIAS,
5475 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
5476 .access = PL2_RW,
5477 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
5478 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
5479 .type = ARM_CP_ALIAS,
5480 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
5481 .access = PL2_RW,
5482 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
5483 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
5484 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
5485 .access = PL2_RW, .writefn = vbar_write,
5486 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
5487 .resetvalue = 0 },
5488 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
5489 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
5490 .access = PL3_RW, .type = ARM_CP_ALIAS,
5491 .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
5492 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
5493 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
5494 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
5495 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
5496 .readfn = cptr_el2_read, .writefn = cptr_el2_write },
5497 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
5498 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
5499 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
5500 .resetvalue = 0 },
5501 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
5502 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
5503 .access = PL2_RW, .type = ARM_CP_ALIAS,
5504 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
5505 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
5506 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
5507 .access = PL2_RW, .type = ARM_CP_CONST,
5508 .resetvalue = 0 },
5509 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
5510 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
5511 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
5512 .access = PL2_RW, .type = ARM_CP_CONST,
5513 .resetvalue = 0 },
5514 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
5515 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
5516 .access = PL2_RW, .type = ARM_CP_CONST,
5517 .resetvalue = 0 },
5518 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
5519 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
5520 .access = PL2_RW, .type = ARM_CP_CONST,
5521 .resetvalue = 0 },
5522 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
5523 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
5524 .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
5525 /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
5526 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
5527 { .name = "VTCR", .state = ARM_CP_STATE_AA32,
5528 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5529 .type = ARM_CP_ALIAS,
5530 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5531 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
5532 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
5533 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5534 .access = PL2_RW,
5535 /* no .writefn needed as this can't cause an ASID change;
5536 * no .raw_writefn or .resetfn needed as we never use mask/base_mask
5538 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
5539 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
5540 .cp = 15, .opc1 = 6, .crm = 2,
5541 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
5542 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5543 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
5544 .writefn = vttbr_write },
5545 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
5546 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
5547 .access = PL2_RW, .writefn = vttbr_write,
5548 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
5549 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
5550 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
5551 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
5552 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
5553 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5554 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
5555 .access = PL2_RW, .resetvalue = 0,
5556 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
5557 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
5558 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
5559 .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
5560 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
5561 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
5562 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
5563 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
5564 { .name = "TLBIALLNSNH",
5565 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
5566 .type = ARM_CP_NO_RAW, .access = PL2_W,
5567 .writefn = tlbiall_nsnh_write },
5568 { .name = "TLBIALLNSNHIS",
5569 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
5570 .type = ARM_CP_NO_RAW, .access = PL2_W,
5571 .writefn = tlbiall_nsnh_is_write },
5572 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
5573 .type = ARM_CP_NO_RAW, .access = PL2_W,
5574 .writefn = tlbiall_hyp_write },
5575 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
5576 .type = ARM_CP_NO_RAW, .access = PL2_W,
5577 .writefn = tlbiall_hyp_is_write },
5578 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
5579 .type = ARM_CP_NO_RAW, .access = PL2_W,
5580 .writefn = tlbimva_hyp_write },
5581 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
5582 .type = ARM_CP_NO_RAW, .access = PL2_W,
5583 .writefn = tlbimva_hyp_is_write },
5584 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
5585 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
5586 .type = ARM_CP_NO_RAW, .access = PL2_W,
5587 .writefn = tlbi_aa64_alle2_write },
5588 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
5589 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
5590 .type = ARM_CP_NO_RAW, .access = PL2_W,
5591 .writefn = tlbi_aa64_vae2_write },
5592 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
5593 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
5594 .access = PL2_W, .type = ARM_CP_NO_RAW,
5595 .writefn = tlbi_aa64_vae2_write },
5596 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
5597 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
5598 .access = PL2_W, .type = ARM_CP_NO_RAW,
5599 .writefn = tlbi_aa64_alle2is_write },
5600 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
5601 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
5602 .type = ARM_CP_NO_RAW, .access = PL2_W,
5603 .writefn = tlbi_aa64_vae2is_write },
5604 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
5605 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
5606 .access = PL2_W, .type = ARM_CP_NO_RAW,
5607 .writefn = tlbi_aa64_vae2is_write },
5608 #ifndef CONFIG_USER_ONLY
5609 /* Unlike the other EL2-related AT operations, these must
5610 * UNDEF from EL3 if EL2 is not implemented, which is why we
5611 * define them here rather than with the rest of the AT ops.
5613 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
5614 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
5615 .access = PL2_W, .accessfn = at_s1e2_access,
5616 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
5617 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
5618 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
5619 .access = PL2_W, .accessfn = at_s1e2_access,
5620 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
5621 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
5622 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
5623 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
5624 * to behave as if SCR.NS was 1.
5626 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
5627 .access = PL2_W,
5628 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
5629 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
5630 .access = PL2_W,
5631 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
5632 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
5633 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
5634 /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
5635 * reset values as IMPDEF. We choose to reset to 3 to comply with
5636 * both ARMv7 and ARMv8.
5638 .access = PL2_RW, .resetvalue = 3,
5639 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
5640 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
5641 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
5642 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
5643 .writefn = gt_cntvoff_write,
5644 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
5645 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
5646 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
5647 .writefn = gt_cntvoff_write,
5648 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
5649 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5650 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
5651 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
5652 .type = ARM_CP_IO, .access = PL2_RW,
5653 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
5654 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
5655 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
5656 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
5657 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
5658 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
5659 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
5660 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
5661 .resetfn = gt_hyp_timer_reset,
5662 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
5663 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
5664 .type = ARM_CP_IO,
5665 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
5666 .access = PL2_RW,
5667 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
5668 .resetvalue = 0,
5669 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
5670 #endif
5671 /* The only field of MDCR_EL2 that has a defined architectural reset value
5672 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
5674 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
5675 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
5676 .access = PL2_RW, .resetvalue = PMCR_NUM_COUNTERS,
5677 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
5678 { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
5679 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5680 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5681 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
5682 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
5683 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5684 .access = PL2_RW,
5685 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
5686 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
5687 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
5688 .access = PL2_RW,
5689 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
5690 REGINFO_SENTINEL
5693 static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
5694 { .name = "HCR2", .state = ARM_CP_STATE_AA32,
5695 .type = ARM_CP_ALIAS | ARM_CP_IO,
5696 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
5697 .access = PL2_RW,
5698 .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
5699 .writefn = hcr_writehigh },
5700 REGINFO_SENTINEL
5703 static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
5704 bool isread)
5706 if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
5707 return CP_ACCESS_OK;
5709 return CP_ACCESS_TRAP_UNCATEGORIZED;
5712 static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
5713 { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
5714 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
5715 .access = PL2_RW, .accessfn = sel2_access,
5716 .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
5717 { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
5718 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
5719 .access = PL2_RW, .accessfn = sel2_access,
5720 .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
5721 REGINFO_SENTINEL
5724 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
5725 bool isread)
5727 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
5728 * At Secure EL1 it traps to EL3 or EL2.
5730 if (arm_current_el(env) == 3) {
5731 return CP_ACCESS_OK;
5733 if (arm_is_secure_below_el3(env)) {
5734 if (env->cp15.scr_el3 & SCR_EEL2) {
5735 return CP_ACCESS_TRAP_EL2;
5737 return CP_ACCESS_TRAP_EL3;
5739 /* Accesses from NS EL1 and NS EL2 are UNDEF for writes but allowed for reads. */
5740 if (isread) {
5741 return CP_ACCESS_OK;
5743 return CP_ACCESS_TRAP_UNCATEGORIZED;
5746 static const ARMCPRegInfo el3_cp_reginfo[] = {
5747 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
5748 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
5749 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
5750 .resetfn = scr_reset, .writefn = scr_write },
5751 { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
5752 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
5753 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5754 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
5755 .writefn = scr_write },
5756 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
5757 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
5758 .access = PL3_RW, .resetvalue = 0,
5759 .fieldoffset = offsetof(CPUARMState, cp15.sder) },
5760 { .name = "SDER",
5761 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
5762 .access = PL3_RW, .resetvalue = 0,
5763 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
5764 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
5765 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5766 .writefn = vbar_write, .resetvalue = 0,
5767 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
5768 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
5769 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
5770 .access = PL3_RW, .resetvalue = 0,
5771 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
5772 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
5773 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
5774 .access = PL3_RW,
5775 /* no .writefn needed as this can't cause an ASID change;
5776 * we must provide a .raw_writefn and .resetfn because we handle
5777 * reset and migration for the AArch32 TTBCR(S), which might be
5778 * using mask and base_mask.
5780 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
5781 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
5782 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
5783 .type = ARM_CP_ALIAS,
5784 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
5785 .access = PL3_RW,
5786 .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
5787 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
5788 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
5789 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
5790 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
5791 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
5792 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
5793 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
5794 .type = ARM_CP_ALIAS,
5795 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
5796 .access = PL3_RW,
5797 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
5798 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
5799 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
5800 .access = PL3_RW, .writefn = vbar_write,
5801 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
5802 .resetvalue = 0 },
5803 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
5804 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
5805 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
5806 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
5807 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
5808 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
5809 .access = PL3_RW, .resetvalue = 0,
5810 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
5811 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
5812 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
5813 .access = PL3_RW, .type = ARM_CP_CONST,
5814 .resetvalue = 0 },
5815 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
5816 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
5817 .access = PL3_RW, .type = ARM_CP_CONST,
5818 .resetvalue = 0 },
5819 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
5820 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
5821 .access = PL3_RW, .type = ARM_CP_CONST,
5822 .resetvalue = 0 },
5823 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
5824 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
5825 .access = PL3_W, .type = ARM_CP_NO_RAW,
5826 .writefn = tlbi_aa64_alle3is_write },
5827 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
5828 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
5829 .access = PL3_W, .type = ARM_CP_NO_RAW,
5830 .writefn = tlbi_aa64_vae3is_write },
5831 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
5832 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
5833 .access = PL3_W, .type = ARM_CP_NO_RAW,
5834 .writefn = tlbi_aa64_vae3is_write },
5835 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
5836 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
5837 .access = PL3_W, .type = ARM_CP_NO_RAW,
5838 .writefn = tlbi_aa64_alle3_write },
5839 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
5840 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
5841 .access = PL3_W, .type = ARM_CP_NO_RAW,
5842 .writefn = tlbi_aa64_vae3_write },
5843 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
5844 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
5845 .access = PL3_W, .type = ARM_CP_NO_RAW,
5846 .writefn = tlbi_aa64_vae3_write },
5847 REGINFO_SENTINEL
5850 #ifndef CONFIG_USER_ONLY
5851 /* Test if system register redirection is to occur in the current state. */
5852 static bool redirect_for_e2h(CPUARMState *env)
5854 return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
5857 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
5859 CPReadFn *readfn;
5861 if (redirect_for_e2h(env)) {
5862 /* Switch to the saved EL2 version of the register. */
5863 ri = ri->opaque;
5864 readfn = ri->readfn;
5865 } else {
5866 readfn = ri->orig_readfn;
5868 if (readfn == NULL) {
5869 readfn = raw_read;
5871 return readfn(env, ri);
5874 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
5875 uint64_t value)
5877 CPWriteFn *writefn;
5879 if (redirect_for_e2h(env)) {
5880 /* Switch to the saved EL2 version of the register. */
5881 ri = ri->opaque;
5882 writefn = ri->writefn;
5883 } else {
5884 writefn = ri->orig_writefn;
5886 if (writefn == NULL) {
5887 writefn = raw_write;
5889 writefn(env, ri, value);
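/*
 * The ri->opaque link consulted above is installed by
 * define_arm_vh_e2h_redirects_aliases() below. For example, when
 * running at EL2 with HCR_EL2.E2H set, an access using the TTBR0_EL1
 * encoding arrives here and is serviced from the TTBR0_EL2 reginfo
 * and state instead.
 */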
5892 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
5894 struct E2HAlias {
5895 uint32_t src_key, dst_key, new_key;
5896 const char *src_name, *dst_name, *new_name;
5897 bool (*feature)(const ARMISARegisters *id);
5900 #define K(op0, op1, crn, crm, op2) \
5901 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
5903 static const struct E2HAlias aliases[] = {
5904 { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0),
5905 "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
5906 { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2),
5907 "CPACR", "CPTR_EL2", "CPACR_EL12" },
5908 { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0),
5909 "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
5910 { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1),
5911 "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
5912 { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2),
5913 "TCR_EL1", "TCR_EL2", "TCR_EL12" },
5914 { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0),
5915 "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
5916 { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1),
5917 "ELR_EL1", "ELR_EL2", "ELR_EL12" },
5918 { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0),
5919 "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
5920 { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1),
5921 "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
5922 { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0),
5923 "ESR_EL1", "ESR_EL2", "ESR_EL12" },
5924 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0),
5925 "FAR_EL1", "FAR_EL2", "FAR_EL12" },
5926 { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
5927 "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
5928 { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
5929 "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
5930 { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
5931 "VBAR", "VBAR_EL2", "VBAR_EL12" },
5932 { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
5933 "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
5934 { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
5935 "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
5938 * Note that redirection of ZCR is mentioned in the description
5939 * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
5940 * not in the summary table.
5942 { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
5943 "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
5945 { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0),
5946 "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
5948 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
5949 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
5951 #undef K
5953 size_t i;
5955 for (i = 0; i < ARRAY_SIZE(aliases); i++) {
5956 const struct E2HAlias *a = &aliases[i];
5957 ARMCPRegInfo *src_reg, *dst_reg;
5959 if (a->feature && !a->feature(&cpu->isar)) {
5960 continue;
5963 src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key);
5964 dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key);
5965 g_assert(src_reg != NULL);
5966 g_assert(dst_reg != NULL);
5968 /* Cross-compare names to detect typos in the keys. */
5969 g_assert(strcmp(src_reg->name, a->src_name) == 0);
5970 g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
5972 /* None of the core system registers use opaque; we will. */
5973 g_assert(src_reg->opaque == NULL);
5975 /* Create alias before redirection so we dup the right data. */
5976 if (a->new_key) {
5977 ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
5978 uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t));
5979 bool ok;
5981 new_reg->name = a->new_name;
5982 new_reg->type |= ARM_CP_ALIAS;
5983 /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
5984 new_reg->access &= PL2_RW | PL3_RW;
5986 ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
5987 g_assert(ok);
5990 src_reg->opaque = dst_reg;
5991 src_reg->orig_readfn = src_reg->readfn ?: raw_read;
5992 src_reg->orig_writefn = src_reg->writefn ?: raw_write;
5993 if (!src_reg->raw_readfn) {
5994 src_reg->raw_readfn = raw_read;
5996 if (!src_reg->raw_writefn) {
5997 src_reg->raw_writefn = raw_write;
5999 src_reg->readfn = el2_e2h_read;
6000 src_reg->writefn = el2_e2h_write;
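/*
 * After this loop, the original EL1 encodings (e.g. SCTLR) dispatch
 * through el2_e2h_read/write, while the newly registered *_EL12
 * aliases keep the un-redirected accessors duplicated above and
 * therefore always reach the EL1 state from EL2/EL3.
 */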
6003 #endif
6005 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
6006 bool isread)
6008 int cur_el = arm_current_el(env);
6010 if (cur_el < 2) {
6011 uint64_t hcr = arm_hcr_el2_eff(env);
6013 if (cur_el == 0) {
6014 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
6015 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
6016 return CP_ACCESS_TRAP_EL2;
6018 } else {
6019 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
6020 return CP_ACCESS_TRAP;
6022 if (hcr & HCR_TID2) {
6023 return CP_ACCESS_TRAP_EL2;
6026 } else if (hcr & HCR_TID2) {
6027 return CP_ACCESS_TRAP_EL2;
6031 if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
6032 return CP_ACCESS_TRAP_EL2;
6035 return CP_ACCESS_OK;
6038 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
6039 uint64_t value)
6041 /* Writes to OSLAR_EL1 may update the OS lock status, which can be
6042 * read via a bit in OSLSR_EL1.
6044 int oslock;
6046 if (ri->state == ARM_CP_STATE_AA32) {
6047 oslock = (value == 0xC5ACCE55);
6048 } else {
6049 oslock = value & 1;
6052 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
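/*
 * For example, an AArch32 guest executing
 *   MCR p14, 0, r0, c1, c0, 4
 * with r0 == 0xC5ACCE55 sets OSLSR_EL1.OSLK (bit 1); any other value
 * clears it. The AArch64 OSLAR_EL1 form instead latches bit 0 of the
 * written value directly.
 */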
6055 static const ARMCPRegInfo debug_cp_reginfo[] = {
6056 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
6057 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
6058 * unlike DBGDRAR it is never accessible from EL0.
6059 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
6060 * accessor.
6062 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
6063 .access = PL0_R, .accessfn = access_tdra,
6064 .type = ARM_CP_CONST, .resetvalue = 0 },
6065 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
6066 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
6067 .access = PL1_R, .accessfn = access_tdra,
6068 .type = ARM_CP_CONST, .resetvalue = 0 },
6069 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
6070 .access = PL0_R, .accessfn = access_tdra,
6071 .type = ARM_CP_CONST, .resetvalue = 0 },
6072 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
6073 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
6074 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
6075 .access = PL1_RW, .accessfn = access_tda,
6076 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
6077 .resetvalue = 0 },
6079 * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external
6080 * Debug Communication Channel is not implemented.
6082 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
6083 .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
6084 .access = PL0_R, .accessfn = access_tda,
6085 .type = ARM_CP_CONST, .resetvalue = 0 },
6087 * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. We map all
6088 * bits, as it is unlikely a guest will care.
6089 * We don't implement the configurable EL0 access.
6091 { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32,
6092 .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
6093 .type = ARM_CP_ALIAS,
6094 .access = PL1_R, .accessfn = access_tda,
6095 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
6096 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
6097 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
6098 .access = PL1_W, .type = ARM_CP_NO_RAW,
6099 .accessfn = access_tdosa,
6100 .writefn = oslar_write },
6101 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
6102 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
6103 .access = PL1_R, .resetvalue = 10,
6104 .accessfn = access_tdosa,
6105 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
6106 /* Dummy OSDLR_EL1: 32-bit Linux will read this */
6107 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
6108 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
6109 .access = PL1_RW, .accessfn = access_tdosa,
6110 .type = ARM_CP_NOP },
6111 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
6112 * implement vector catch debug events yet.
6114 { .name = "DBGVCR",
6115 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
6116 .access = PL1_RW, .accessfn = access_tda,
6117 .type = ARM_CP_NOP },
6118 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
6119 * to save and restore a 32-bit guest's DBGVCR)
6121 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
6122 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
6123 .access = PL2_RW, .accessfn = access_tda,
6124 .type = ARM_CP_NOP },
6125 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
6126 * Channel but Linux may try to access this register. The 32-bit
6127 * alias is DBGDCCINT.
6129 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
6130 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
6131 .access = PL1_RW, .accessfn = access_tda,
6132 .type = ARM_CP_NOP },
6133 REGINFO_SENTINEL
6136 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
6137 /* 64 bit access versions of the (dummy) debug registers */
6138 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
6139 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
6140 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
6141 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
6142 REGINFO_SENTINEL
6145 /* Return the exception level to which exceptions should be taken
6146 * via SVEAccessTrap. If an exception should be routed through
6147 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
6148 * take care of raising that exception.
6149 * Cf. the ARM pseudocode function CheckSVEEnabled.
6151 int sve_exception_el(CPUARMState *env, int el)
6153 #ifndef CONFIG_USER_ONLY
6154 uint64_t hcr_el2 = arm_hcr_el2_eff(env);
6156 if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
6157 bool disabled = false;
6159 /* The CPACR.ZEN controls traps to EL1:
6160 * 0, 2 : trap EL0 and EL1 accesses
6161 * 1 : trap only EL0 accesses
6162 * 3 : trap no accesses
6164 if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
6165 disabled = true;
6166 } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
6167 disabled = el == 0;
6169 if (disabled) {
6170 /* route_to_el2 */
6171 return hcr_el2 & HCR_TGE ? 2 : 1;
6174 /* Check CPACR.FPEN. */
6175 if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
6176 disabled = true;
6177 } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
6178 disabled = el == 0;
6180 if (disabled) {
6181 return 0;
6185 /* CPTR_EL2. Since TZ and TFP are positive,
6186 * they will be zero when EL2 is not present.
6188 if (el <= 2 && arm_is_el2_enabled(env)) {
6189 if (env->cp15.cptr_el[2] & CPTR_TZ) {
6190 return 2;
6192 if (env->cp15.cptr_el[2] & CPTR_TFP) {
6193 return 0;
6197 /* CPTR_EL3. Since EZ is negative we must check for EL3. */
6198 if (arm_feature(env, ARM_FEATURE_EL3)
6199 && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
6200 return 3;
6202 #endif
6203 return 0;
6206 uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
6208 uint32_t end_len;
6210 start_len = MIN(start_len, ARM_MAX_VQ - 1);
6211 end_len = start_len;
6213 if (!test_bit(start_len, cpu->sve_vq_map)) {
6214 end_len = find_last_bit(cpu->sve_vq_map, start_len);
6215 assert(end_len < start_len);
6217 return end_len;
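/*
 * Worked example with a hypothetical vq map: if the cpu supports only
 * 128-, 256- and 512-bit vectors (sve_vq_map bits 0, 1 and 3), then a
 * request of start_len == 2 (VQ 3) fails test_bit(), and
 * find_last_bit() over bits [0, 2) yields 1, squashing the result to
 * VQ 2, i.e. 256 bits.
 */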
6221 * Given that SVE is enabled, return the vector length for EL.
6223 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
6225 ARMCPU *cpu = env_archcpu(env);
6226 uint32_t zcr_len = cpu->sve_max_vq - 1;
6228 if (el <= 1 &&
6229 (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
6230 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
6232 if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
6233 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
6235 if (arm_feature(env, ARM_FEATURE_EL3)) {
6236 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
6239 return aarch64_sve_zcr_get_valid_len(cpu, zcr_len);
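/*
 * The (HCR_E2H | HCR_TGE) test above is what makes this correct for
 * VHE: with a host kernel at EL2 and E2H == TGE == 1, an EL0 vector
 * length query must be constrained by ZCR_EL2 (and ZCR_EL3 if
 * present) but not by ZCR_EL1, so the el <= 1 clamp is skipped.
 */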
6242 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6243 uint64_t value)
6245 int cur_el = arm_current_el(env);
6246 int old_len = sve_zcr_len_for_el(env, cur_el);
6247 int new_len;
6249 /* Bits other than [3:0] are RAZ/WI. */
6250 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
6251 raw_write(env, ri, value & 0xf);
6254 * Because we arrived here, we know both FP and SVE are enabled;
6255 * otherwise we would have trapped access to the ZCR_ELn register.
6257 new_len = sve_zcr_len_for_el(env, cur_el);
6258 if (new_len < old_len) {
6259 aarch64_sve_narrow_vq(env, new_len + 1);
6263 static const ARMCPRegInfo zcr_el1_reginfo = {
6264 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
6265 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
6266 .access = PL1_RW, .type = ARM_CP_SVE,
6267 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
6268 .writefn = zcr_write, .raw_writefn = raw_write
6271 static const ARMCPRegInfo zcr_el2_reginfo = {
6272 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
6273 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
6274 .access = PL2_RW, .type = ARM_CP_SVE,
6275 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
6276 .writefn = zcr_write, .raw_writefn = raw_write
6279 static const ARMCPRegInfo zcr_no_el2_reginfo = {
6280 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
6281 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
6282 .access = PL2_RW, .type = ARM_CP_SVE,
6283 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
6286 static const ARMCPRegInfo zcr_el3_reginfo = {
6287 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
6288 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
6289 .access = PL3_RW, .type = ARM_CP_SVE,
6290 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
6291 .writefn = zcr_write, .raw_writefn = raw_write
6294 void hw_watchpoint_update(ARMCPU *cpu, int n)
6296 CPUARMState *env = &cpu->env;
6297 vaddr len = 0;
6298 vaddr wvr = env->cp15.dbgwvr[n];
6299 uint64_t wcr = env->cp15.dbgwcr[n];
6300 int mask;
6301 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
6303 if (env->cpu_watchpoint[n]) {
6304 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
6305 env->cpu_watchpoint[n] = NULL;
6308 if (!extract64(wcr, 0, 1)) {
6309 /* E bit clear : watchpoint disabled */
6310 return;
6313 switch (extract64(wcr, 3, 2)) {
6314 case 0:
6315 /* LSC 00 is reserved and must behave as if the wp is disabled */
6316 return;
6317 case 1:
6318 flags |= BP_MEM_READ;
6319 break;
6320 case 2:
6321 flags |= BP_MEM_WRITE;
6322 break;
6323 case 3:
6324 flags |= BP_MEM_ACCESS;
6325 break;
6328 /* Attempts to use both MASK and BAS fields simultaneously are
6329 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
6330 * thus generating a watchpoint for every byte in the masked region.
6332 mask = extract64(wcr, 24, 4);
6333 if (mask == 1 || mask == 2) {
6334 /* Reserved values of MASK; we must act as if the mask value was
6335 * some non-reserved value, or as if the watchpoint were disabled.
6336 * We choose the latter.
6338 return;
6339 } else if (mask) {
6340 /* Watchpoint covers an aligned area up to 2GB in size */
6341 len = 1ULL << mask;
6342 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
6343 * whether the watchpoint fires when the unmasked bits match; we opt
6344 * to generate the exceptions.
6346 wvr &= ~(len - 1);
6347 } else {
6348 /* Watchpoint covers bytes defined by the byte address select bits */
6349 int bas = extract64(wcr, 5, 8);
6350 int basstart;
6352 if (extract64(wvr, 2, 1)) {
6353 /* Deprecated case of an only 4-aligned address. BAS[7:4] are
6354 * ignored, and BAS[3:0] define which bytes to watch.
6356 bas &= 0xf;
6359 if (bas == 0) {
6360 /* This must act as if the watchpoint is disabled */
6361 return;
6364 /* The BAS bits are supposed to be programmed to indicate a contiguous
6365 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
6366 * we fire for each byte in the word/doubleword addressed by the WVR.
6367 * We choose to ignore any non-zero bits after the first range of 1s.
6369 basstart = ctz32(bas);
6370 len = cto32(bas >> basstart);
6371 wvr += basstart;
6374 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
6375 &env->cpu_watchpoint[n]);
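/*
 * Two worked examples of the WCR decode above:
 *  - MASK == 3: len = 1 << 3 = 8, and wvr is aligned down to an
 *    8-byte boundary, so the whole aligned region is watched.
 *  - MASK == 0, BAS == 0b00001100: basstart = ctz32(0xc) = 2 and
 *    len = cto32(0x3) = 2, so the two bytes at wvr + 2 are watched.
 */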
6378 void hw_watchpoint_update_all(ARMCPU *cpu)
6380 int i;
6381 CPUARMState *env = &cpu->env;
6383 /* Completely clear out existing QEMU watchpoints and our array, to
6384 * avoid possible stale entries following migration load.
6386 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
6387 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
6389 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
6390 hw_watchpoint_update(cpu, i);
6394 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6395 uint64_t value)
6397 ARMCPU *cpu = env_archcpu(env);
6398 int i = ri->crm;
6400 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
6401 * register reads back, and behaves, as if written values were sign extended.
6402 * Bits [1:0] are RES0.
6404 value = sextract64(value, 0, 49) & ~3ULL;
6406 raw_write(env, ri, value);
6407 hw_watchpoint_update(cpu, i);
6410 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6411 uint64_t value)
6413 ARMCPU *cpu = env_archcpu(env);
6414 int i = ri->crm;
6416 raw_write(env, ri, value);
6417 hw_watchpoint_update(cpu, i);
6420 void hw_breakpoint_update(ARMCPU *cpu, int n)
6422 CPUARMState *env = &cpu->env;
6423 uint64_t bvr = env->cp15.dbgbvr[n];
6424 uint64_t bcr = env->cp15.dbgbcr[n];
6425 vaddr addr;
6426 int bt;
6427 int flags = BP_CPU;
6429 if (env->cpu_breakpoint[n]) {
6430 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
6431 env->cpu_breakpoint[n] = NULL;
6434 if (!extract64(bcr, 0, 1)) {
6435 /* E bit clear : breakpoint disabled */
6436 return;
6439 bt = extract64(bcr, 20, 4);
6441 switch (bt) {
6442 case 4: /* unlinked address mismatch (reserved if AArch64) */
6443 case 5: /* linked address mismatch (reserved if AArch64) */
6444 qemu_log_mask(LOG_UNIMP,
6445 "arm: address mismatch breakpoint types not implemented\n");
6446 return;
6447 case 0: /* unlinked address match */
6448 case 1: /* linked address match */
6450 /* Bits [63:49] are hardwired to the value of bit [48]; that is,
6451 * we behave as if the register was sign extended. Bits [1:0] are
6452 * RES0. The BAS field is used to allow setting breakpoints on 16
6453 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
6454 * a bp will fire if the addresses covered by the bp and the addresses
6455 * covered by the insn overlap but the insn doesn't start at the
6456 * start of the bp address range. We choose to require the insn and
6457 * the bp to have the same address. The constraints on writing to
6458 * BAS enforced in dbgbcr_write mean we have only four cases:
6459 * 0b0000 => no breakpoint
6460 * 0b0011 => breakpoint on addr
6461 * 0b1100 => breakpoint on addr + 2
6462 * 0b1111 => breakpoint on addr
6463 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
6465 int bas = extract64(bcr, 5, 4);
6466 addr = sextract64(bvr, 0, 49) & ~3ULL;
6467 if (bas == 0) {
6468 return;
6470 if (bas == 0xc) {
6471 addr += 2;
6473 break;
6475 case 2: /* unlinked context ID match */
6476 case 8: /* unlinked VMID match (reserved if no EL2) */
6477 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
6478 qemu_log_mask(LOG_UNIMP,
6479 "arm: unlinked context breakpoint types not implemented\n");
6480 return;
6481 case 9: /* linked VMID match (reserved if no EL2) */
6482 case 11: /* linked context ID and VMID match (reserved if no EL2) */
6483 case 3: /* linked context ID match */
6484 default:
6485 /* We must generate no events for Linked context matches (unless
6486 * they are linked to by some other bp/wp, which is handled in
6487 * updates for the linking bp/wp). We choose to also generate no events
6488 * for reserved values.
6490 return;
6493 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
6496 void hw_breakpoint_update_all(ARMCPU *cpu)
6498 int i;
6499 CPUARMState *env = &cpu->env;
6501 /* Completely clear out existing QEMU breakpoints and our array, to
6502 * avoid possible stale entries following migration load.
6504 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
6505 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
6507 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
6508 hw_breakpoint_update(cpu, i);
6512 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6513 uint64_t value)
6515 ARMCPU *cpu = env_archcpu(env);
6516 int i = ri->crm;
6518 raw_write(env, ri, value);
6519 hw_breakpoint_update(cpu, i);
6522 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6523 uint64_t value)
6525 ARMCPU *cpu = env_archcpu(env);
6526 int i = ri->crm;
6528 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
6529 * copy of BAS[0].
6531 value = deposit64(value, 6, 1, extract64(value, 5, 1));
6532 value = deposit64(value, 8, 1, extract64(value, 7, 1));
6534 raw_write(env, ri, value);
6535 hw_breakpoint_update(cpu, i);
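/*
 * Worked example of the BAS mirroring above (BAS being BCR bits
 * [8:5]): a guest write of BAS == 0b0001 is stored as 0b0011
 * (breakpoint on addr) and 0b0100 becomes 0b1100 (breakpoint on
 * addr + 2), matching the four legal cases listed in
 * hw_breakpoint_update().
 */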
6538 static void define_debug_regs(ARMCPU *cpu)
6540 /* Define v7 and v8 architectural debug registers.
6541 * These are just dummy implementations for now.
6543 int i;
6544 int wrps, brps, ctx_cmps;
6547 * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot
6548 * use AArch32. Given that bit 15 is RES1, if the value is 0 then
6549 * the register must not exist for this cpu.
6551 if (cpu->isar.dbgdidr != 0) {
6552 ARMCPRegInfo dbgdidr = {
6553 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0,
6554 .opc1 = 0, .opc2 = 0,
6555 .access = PL0_R, .accessfn = access_tda,
6556 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
6558 define_one_arm_cp_reg(cpu, &dbgdidr);
6561 /* Note that all these register fields hold "number of Xs minus 1". */
6562 brps = arm_num_brps(cpu);
6563 wrps = arm_num_wrps(cpu);
6564 ctx_cmps = arm_num_ctx_cmps(cpu);
6566 assert(ctx_cmps <= brps);
6568 define_arm_cp_regs(cpu, debug_cp_reginfo);
6570 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
6571 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
6574 for (i = 0; i < brps; i++) {
6575 ARMCPRegInfo dbgregs[] = {
6576 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
6577 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
6578 .access = PL1_RW, .accessfn = access_tda,
6579 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
6580 .writefn = dbgbvr_write, .raw_writefn = raw_write
6582 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
6583 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
6584 .access = PL1_RW, .accessfn = access_tda,
6585 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
6586 .writefn = dbgbcr_write, .raw_writefn = raw_write
6588 REGINFO_SENTINEL
6590 define_arm_cp_regs(cpu, dbgregs);
6593 for (i = 0; i < wrps; i++) {
6594 ARMCPRegInfo dbgregs[] = {
6595 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
6596 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
6597 .access = PL1_RW, .accessfn = access_tda,
6598 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
6599 .writefn = dbgwvr_write, .raw_writefn = raw_write
6601 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
6602 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
6603 .access = PL1_RW, .accessfn = access_tda,
6604 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
6605 .writefn = dbgwcr_write, .raw_writefn = raw_write
6607 REGINFO_SENTINEL
6609 define_arm_cp_regs(cpu, dbgregs);
6613 static void define_pmu_regs(ARMCPU *cpu)
6616 * v7 performance monitor control register: same implementor
6617 * field as main ID register, and we implement four counters in
6618 * addition to the cycle count register.
6620 unsigned int i, pmcrn = PMCR_NUM_COUNTERS;
6621 ARMCPRegInfo pmcr = {
6622 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
6623 .access = PL0_RW,
6624 .type = ARM_CP_IO | ARM_CP_ALIAS,
6625 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
6626 .accessfn = pmreg_access, .writefn = pmcr_write,
6627 .raw_writefn = raw_write,
6629 ARMCPRegInfo pmcr64 = {
6630 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
6631 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
6632 .access = PL0_RW, .accessfn = pmreg_access,
6633 .type = ARM_CP_IO,
6634 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
6635 .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) |
6636 PMCRLC,
6637 .writefn = pmcr_write, .raw_writefn = raw_write,
6639 define_one_arm_cp_reg(cpu, &pmcr);
6640 define_one_arm_cp_reg(cpu, &pmcr64);
6641 for (i = 0; i < pmcrn; i++) {
6642 char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
6643 char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
6644 char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
6645 char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
6646 ARMCPRegInfo pmev_regs[] = {
6647 { .name = pmevcntr_name, .cp = 15, .crn = 14,
6648 .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6649 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6650 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6651 .accessfn = pmreg_access },
6652 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
6653 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
6654 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6655 .type = ARM_CP_IO,
6656 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6657 .raw_readfn = pmevcntr_rawread,
6658 .raw_writefn = pmevcntr_rawwrite },
6659 { .name = pmevtyper_name, .cp = 15, .crn = 14,
6660 .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6661 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6662 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6663 .accessfn = pmreg_access },
6664 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
6665 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
6666 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6667 .type = ARM_CP_IO,
6668 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6669 .raw_writefn = pmevtyper_rawwrite },
6670 REGINFO_SENTINEL
6671 };
6672 define_arm_cp_regs(cpu, pmev_regs);
6673 g_free(pmevcntr_name);
6674 g_free(pmevcntr_el0_name);
6675 g_free(pmevtyper_name);
6676 g_free(pmevtyper_el0_name);
6677 }
6678 if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
6679 ARMCPRegInfo v81_pmu_regs[] = {
6680 { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
6681 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
6682 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6683 .resetvalue = extract64(cpu->pmceid0, 32, 32) },
6684 { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
6685 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
6686 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6687 .resetvalue = extract64(cpu->pmceid1, 32, 32) },
6688 REGINFO_SENTINEL
6689 };
6690 define_arm_cp_regs(cpu, v81_pmu_regs);
6691 }
6692 if (cpu_isar_feature(any_pmu_8_4, cpu)) {
6693 static const ARMCPRegInfo v84_pmmir = {
6694 .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
6695 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
6696 .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6697 .resetvalue = 0
6698 };
6699 define_one_arm_cp_reg(cpu, &v84_pmmir);
6700 }
6701 }
6703 /* We don't know until after realize whether there's a GICv3
6704 * attached, and that is what registers the gicv3 sysregs.
6705 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
6706 * at runtime.
6707 */
6708 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
6709 {
6710 ARMCPU *cpu = env_archcpu(env);
6711 uint64_t pfr1 = cpu->isar.id_pfr1;
6713 if (env->gicv3state) {
6714 pfr1 |= 1 << 28;
6715 }
6716 return pfr1;
6717 }
6719 #ifndef CONFIG_USER_ONLY
6720 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
6721 {
6722 ARMCPU *cpu = env_archcpu(env);
6723 uint64_t pfr0 = cpu->isar.id_aa64pfr0;
6725 if (env->gicv3state) {
6726 pfr0 |= 1 << 24;
6727 }
6728 return pfr0;
6729 }
6730 #endif
6732 /* Shared logic between LORID and the rest of the LOR* registers.
6733 * Secure state exclusion has already been dealt with.
6734 */
6735 static CPAccessResult access_lor_ns(CPUARMState *env,
6736 const ARMCPRegInfo *ri, bool isread)
6737 {
6738 int el = arm_current_el(env);
6740 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
6741 return CP_ACCESS_TRAP_EL2;
6742 }
6743 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
6744 return CP_ACCESS_TRAP_EL3;
6745 }
6746 return CP_ACCESS_OK;
6747 }
6749 static CPAccessResult access_lor_other(CPUARMState *env,
6750 const ARMCPRegInfo *ri, bool isread)
6751 {
6752 if (arm_is_secure_below_el3(env)) {
6753 /* Access denied in secure mode. */
6754 return CP_ACCESS_TRAP;
6755 }
6756 return access_lor_ns(env, ri, isread);
6757 }
6759 /*
6760 * A trivial implementation of ARMv8.1-LOR leaves all of these
6761 * registers fixed at 0, which indicates that there are zero
6762 * supported Limited Ordering regions.
6763 */
6764 static const ARMCPRegInfo lor_reginfo[] = {
6765 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
6766 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
6767 .access = PL1_RW, .accessfn = access_lor_other,
6768 .type = ARM_CP_CONST, .resetvalue = 0 },
6769 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
6770 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
6771 .access = PL1_RW, .accessfn = access_lor_other,
6772 .type = ARM_CP_CONST, .resetvalue = 0 },
6773 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
6774 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
6775 .access = PL1_RW, .accessfn = access_lor_other,
6776 .type = ARM_CP_CONST, .resetvalue = 0 },
6777 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
6778 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
6779 .access = PL1_RW, .accessfn = access_lor_other,
6780 .type = ARM_CP_CONST, .resetvalue = 0 },
6781 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
6782 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
6783 .access = PL1_R, .accessfn = access_lor_ns,
6784 .type = ARM_CP_CONST, .resetvalue = 0 },
6785 REGINFO_SENTINEL
6786 };
6788 #ifdef TARGET_AARCH64
6789 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
6790 bool isread)
6791 {
6792 int el = arm_current_el(env);
6794 if (el < 2 &&
6795 arm_feature(env, ARM_FEATURE_EL2) &&
6796 !(arm_hcr_el2_eff(env) & HCR_APK)) {
6797 return CP_ACCESS_TRAP_EL2;
6798 }
6799 if (el < 3 &&
6800 arm_feature(env, ARM_FEATURE_EL3) &&
6801 !(env->cp15.scr_el3 & SCR_APK)) {
6802 return CP_ACCESS_TRAP_EL3;
6803 }
6804 return CP_ACCESS_OK;
6805 }
6807 static const ARMCPRegInfo pauth_reginfo[] = {
6808 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6809 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
6810 .access = PL1_RW, .accessfn = access_pauth,
6811 .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
6812 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6813 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
6814 .access = PL1_RW, .accessfn = access_pauth,
6815 .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
6816 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6817 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
6818 .access = PL1_RW, .accessfn = access_pauth,
6819 .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
6820 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6821 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
6822 .access = PL1_RW, .accessfn = access_pauth,
6823 .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
6824 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6825 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
6826 .access = PL1_RW, .accessfn = access_pauth,
6827 .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
6828 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6829 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
6830 .access = PL1_RW, .accessfn = access_pauth,
6831 .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
6832 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6833 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
6834 .access = PL1_RW, .accessfn = access_pauth,
6835 .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
6836 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6837 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
6838 .access = PL1_RW, .accessfn = access_pauth,
6839 .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
6840 { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6841 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
6842 .access = PL1_RW, .accessfn = access_pauth,
6843 .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
6844 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6845 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
6846 .access = PL1_RW, .accessfn = access_pauth,
6847 .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
6848 REGINFO_SENTINEL
6849 };
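/*
 * The TLBI_R* operations below are the FEAT_TLBIRANGE (ARMv8.4)
 * range invalidations. The OS (outer shareable) forms reuse the
 * IS writefns, since QEMU broadcasts invalidations to all CPUs
 * in either case.
 */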
6851 static const ARMCPRegInfo tlbirange_reginfo[] = {
6852 { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
6853 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
6854 .access = PL1_W, .type = ARM_CP_NO_RAW,
6855 .writefn = tlbi_aa64_rvae1is_write },
6856 { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
6857 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
6858 .access = PL1_W, .type = ARM_CP_NO_RAW,
6859 .writefn = tlbi_aa64_rvae1is_write },
6860 { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
6861 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
6862 .access = PL1_W, .type = ARM_CP_NO_RAW,
6863 .writefn = tlbi_aa64_rvae1is_write },
6864 { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
6865 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
6866 .access = PL1_W, .type = ARM_CP_NO_RAW,
6867 .writefn = tlbi_aa64_rvae1is_write },
6868 { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
6869 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
6870 .access = PL1_W, .type = ARM_CP_NO_RAW,
6871 .writefn = tlbi_aa64_rvae1is_write },
6872 { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
6873 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
6874 .access = PL1_W, .type = ARM_CP_NO_RAW,
6875 .writefn = tlbi_aa64_rvae1is_write },
6876 { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
6877 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
6878 .access = PL1_W, .type = ARM_CP_NO_RAW,
6879 .writefn = tlbi_aa64_rvae1is_write },
6880 { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
6881 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
6882 .access = PL1_W, .type = ARM_CP_NO_RAW,
6883 .writefn = tlbi_aa64_rvae1is_write },
6884 { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
6885 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
6886 .access = PL1_W, .type = ARM_CP_NO_RAW,
6887 .writefn = tlbi_aa64_rvae1_write },
6888 { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
6889 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
6890 .access = PL1_W, .type = ARM_CP_NO_RAW,
6891 .writefn = tlbi_aa64_rvae1_write },
6892 { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
6893 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
6894 .access = PL1_W, .type = ARM_CP_NO_RAW,
6895 .writefn = tlbi_aa64_rvae1_write },
6896 { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
6897 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
6898 .access = PL1_W, .type = ARM_CP_NO_RAW,
6899 .writefn = tlbi_aa64_rvae1_write },
6900 { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
6901 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
6902 .access = PL2_W, .type = ARM_CP_NOP },
6903 { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
6904 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
6905 .access = PL2_W, .type = ARM_CP_NOP },
6906 { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
6907 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
6908 .access = PL2_W, .type = ARM_CP_NO_RAW,
6909 .writefn = tlbi_aa64_rvae2is_write },
6910 { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
6911 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
6912 .access = PL2_W, .type = ARM_CP_NO_RAW,
6913 .writefn = tlbi_aa64_rvae2is_write },
6914 { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
6915 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
6916 .access = PL2_W, .type = ARM_CP_NOP },
6917 { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
6918 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
6919 .access = PL2_W, .type = ARM_CP_NOP },
6920 { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
6921 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
6922 .access = PL2_W, .type = ARM_CP_NO_RAW,
6923 .writefn = tlbi_aa64_rvae2is_write },
6924 { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
6925 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
6926 .access = PL2_W, .type = ARM_CP_NO_RAW,
6927 .writefn = tlbi_aa64_rvae2is_write },
6928 { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
6929 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
6930 .access = PL2_W, .type = ARM_CP_NO_RAW,
6931 .writefn = tlbi_aa64_rvae2_write },
6932 { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
6933 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
6934 .access = PL2_W, .type = ARM_CP_NO_RAW,
6935 .writefn = tlbi_aa64_rvae2_write },
6936 { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
6937 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
6938 .access = PL3_W, .type = ARM_CP_NO_RAW,
6939 .writefn = tlbi_aa64_rvae3is_write },
6940 { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
6941 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
6942 .access = PL3_W, .type = ARM_CP_NO_RAW,
6943 .writefn = tlbi_aa64_rvae3is_write },
6944 { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
6945 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
6946 .access = PL3_W, .type = ARM_CP_NO_RAW,
6947 .writefn = tlbi_aa64_rvae3is_write },
6948 { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
6949 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
6950 .access = PL3_W, .type = ARM_CP_NO_RAW,
6951 .writefn = tlbi_aa64_rvae3is_write },
6952 { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
6953 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
6954 .access = PL3_W, .type = ARM_CP_NO_RAW,
6955 .writefn = tlbi_aa64_rvae3_write },
6956 { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
6957 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
6958 .access = PL3_W, .type = ARM_CP_NO_RAW,
6959 .writefn = tlbi_aa64_rvae3_write },
6960 REGINFO_SENTINEL
6961 };
6963 static const ARMCPRegInfo tlbios_reginfo[] = {
6964 { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
6965 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
6966 .access = PL1_W, .type = ARM_CP_NO_RAW,
6967 .writefn = tlbi_aa64_vmalle1is_write },
6968 { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
6969 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
6970 .access = PL1_W, .type = ARM_CP_NO_RAW,
6971 .writefn = tlbi_aa64_vae1is_write },
6972 { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
6973 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
6974 .access = PL1_W, .type = ARM_CP_NO_RAW,
6975 .writefn = tlbi_aa64_vmalle1is_write },
6976 { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
6977 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
6978 .access = PL1_W, .type = ARM_CP_NO_RAW,
6979 .writefn = tlbi_aa64_vae1is_write },
6980 { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
6981 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
6982 .access = PL1_W, .type = ARM_CP_NO_RAW,
6983 .writefn = tlbi_aa64_vae1is_write },
6984 { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
6985 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
6986 .access = PL1_W, .type = ARM_CP_NO_RAW,
6987 .writefn = tlbi_aa64_vae1is_write },
6988 { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
6989 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
6990 .access = PL2_W, .type = ARM_CP_NO_RAW,
6991 .writefn = tlbi_aa64_alle2is_write },
6992 { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
6993 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
6994 .access = PL2_W, .type = ARM_CP_NO_RAW,
6995 .writefn = tlbi_aa64_vae2is_write },
6996 { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
6997 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
6998 .access = PL2_W, .type = ARM_CP_NO_RAW,
6999 .writefn = tlbi_aa64_alle1is_write },
7000 { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
7001 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
7002 .access = PL2_W, .type = ARM_CP_NO_RAW,
7003 .writefn = tlbi_aa64_vae2is_write },
7004 { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
7005 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
7006 .access = PL2_W, .type = ARM_CP_NO_RAW,
7007 .writefn = tlbi_aa64_alle1is_write },
7008 { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
7009 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
7010 .access = PL2_W, .type = ARM_CP_NOP },
7011 { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
7012 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
7013 .access = PL2_W, .type = ARM_CP_NOP },
7014 { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
7015 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
7016 .access = PL2_W, .type = ARM_CP_NOP },
7017 { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
7018 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
7019 .access = PL2_W, .type = ARM_CP_NOP },
7020 { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
7021 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
7022 .access = PL3_W, .type = ARM_CP_NO_RAW,
7023 .writefn = tlbi_aa64_alle3is_write },
7024 { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
7025 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
7026 .access = PL3_W, .type = ARM_CP_NO_RAW,
7027 .writefn = tlbi_aa64_vae3is_write },
7028 { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
7029 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
7030 .access = PL3_W, .type = ARM_CP_NO_RAW,
7031 .writefn = tlbi_aa64_vae3is_write },
7032 REGINFO_SENTINEL
7033 };
7035 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
7036 {
7037 Error *err = NULL;
7038 uint64_t ret;
7040 /* Success sets NZCV = 0000. */
7041 env->NF = env->CF = env->VF = 0, env->ZF = 1;
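/* Note that QEMU stores ZF inverted: the architectural Z flag reads
 * as set when env->ZF == 0, so ZF = 1 here encodes Z == 0. */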
7043 if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
7044 /*
7045 * ??? Failed, for unknown reasons in the crypto subsystem.
7046 * The best we can do is log the reason and return the
7047 * timed-out indication to the guest. There is no reason
7048 * we know to expect this failure to be transitory, so the
7049 * guest may well hang retrying the operation.
7050 */
7051 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
7052 ri->name, error_get_pretty(err));
7053 error_free(err);
7055 env->ZF = 0; /* NZCV = 0100 */
7056 return 0;
7057 }
7058 return ret;
7059 }
7061 /* We do not support re-seeding, so the two registers operate the same. */
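/* Both are flagged ARM_CP_IO (treated like an I/O access, e.g. under
 * icount) and ARM_CP_SUPPRESS_TB_END so a read does not end the TB. */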
7062 static const ARMCPRegInfo rndr_reginfo[] = {
7063 { .name = "RNDR", .state = ARM_CP_STATE_AA64,
7064 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
7065 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
7066 .access = PL0_R, .readfn = rndr_readfn },
7067 { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
7068 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
7069 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
7070 .access = PL0_R, .readfn = rndr_readfn },
7071 REGINFO_SENTINEL
7072 };
7074 #ifndef CONFIG_USER_ONLY
7075 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
7076 uint64_t value)
7077 {
7078 ARMCPU *cpu = env_archcpu(env);
7079 /* CTR_EL0 System register -> DminLine, bits [19:16] */
7080 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
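/* DminLine is log2 of the line size in 4-byte words, so e.g. a field
 * value of 4 yields 4 << 4 = 64-byte data cache lines. */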
7081 uint64_t vaddr_in = (uint64_t) value;
7082 uint64_t vaddr = vaddr_in & ~(dline_size - 1);
7083 void *haddr;
7084 int mem_idx = cpu_mmu_index(env, false);
7086 /* This won't be crossing page boundaries */
7087 haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
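/* probe_read returns a host pointer only for directly-addressable RAM;
 * a NULL result (e.g. MMIO) means there is nothing we can write back. */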
7088 if (haddr) {
7090 ram_addr_t offset;
7091 MemoryRegion *mr;
7093 /* RCU lock is already being held */
7094 mr = memory_region_from_host(haddr, &offset);
7096 if (mr) {
7097 memory_region_writeback(mr, offset, dline_size);
7098 }
7099 }
7100 }
7102 static const ARMCPRegInfo dcpop_reg[] = {
7103 { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
7104 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
7105 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
7106 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
7107 REGINFO_SENTINEL
7108 };
7110 static const ARMCPRegInfo dcpodp_reg[] = {
7111 { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
7112 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
7113 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
7114 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
7115 REGINFO_SENTINEL
7116 };
7117 #endif /*CONFIG_USER_ONLY*/
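/* HCR_EL2.TID5 traps reads of GMID_EL1 from EL1 to EL2; GMID_EL1
 * below is the only register guarded by this check. */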
7119 static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
7120 bool isread)
7121 {
7122 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
7123 return CP_ACCESS_TRAP_EL2;
7124 }
7126 return CP_ACCESS_OK;
7127 }
7129 static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
7130 bool isread)
7131 {
7132 int el = arm_current_el(env);
7134 if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
7135 uint64_t hcr = arm_hcr_el2_eff(env);
7136 if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
7137 return CP_ACCESS_TRAP_EL2;
7138 }
7139 }
7140 if (el < 3 &&
7141 arm_feature(env, ARM_FEATURE_EL3) &&
7142 !(env->cp15.scr_el3 & SCR_ATA)) {
7143 return CP_ACCESS_TRAP_EL3;
7144 }
7145 return CP_ACCESS_OK;
7146 }
7148 static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
7149 {
7150 return env->pstate & PSTATE_TCO;
7151 }
7153 static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
7154 {
7155 env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
7156 }
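/* PSTATE.TCO is a processor state bit, so the TCO accessors above just
 * mirror env->pstate; the reginfo below marks TCO as ARM_CP_NO_RAW since
 * the underlying state migrates as part of pstate. */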
7158 static const ARMCPRegInfo mte_reginfo[] = {
7159 { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
7160 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
7161 .access = PL1_RW, .accessfn = access_mte,
7162 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
7163 { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
7164 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
7165 .access = PL1_RW, .accessfn = access_mte,
7166 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
7167 { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
7168 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
7169 .access = PL2_RW, .accessfn = access_mte,
7170 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
7171 { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
7172 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
7173 .access = PL3_RW,
7174 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
7175 { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
7176 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
7177 .access = PL1_RW, .accessfn = access_mte,
7178 .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
7179 { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
7180 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
7181 .access = PL1_RW, .accessfn = access_mte,
7182 .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
7183 { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
7184 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
7185 .access = PL1_R, .accessfn = access_aa64_tid5,
7186 .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
7187 { .name = "TCO", .state = ARM_CP_STATE_AA64,
7188 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
7189 .type = ARM_CP_NO_RAW,
7190 .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
7191 { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
7192 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
7193 .type = ARM_CP_NOP, .access = PL1_W,
7194 .accessfn = aa64_cacheop_poc_access },
7195 { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
7196 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
7197 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7198 { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
7199 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
7200 .type = ARM_CP_NOP, .access = PL1_W,
7201 .accessfn = aa64_cacheop_poc_access },
7202 { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
7203 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
7204 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7205 { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
7206 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
7207 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7208 { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
7209 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
7210 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7211 { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
7212 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
7213 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7214 { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
7215 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
7216 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7217 REGINFO_SENTINEL
7218 };
7220 static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
7221 { .name = "TCO", .state = ARM_CP_STATE_AA64,
7222 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
7223 .type = ARM_CP_CONST, .access = PL0_RW, },
7224 REGINFO_SENTINEL
7225 };
7227 static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
7228 { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
7229 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
7230 .type = ARM_CP_NOP, .access = PL0_W,
7231 .accessfn = aa64_cacheop_poc_access },
7232 { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
7233 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
7234 .type = ARM_CP_NOP, .access = PL0_W,
7235 .accessfn = aa64_cacheop_poc_access },
7236 { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
7237 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
7238 .type = ARM_CP_NOP, .access = PL0_W,
7239 .accessfn = aa64_cacheop_poc_access },
7240 { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
7241 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
7242 .type = ARM_CP_NOP, .access = PL0_W,
7243 .accessfn = aa64_cacheop_poc_access },
7244 { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
7245 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
7246 .type = ARM_CP_NOP, .access = PL0_W,
7247 .accessfn = aa64_cacheop_poc_access },
7248 { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
7249 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
7250 .type = ARM_CP_NOP, .access = PL0_W,
7251 .accessfn = aa64_cacheop_poc_access },
7252 { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
7253 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
7254 .type = ARM_CP_NOP, .access = PL0_W,
7255 .accessfn = aa64_cacheop_poc_access },
7256 { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
7257 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
7258 .type = ARM_CP_NOP, .access = PL0_W,
7259 .accessfn = aa64_cacheop_poc_access },
7260 { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
7261 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
7262 .access = PL0_W, .type = ARM_CP_DC_GVA,
7263 #ifndef CONFIG_USER_ONLY
7264 /* Avoid overhead of an access check that always passes in user-mode */
7265 .accessfn = aa64_zva_access,
7266 #endif
7267 },
7268 { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
7269 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
7270 .access = PL0_W, .type = ARM_CP_DC_GZVA,
7271 #ifndef CONFIG_USER_ONLY
7272 /* Avoid overhead of an access check that always passes in user-mode */
7273 .accessfn = aa64_zva_access,
7274 #endif
7275 },
7276 REGINFO_SENTINEL
7277 };
7279 #endif
7281 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
7282 bool isread)
7283 {
7284 int el = arm_current_el(env);
7286 if (el == 0) {
7287 uint64_t sctlr = arm_sctlr(env, el);
7288 if (!(sctlr & SCTLR_EnRCTX)) {
7289 return CP_ACCESS_TRAP;
7290 }
7291 } else if (el == 1) {
7292 uint64_t hcr = arm_hcr_el2_eff(env);
7293 if (hcr & HCR_NV) {
7294 return CP_ACCESS_TRAP_EL2;
7295 }
7296 }
7297 return CP_ACCESS_OK;
7298 }
7300 static const ARMCPRegInfo predinv_reginfo[] = {
7301 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
7302 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
7303 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7304 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
7305 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
7306 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7307 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
7308 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
7309 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7310 /*
7311 * Note the AArch32 opcodes have a different OPC1.
7312 */
7313 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
7314 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
7315 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7316 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
7317 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
7318 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7319 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
7320 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
7321 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7322 REGINFO_SENTINEL
7323 };
7325 static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
7326 {
7327 /* Read the high 32 bits of the current CCSIDR */
7328 return extract64(ccsidr_read(env, ri), 32, 32);
7329 }
7331 static const ARMCPRegInfo ccsidr2_reginfo[] = {
7332 { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
7333 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
7334 .access = PL1_R,
7335 .accessfn = access_aa64_tid2,
7336 .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
7337 REGINFO_SENTINEL
7338 };
7340 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
7341 bool isread)
7342 {
7343 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
7344 return CP_ACCESS_TRAP_EL2;
7345 }
7347 return CP_ACCESS_OK;
7348 }
7350 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
7351 bool isread)
7352 {
7353 if (arm_feature(env, ARM_FEATURE_V8)) {
7354 return access_aa64_tid3(env, ri, isread);
7355 }
7357 return CP_ACCESS_OK;
7358 }
7360 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
7361 bool isread)
7362 {
7363 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
7364 return CP_ACCESS_TRAP_EL2;
7365 }
7367 return CP_ACCESS_OK;
7368 }
7370 static CPAccessResult access_joscr_jmcr(CPUARMState *env,
7371 const ARMCPRegInfo *ri, bool isread)
7372 {
7373 /*
7374 * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
7375 * in v7A, not in v8A.
7376 */
7377 if (!arm_feature(env, ARM_FEATURE_V8) &&
7378 arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
7379 (env->cp15.hstr_el2 & HSTR_TJDBX)) {
7380 return CP_ACCESS_TRAP_EL2;
7381 }
7382 return CP_ACCESS_OK;
7383 }
7385 static const ARMCPRegInfo jazelle_regs[] = {
7386 { .name = "JIDR",
7387 .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
7388 .access = PL1_R, .accessfn = access_jazelle,
7389 .type = ARM_CP_CONST, .resetvalue = 0 },
7390 { .name = "JOSCR",
7391 .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
7392 .accessfn = access_joscr_jmcr,
7393 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
7394 { .name = "JMCR",
7395 .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
7396 .accessfn = access_joscr_jmcr,
7397 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
7398 REGINFO_SENTINEL
7399 };
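/*
 * ARMv8.1-VHE additions: CONTEXTIDR_EL2, TTBR1_EL2, the EL2 virtual
 * timer (CNTHV_*), and the *_EL02 aliases of the EL0/EL1 timers.
 * The e2h_access checks on the *_EL02 forms enforce that HCR_EL2.E2H
 * is set before the aliases are usable.
 */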
7401 static const ARMCPRegInfo vhe_reginfo[] = {
7402 { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
7403 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
7404 .access = PL2_RW,
7405 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
7406 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
7407 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
7408 .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
7409 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
7410 #ifndef CONFIG_USER_ONLY
7411 { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
7412 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
7413 .fieldoffset =
7414 offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
7415 .type = ARM_CP_IO, .access = PL2_RW,
7416 .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
7417 { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
7418 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
7419 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
7420 .resetfn = gt_hv_timer_reset,
7421 .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
7422 { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
7423 .type = ARM_CP_IO,
7424 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
7425 .access = PL2_RW,
7426 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
7427 .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
7428 { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
7429 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
7430 .type = ARM_CP_IO | ARM_CP_ALIAS,
7431 .access = PL2_RW, .accessfn = e2h_access,
7432 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
7433 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
7434 { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
7435 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
7436 .type = ARM_CP_IO | ARM_CP_ALIAS,
7437 .access = PL2_RW, .accessfn = e2h_access,
7438 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
7439 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
7440 { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
7441 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
7442 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
7443 .access = PL2_RW, .accessfn = e2h_access,
7444 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
7445 { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
7446 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
7447 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
7448 .access = PL2_RW, .accessfn = e2h_access,
7449 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
7450 { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
7451 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
7452 .type = ARM_CP_IO | ARM_CP_ALIAS,
7453 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
7454 .access = PL2_RW, .accessfn = e2h_access,
7455 .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
7456 { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
7457 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
7458 .type = ARM_CP_IO | ARM_CP_ALIAS,
7459 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
7460 .access = PL2_RW, .accessfn = e2h_access,
7461 .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
7462 #endif
7463 REGINFO_SENTINEL
7464 };
7466 #ifndef CONFIG_USER_ONLY
7467 static const ARMCPRegInfo ats1e1_reginfo[] = {
7468 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
7469 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
7470 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7471 .writefn = ats_write64 },
7472 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
7473 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
7474 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7475 .writefn = ats_write64 },
7476 REGINFO_SENTINEL
7477 };
7479 static const ARMCPRegInfo ats1cp_reginfo[] = {
7480 { .name = "ATS1CPRP",
7481 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
7482 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7483 .writefn = ats_write },
7484 { .name = "ATS1CPWP",
7485 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
7486 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7487 .writefn = ats_write },
7488 REGINFO_SENTINEL
7489 };
7490 #endif
7492 /*
7493 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
7494 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
7495 * is non-zero, which is never for ARMv7, optionally in ARMv8
7496 * and mandatorily for ARMv8.2 and up.
7497 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
7498 * implementation is RAZ/WI we can ignore this detail, as we
7499 * do for ACTLR.
7500 */
7501 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
7502 { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
7503 .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
7504 .access = PL1_RW, .accessfn = access_tacr,
7505 .type = ARM_CP_CONST, .resetvalue = 0 },
7506 { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
7507 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
7508 .access = PL2_RW, .type = ARM_CP_CONST,
7509 .resetvalue = 0 },
7510 REGINFO_SENTINEL
7511 };
7513 void register_cp_regs_for_features(ARMCPU *cpu)
7514 {
7515 /* Register all the coprocessor registers based on feature bits */
7516 CPUARMState *env = &cpu->env;
7517 if (arm_feature(env, ARM_FEATURE_M)) {
7518 /* M profile has no coprocessor registers */
7519 return;
7520 }
7522 define_arm_cp_regs(cpu, cp_reginfo);
7523 if (!arm_feature(env, ARM_FEATURE_V8)) {
7524 /* Must go early as it is full of wildcards that may be
7525 * overridden by later definitions.
7526 */
7527 define_arm_cp_regs(cpu, not_v8_cp_reginfo);
7528 }
7530 if (arm_feature(env, ARM_FEATURE_V6)) {
7531 /* The ID registers all have impdef reset values */
7532 ARMCPRegInfo v6_idregs[] = {
7533 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
7534 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
7535 .access = PL1_R, .type = ARM_CP_CONST,
7536 .accessfn = access_aa32_tid3,
7537 .resetvalue = cpu->isar.id_pfr0 },
7538 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
7539 * the value of the GIC field until after we define these regs.
7540 */
7541 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
7542 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
7543 .access = PL1_R, .type = ARM_CP_NO_RAW,
7544 .accessfn = access_aa32_tid3,
7545 .readfn = id_pfr1_read,
7546 .writefn = arm_cp_write_ignore },
7547 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
7548 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
7549 .access = PL1_R, .type = ARM_CP_CONST,
7550 .accessfn = access_aa32_tid3,
7551 .resetvalue = cpu->isar.id_dfr0 },
7552 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
7553 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
7554 .access = PL1_R, .type = ARM_CP_CONST,
7555 .accessfn = access_aa32_tid3,
7556 .resetvalue = cpu->id_afr0 },
7557 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
7558 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
7559 .access = PL1_R, .type = ARM_CP_CONST,
7560 .accessfn = access_aa32_tid3,
7561 .resetvalue = cpu->isar.id_mmfr0 },
7562 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
7563 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
7564 .access = PL1_R, .type = ARM_CP_CONST,
7565 .accessfn = access_aa32_tid3,
7566 .resetvalue = cpu->isar.id_mmfr1 },
7567 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
7568 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
7569 .access = PL1_R, .type = ARM_CP_CONST,
7570 .accessfn = access_aa32_tid3,
7571 .resetvalue = cpu->isar.id_mmfr2 },
7572 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
7573 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
7574 .access = PL1_R, .type = ARM_CP_CONST,
7575 .accessfn = access_aa32_tid3,
7576 .resetvalue = cpu->isar.id_mmfr3 },
7577 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
7578 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
7579 .access = PL1_R, .type = ARM_CP_CONST,
7580 .accessfn = access_aa32_tid3,
7581 .resetvalue = cpu->isar.id_isar0 },
7582 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
7583 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
7584 .access = PL1_R, .type = ARM_CP_CONST,
7585 .accessfn = access_aa32_tid3,
7586 .resetvalue = cpu->isar.id_isar1 },
7587 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
7588 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
7589 .access = PL1_R, .type = ARM_CP_CONST,
7590 .accessfn = access_aa32_tid3,
7591 .resetvalue = cpu->isar.id_isar2 },
7592 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
7593 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
7594 .access = PL1_R, .type = ARM_CP_CONST,
7595 .accessfn = access_aa32_tid3,
7596 .resetvalue = cpu->isar.id_isar3 },
7597 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
7598 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
7599 .access = PL1_R, .type = ARM_CP_CONST,
7600 .accessfn = access_aa32_tid3,
7601 .resetvalue = cpu->isar.id_isar4 },
7602 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
7603 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
7604 .access = PL1_R, .type = ARM_CP_CONST,
7605 .accessfn = access_aa32_tid3,
7606 .resetvalue = cpu->isar.id_isar5 },
7607 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
7608 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
7609 .access = PL1_R, .type = ARM_CP_CONST,
7610 .accessfn = access_aa32_tid3,
7611 .resetvalue = cpu->isar.id_mmfr4 },
7612 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
7613 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
7614 .access = PL1_R, .type = ARM_CP_CONST,
7615 .accessfn = access_aa32_tid3,
7616 .resetvalue = cpu->isar.id_isar6 },
7617 REGINFO_SENTINEL
7618 };
7619 define_arm_cp_regs(cpu, v6_idregs);
7620 define_arm_cp_regs(cpu, v6_cp_reginfo);
7621 } else {
7622 define_arm_cp_regs(cpu, not_v6_cp_reginfo);
7623 }
7624 if (arm_feature(env, ARM_FEATURE_V6K)) {
7625 define_arm_cp_regs(cpu, v6k_cp_reginfo);
7626 }
7627 if (arm_feature(env, ARM_FEATURE_V7MP) &&
7628 !arm_feature(env, ARM_FEATURE_PMSA)) {
7629 define_arm_cp_regs(cpu, v7mp_cp_reginfo);
7630 }
7631 if (arm_feature(env, ARM_FEATURE_V7VE)) {
7632 define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
7633 }
7634 if (arm_feature(env, ARM_FEATURE_V7)) {
7635 ARMCPRegInfo clidr = {
7636 .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
7637 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
7638 .access = PL1_R, .type = ARM_CP_CONST,
7639 .accessfn = access_aa64_tid2,
7640 .resetvalue = cpu->clidr
7641 };
7642 define_one_arm_cp_reg(cpu, &clidr);
7643 define_arm_cp_regs(cpu, v7_cp_reginfo);
7644 define_debug_regs(cpu);
7645 define_pmu_regs(cpu);
7646 } else {
7647 define_arm_cp_regs(cpu, not_v7_cp_reginfo);
7648 }
7649 if (arm_feature(env, ARM_FEATURE_V8)) {
7650 /* AArch64 ID registers, which all have impdef reset values.
7651 * Note that within the ID register ranges the unused slots
7652 * must all RAZ, not UNDEF; future architecture versions may
7653 * define new registers here.
7654 */
7655 ARMCPRegInfo v8_idregs[] = {
7656 /*
7657 * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
7658 * emulation because we don't know the right value for the
7659 * GIC field until after we define these regs.
7660 */
7661 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
7662 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
7663 .access = PL1_R,
7664 #ifdef CONFIG_USER_ONLY
7665 .type = ARM_CP_CONST,
7666 .resetvalue = cpu->isar.id_aa64pfr0
7667 #else
7668 .type = ARM_CP_NO_RAW,
7669 .accessfn = access_aa64_tid3,
7670 .readfn = id_aa64pfr0_read,
7671 .writefn = arm_cp_write_ignore
7672 #endif
7673 },
7674 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
7675 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
7676 .access = PL1_R, .type = ARM_CP_CONST,
7677 .accessfn = access_aa64_tid3,
7678 .resetvalue = cpu->isar.id_aa64pfr1},
7679 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7680 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
7681 .access = PL1_R, .type = ARM_CP_CONST,
7682 .accessfn = access_aa64_tid3,
7683 .resetvalue = 0 },
7684 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7685 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
7686 .access = PL1_R, .type = ARM_CP_CONST,
7687 .accessfn = access_aa64_tid3,
7688 .resetvalue = 0 },
7689 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
7690 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
7691 .access = PL1_R, .type = ARM_CP_CONST,
7692 .accessfn = access_aa64_tid3,
7693 .resetvalue = cpu->isar.id_aa64zfr0 },
7694 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7695 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
7696 .access = PL1_R, .type = ARM_CP_CONST,
7697 .accessfn = access_aa64_tid3,
7698 .resetvalue = 0 },
7699 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7700 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
7701 .access = PL1_R, .type = ARM_CP_CONST,
7702 .accessfn = access_aa64_tid3,
7703 .resetvalue = 0 },
7704 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7705 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
7706 .access = PL1_R, .type = ARM_CP_CONST,
7707 .accessfn = access_aa64_tid3,
7708 .resetvalue = 0 },
7709 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
7710 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
7711 .access = PL1_R, .type = ARM_CP_CONST,
7712 .accessfn = access_aa64_tid3,
7713 .resetvalue = cpu->isar.id_aa64dfr0 },
7714 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
7715 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
7716 .access = PL1_R, .type = ARM_CP_CONST,
7717 .accessfn = access_aa64_tid3,
7718 .resetvalue = cpu->isar.id_aa64dfr1 },
7719 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7720 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
7721 .access = PL1_R, .type = ARM_CP_CONST,
7722 .accessfn = access_aa64_tid3,
7723 .resetvalue = 0 },
7724 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7725 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
7726 .access = PL1_R, .type = ARM_CP_CONST,
7727 .accessfn = access_aa64_tid3,
7728 .resetvalue = 0 },
7729 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
7730 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
7731 .access = PL1_R, .type = ARM_CP_CONST,
7732 .accessfn = access_aa64_tid3,
7733 .resetvalue = cpu->id_aa64afr0 },
7734 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
7735 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
7736 .access = PL1_R, .type = ARM_CP_CONST,
7737 .accessfn = access_aa64_tid3,
7738 .resetvalue = cpu->id_aa64afr1 },
7739 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7740 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
7741 .access = PL1_R, .type = ARM_CP_CONST,
7742 .accessfn = access_aa64_tid3,
7743 .resetvalue = 0 },
7744 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7745 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
7746 .access = PL1_R, .type = ARM_CP_CONST,
7747 .accessfn = access_aa64_tid3,
7748 .resetvalue = 0 },
7749 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
7750 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
7751 .access = PL1_R, .type = ARM_CP_CONST,
7752 .accessfn = access_aa64_tid3,
7753 .resetvalue = cpu->isar.id_aa64isar0 },
7754 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
7755 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
7756 .access = PL1_R, .type = ARM_CP_CONST,
7757 .accessfn = access_aa64_tid3,
7758 .resetvalue = cpu->isar.id_aa64isar1 },
7759 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7760 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
7761 .access = PL1_R, .type = ARM_CP_CONST,
7762 .accessfn = access_aa64_tid3,
7763 .resetvalue = 0 },
7764 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7765 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
7766 .access = PL1_R, .type = ARM_CP_CONST,
7767 .accessfn = access_aa64_tid3,
7768 .resetvalue = 0 },
7769 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7770 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
7771 .access = PL1_R, .type = ARM_CP_CONST,
7772 .accessfn = access_aa64_tid3,
7773 .resetvalue = 0 },
7774 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7775 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
7776 .access = PL1_R, .type = ARM_CP_CONST,
7777 .accessfn = access_aa64_tid3,
7778 .resetvalue = 0 },
7779 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7780 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
7781 .access = PL1_R, .type = ARM_CP_CONST,
7782 .accessfn = access_aa64_tid3,
7783 .resetvalue = 0 },
7784 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7785 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
7786 .access = PL1_R, .type = ARM_CP_CONST,
7787 .accessfn = access_aa64_tid3,
7788 .resetvalue = 0 },
7789 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
7790 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
7791 .access = PL1_R, .type = ARM_CP_CONST,
7792 .accessfn = access_aa64_tid3,
7793 .resetvalue = cpu->isar.id_aa64mmfr0 },
7794 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
7795 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
7796 .access = PL1_R, .type = ARM_CP_CONST,
7797 .accessfn = access_aa64_tid3,
7798 .resetvalue = cpu->isar.id_aa64mmfr1 },
7799 { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
7800 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
7801 .access = PL1_R, .type = ARM_CP_CONST,
7802 .accessfn = access_aa64_tid3,
7803 .resetvalue = cpu->isar.id_aa64mmfr2 },
7804 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7805 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
7806 .access = PL1_R, .type = ARM_CP_CONST,
7807 .accessfn = access_aa64_tid3,
7808 .resetvalue = 0 },
7809 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7810 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
7811 .access = PL1_R, .type = ARM_CP_CONST,
7812 .accessfn = access_aa64_tid3,
7813 .resetvalue = 0 },
7814 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7815 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
7816 .access = PL1_R, .type = ARM_CP_CONST,
7817 .accessfn = access_aa64_tid3,
7818 .resetvalue = 0 },
7819 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7820 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
7821 .access = PL1_R, .type = ARM_CP_CONST,
7822 .accessfn = access_aa64_tid3,
7823 .resetvalue = 0 },
7824 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7825 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
7826 .access = PL1_R, .type = ARM_CP_CONST,
7827 .accessfn = access_aa64_tid3,
7828 .resetvalue = 0 },
7829 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
7830 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
7831 .access = PL1_R, .type = ARM_CP_CONST,
7832 .accessfn = access_aa64_tid3,
7833 .resetvalue = cpu->isar.mvfr0 },
7834 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
7835 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
7836 .access = PL1_R, .type = ARM_CP_CONST,
7837 .accessfn = access_aa64_tid3,
7838 .resetvalue = cpu->isar.mvfr1 },
7839 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
7840 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
7841 .access = PL1_R, .type = ARM_CP_CONST,
7842 .accessfn = access_aa64_tid3,
7843 .resetvalue = cpu->isar.mvfr2 },
7844 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7845 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
7846 .access = PL1_R, .type = ARM_CP_CONST,
7847 .accessfn = access_aa64_tid3,
7848 .resetvalue = 0 },
7849 { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
7850 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
7851 .access = PL1_R, .type = ARM_CP_CONST,
7852 .accessfn = access_aa64_tid3,
7853 .resetvalue = cpu->isar.id_pfr2 },
7854 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7855 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
7856 .access = PL1_R, .type = ARM_CP_CONST,
7857 .accessfn = access_aa64_tid3,
7858 .resetvalue = 0 },
7859 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7860 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
7861 .access = PL1_R, .type = ARM_CP_CONST,
7862 .accessfn = access_aa64_tid3,
7863 .resetvalue = 0 },
7864 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7865 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
7866 .access = PL1_R, .type = ARM_CP_CONST,
7867 .accessfn = access_aa64_tid3,
7868 .resetvalue = 0 },
7869 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
7870 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
7871 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7872 .resetvalue = extract64(cpu->pmceid0, 0, 32) },
7873 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
7874 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
7875 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7876 .resetvalue = cpu->pmceid0 },
7877 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
7878 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
7879 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7880 .resetvalue = extract64(cpu->pmceid1, 0, 32) },
7881 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
7882 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
7883 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7884 .resetvalue = cpu->pmceid1 },
7885 REGINFO_SENTINEL
7886 };
7887 #ifdef CONFIG_USER_ONLY
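/* For user-only emulation, trim the ID registers to what a Linux
 * kernel exposes via MRS emulation: exported_bits keeps a field
 * visible, fixed_bits forces a field to a given value. */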
7888 ARMCPRegUserSpaceInfo v8_user_idregs[] = {
7889 { .name = "ID_AA64PFR0_EL1",
7890 .exported_bits = 0x000f000f00ff0000,
7891 .fixed_bits = 0x0000000000000011 },
7892 { .name = "ID_AA64PFR1_EL1",
7893 .exported_bits = 0x00000000000000f0 },
7894 { .name = "ID_AA64PFR*_EL1_RESERVED",
7895 .is_glob = true },
7896 { .name = "ID_AA64ZFR0_EL1" },
7897 { .name = "ID_AA64MMFR0_EL1",
7898 .fixed_bits = 0x00000000ff000000 },
7899 { .name = "ID_AA64MMFR1_EL1" },
7900 { .name = "ID_AA64MMFR*_EL1_RESERVED",
7901 .is_glob = true },
7902 { .name = "ID_AA64DFR0_EL1",
7903 .fixed_bits = 0x0000000000000006 },
7904 { .name = "ID_AA64DFR1_EL1" },
7905 { .name = "ID_AA64DFR*_EL1_RESERVED",
7906 .is_glob = true },
7907 { .name = "ID_AA64AFR*",
7908 .is_glob = true },
7909 { .name = "ID_AA64ISAR0_EL1",
7910 .exported_bits = 0x00fffffff0fffff0 },
7911 { .name = "ID_AA64ISAR1_EL1",
7912 .exported_bits = 0x000000f0ffffffff },
7913 { .name = "ID_AA64ISAR*_EL1_RESERVED",
7914 .is_glob = true },
7915 REGUSERINFO_SENTINEL
7916 };
7917 modify_arm_cp_regs(v8_idregs, v8_user_idregs);
7918 #endif
7919 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
7920 if (!arm_feature(env, ARM_FEATURE_EL3) &&
7921 !arm_feature(env, ARM_FEATURE_EL2)) {
7922 ARMCPRegInfo rvbar = {
7923 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
7924 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
7925 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
7926 };
7927 define_one_arm_cp_reg(cpu, &rvbar);
7928 }
7929 define_arm_cp_regs(cpu, v8_idregs);
7930 define_arm_cp_regs(cpu, v8_cp_reginfo);
7932 if (arm_feature(env, ARM_FEATURE_EL2)) {
7933 uint64_t vmpidr_def = mpidr_read_val(env);
7934 ARMCPRegInfo vpidr_regs[] = {
7935 { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
7936 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7937 .access = PL2_RW, .accessfn = access_el3_aa32ns,
7938 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
7939 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
7940 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
7941 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7942 .access = PL2_RW, .resetvalue = cpu->midr,
7943 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
7944 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
7945 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
7946 .access = PL2_RW, .accessfn = access_el3_aa32ns,
7947 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
7948 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
7949 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
7950 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
7951 .access = PL2_RW,
7952 .resetvalue = vmpidr_def,
7953 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
7954 REGINFO_SENTINEL
7955 };
7956 define_arm_cp_regs(cpu, vpidr_regs);
7957 define_arm_cp_regs(cpu, el2_cp_reginfo);
7958 if (arm_feature(env, ARM_FEATURE_V8)) {
7959 define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
7960 }
7961 if (cpu_isar_feature(aa64_sel2, cpu)) {
7962 define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
7963 }
7964 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
7965 if (!arm_feature(env, ARM_FEATURE_EL3)) {
7966 ARMCPRegInfo rvbar = {
7967 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
7968 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
7969 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
7970 };
7971 define_one_arm_cp_reg(cpu, &rvbar);
7972 }
7973 } else {
7974 /* If EL2 is missing but higher ELs are enabled, we need to
7975 * register the no_el2 reginfos.
7976 */
7977 if (arm_feature(env, ARM_FEATURE_EL3)) {
7978 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
7979 * of MIDR_EL1 and MPIDR_EL1.
7980 */
7981 ARMCPRegInfo vpidr_regs[] = {
7982 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
7983 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7984 .access = PL2_RW, .accessfn = access_el3_aa32ns,
7985 .type = ARM_CP_CONST, .resetvalue = cpu->midr,
7986 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
7987 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
7988 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
7989 .access = PL2_RW, .accessfn = access_el3_aa32ns,
7990 .type = ARM_CP_NO_RAW,
7991 .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
7992 REGINFO_SENTINEL
7993 };
7994 define_arm_cp_regs(cpu, vpidr_regs);
7995 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
7996 if (arm_feature(env, ARM_FEATURE_V8)) {
7997 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
7998 }
7999 }
8001 if (arm_feature(env, ARM_FEATURE_EL3)) {
8002 define_arm_cp_regs(cpu, el3_cp_reginfo);
8003 ARMCPRegInfo el3_regs[] = {
8004 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
8005 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
8006 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
8007 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
8008 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
8009 .access = PL3_RW,
8010 .raw_writefn = raw_write, .writefn = sctlr_write,
8011 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
8012 .resetvalue = cpu->reset_sctlr },
8013 REGINFO_SENTINEL
8014 };
8016 define_arm_cp_regs(cpu, el3_regs);
8017 }
8018 /* The behaviour of NSACR is sufficiently various that we don't
8019 * try to describe it in a single reginfo:
8020 * if EL3 is 64 bit, then trap to EL3 from S EL1,
8021 * reads as constant 0xc00 from NS EL1 and NS EL2
8022 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
8023 * if v7 without EL3, register doesn't exist
8024 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
8026 if (arm_feature(env, ARM_FEATURE_EL3)) {
8027 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
8028 ARMCPRegInfo nsacr = {
8029 .name = "NSACR", .type = ARM_CP_CONST,
8030 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
8031 .access = PL1_RW, .accessfn = nsacr_access,
8032 .resetvalue = 0xc00
8034 define_one_arm_cp_reg(cpu, &nsacr);
8035 } else {
8036 ARMCPRegInfo nsacr = {
8037 .name = "NSACR",
8038 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
8039 .access = PL3_RW | PL1_R,
8040 .resetvalue = 0,
8041 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
8043 define_one_arm_cp_reg(cpu, &nsacr);
8045 } else {
8046 if (arm_feature(env, ARM_FEATURE_V8)) {
8047 ARMCPRegInfo nsacr = {
8048 .name = "NSACR", .type = ARM_CP_CONST,
8049 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
8050 .access = PL1_R,
8051 .resetvalue = 0xc00
8053 define_one_arm_cp_reg(cpu, &nsacr);
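    /*
     * Note on the constant: 0xc00 sets NSACR bits [11:10] (cp11, cp10),
     * so in the read-as-0xc00 cases above Non-secure software always
     * sees the FP/SIMD coprocessors as enabled.
     */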
8057 if (arm_feature(env, ARM_FEATURE_PMSA)) {
8058 if (arm_feature(env, ARM_FEATURE_V6)) {
8059 /* PMSAv6 not implemented */
8060 assert(arm_feature(env, ARM_FEATURE_V7));
8061 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
8062 define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
8063 } else {
8064 define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
8066 } else {
8067 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
8068 define_arm_cp_regs(cpu, vmsa_cp_reginfo);
8069 /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
8070 if (cpu_isar_feature(aa32_hpd, cpu)) {
8071 define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
8074 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
8075 define_arm_cp_regs(cpu, t2ee_cp_reginfo);
8077 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
8078 define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
8080 if (arm_feature(env, ARM_FEATURE_VAPA)) {
8081 define_arm_cp_regs(cpu, vapa_cp_reginfo);
8083 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
8084 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
8086 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
8087 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
8089 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
8090 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
8092 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
8093 define_arm_cp_regs(cpu, omap_cp_reginfo);
8095 if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
8096 define_arm_cp_regs(cpu, strongarm_cp_reginfo);
8098 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
8099 define_arm_cp_regs(cpu, xscale_cp_reginfo);
8101 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
8102 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
8104 if (arm_feature(env, ARM_FEATURE_LPAE)) {
8105 define_arm_cp_regs(cpu, lpae_cp_reginfo);
8107 if (cpu_isar_feature(aa32_jazelle, cpu)) {
8108 define_arm_cp_regs(cpu, jazelle_regs);
8110 /* Slightly awkwardly, the OMAP and StrongARM cores need all of
8111 * cp15 crn=0 to be writes-ignored, whereas for other cores they should
8112 * be read-only (ie write causes UNDEF exception).
8115 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
8116 /* Pre-v8 MIDR space.
8117 * Note that the MIDR isn't a simple constant register because
8118 * of the TI925 behaviour where writes to another register can
8119 * cause the MIDR value to change.
8121 * Unimplemented registers in the c15 0 0 0 space default to
8122 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
8123 * and friends override accordingly.
8125 { .name = "MIDR",
8126 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
8127 .access = PL1_R, .resetvalue = cpu->midr,
8128 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
8129 .readfn = midr_read,
8130 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
8131 .type = ARM_CP_OVERRIDE },
8132 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
8133 { .name = "DUMMY",
8134 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
8135 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8136 { .name = "DUMMY",
8137 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
8138 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8139 { .name = "DUMMY",
8140 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
8141 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8142 { .name = "DUMMY",
8143 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
8144 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8145 { .name = "DUMMY",
8146 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
8147 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8148 REGINFO_SENTINEL
8150 ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
8151 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
8152 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
8153 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
8154 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
8155 .readfn = midr_read },
8156 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
8157 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
8158 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
8159 .access = PL1_R, .resetvalue = cpu->midr },
8160 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
8161 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
8162 .access = PL1_R, .resetvalue = cpu->midr },
8163 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
8164 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
8165 .access = PL1_R,
8166 .accessfn = access_aa64_tid1,
8167 .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
8168 REGINFO_SENTINEL
8170 ARMCPRegInfo id_cp_reginfo[] = {
8171 /* These are common to v8 and pre-v8 */
8172 { .name = "CTR",
8173 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
8174 .access = PL1_R, .accessfn = ctr_el0_access,
8175 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
8176 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
8177 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
8178 .access = PL0_R, .accessfn = ctr_el0_access,
8179 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
8180 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
8181 { .name = "TCMTR",
8182 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
8183 .access = PL1_R,
8184 .accessfn = access_aa32_tid1,
8185 .type = ARM_CP_CONST, .resetvalue = 0 },
8186 REGINFO_SENTINEL
8188 /* TLBTR is specific to VMSA */
8189 ARMCPRegInfo id_tlbtr_reginfo = {
8190 .name = "TLBTR",
8191 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
8192 .access = PL1_R,
8193 .accessfn = access_aa32_tid1,
8194 .type = ARM_CP_CONST, .resetvalue = 0,
8196 /* MPUIR is specific to PMSA V6+ */
8197 ARMCPRegInfo id_mpuir_reginfo = {
8198 .name = "MPUIR",
8199 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
8200 .access = PL1_R, .type = ARM_CP_CONST,
8201 .resetvalue = cpu->pmsav7_dregion << 8
8203 ARMCPRegInfo crn0_wi_reginfo = {
8204 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
8205 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
8206 .type = ARM_CP_NOP | ARM_CP_OVERRIDE
8208 #ifdef CONFIG_USER_ONLY
8209 ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
8210 { .name = "MIDR_EL1",
8211 .exported_bits = 0x00000000ffffffff },
8212 { .name = "REVIDR_EL1" },
8213 REGUSERINFO_SENTINEL
8215 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
8216 #endif
8217 if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
8218 arm_feature(env, ARM_FEATURE_STRONGARM)) {
8219 ARMCPRegInfo *r;
8220 /* Register the blanket "writes ignored" value first to cover the
8221 * whole space. Then update the specific ID registers to allow write
8222 * access, so that they ignore writes rather than causing them to
8223 * UNDEF.
8225 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
8226 for (r = id_pre_v8_midr_cp_reginfo;
8227 r->type != ARM_CP_SENTINEL; r++) {
8228 r->access = PL1_RW;
8230 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
8231 r->access = PL1_RW;
8233 id_mpuir_reginfo.access = PL1_RW;
8234 id_tlbtr_reginfo.access = PL1_RW;
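    /*
     * Net effect (sketch): on these cores a write to any cp15 crn=0
     * encoding is now ignored instead of UNDEFing -- unallocated
     * encodings hit the blanket CRN0_WI NOP entry, while the specific
     * ID registers above have been widened to PL1_RW so their writes
     * decode and are then discarded.
     */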
8236 if (arm_feature(env, ARM_FEATURE_V8)) {
8237 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
8238 } else {
8239 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
8241 define_arm_cp_regs(cpu, id_cp_reginfo);
8242 if (!arm_feature(env, ARM_FEATURE_PMSA)) {
8243 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
8244 } else if (arm_feature(env, ARM_FEATURE_V7)) {
8245 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
8249 if (arm_feature(env, ARM_FEATURE_MPIDR)) {
8250 ARMCPRegInfo mpidr_cp_reginfo[] = {
8251 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
8252 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
8253 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
8254 REGINFO_SENTINEL
8256 #ifdef CONFIG_USER_ONLY
8257 ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
8258 { .name = "MPIDR_EL1",
8259 .fixed_bits = 0x0000000080000000 },
8260 REGUSERINFO_SENTINEL
8262 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
8263 #endif
8264 define_arm_cp_regs(cpu, mpidr_cp_reginfo);
8267 if (arm_feature(env, ARM_FEATURE_AUXCR)) {
8268 ARMCPRegInfo auxcr_reginfo[] = {
8269 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
8270 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
8271 .access = PL1_RW, .accessfn = access_tacr,
8272 .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
8273 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
8274 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
8275 .access = PL2_RW, .type = ARM_CP_CONST,
8276 .resetvalue = 0 },
8277 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
8278 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
8279 .access = PL3_RW, .type = ARM_CP_CONST,
8280 .resetvalue = 0 },
8281 REGINFO_SENTINEL
8283 define_arm_cp_regs(cpu, auxcr_reginfo);
8284 if (cpu_isar_feature(aa32_ac2, cpu)) {
8285 define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
8289 if (arm_feature(env, ARM_FEATURE_CBAR)) {
8291 * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
8292 * There are two flavours:
8293 * (1) older 32-bit only cores have a simple 32-bit CBAR
8294 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
8295 * 32-bit register visible to AArch32 at a different encoding
8296 * to the "flavour 1" register and with the bits rearranged to
8297 * be able to squash a 64-bit address into the 32-bit view.
8298 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
8299 * in future if we support AArch32-only configs of some of the
8300 * AArch64 cores we might need to add a specific feature flag
8301 * to indicate cores with "flavour 2" CBAR.
8303 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
8304 /* 32 bit view is [31:18] 0...0 [43:32]. */
8305 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
8306 | extract64(cpu->reset_cbar, 32, 12);
8307 ARMCPRegInfo cbar_reginfo[] = {
8308 { .name = "CBAR",
8309 .type = ARM_CP_CONST,
8310 .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
8311 .access = PL1_R, .resetvalue = cbar32 },
8312 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
8313 .type = ARM_CP_CONST,
8314 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
8315 .access = PL1_R, .resetvalue = cpu->reset_cbar },
8316 REGINFO_SENTINEL
8318 /* We don't currently implement an r/w 64 bit CBAR */
8319 assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
8320 define_arm_cp_regs(cpu, cbar_reginfo);
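        /*
         * Worked example of the 32-bit view: with reset_cbar =
         * 0x8_4000_0000, extract64(cbar, 18, 14) << 18 == 0x40000000
         * and extract64(cbar, 32, 12) == 0x8, so cbar32 == 0x40000008:
         * PA[31:18] lands in bits [31:18] and PA[43:32] is squashed
         * into bits [11:0].
         */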
8321 } else {
8322 ARMCPRegInfo cbar = {
8323 .name = "CBAR",
8324 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
8325 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
8326 .fieldoffset = offsetof(CPUARMState,
8327 cp15.c15_config_base_address)
8329 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
8330 cbar.access = PL1_R;
8331 cbar.fieldoffset = 0;
8332 cbar.type = ARM_CP_CONST;
8334 define_one_arm_cp_reg(cpu, &cbar);
8338 if (arm_feature(env, ARM_FEATURE_VBAR)) {
8339 ARMCPRegInfo vbar_cp_reginfo[] = {
8340 { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
8341 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
8342 .access = PL1_RW, .writefn = vbar_write,
8343 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
8344 offsetof(CPUARMState, cp15.vbar_ns) },
8345 .resetvalue = 0 },
8346 REGINFO_SENTINEL
8348 define_arm_cp_regs(cpu, vbar_cp_reginfo);
8351 /* Generic registers whose values depend on the implementation */
8353 ARMCPRegInfo sctlr = {
8354 .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
8355 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
8356 .access = PL1_RW, .accessfn = access_tvm_trvm,
8357 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
8358 offsetof(CPUARMState, cp15.sctlr_ns) },
8359 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
8360 .raw_writefn = raw_write,
8362 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
8363 /* Normally we would always end the TB on an SCTLR write, but Linux
8364 * arch/arm/mach-pxa/sleep.S expects two instructions following
8365 * an MMU enable to execute from cache. Imitate this behaviour.
8367 sctlr.type |= ARM_CP_SUPPRESS_TB_END;
8369 define_one_arm_cp_reg(cpu, &sctlr);
8372 if (cpu_isar_feature(aa64_lor, cpu)) {
8373 define_arm_cp_regs(cpu, lor_reginfo);
8375 if (cpu_isar_feature(aa64_pan, cpu)) {
8376 define_one_arm_cp_reg(cpu, &pan_reginfo);
8378 #ifndef CONFIG_USER_ONLY
8379 if (cpu_isar_feature(aa64_ats1e1, cpu)) {
8380 define_arm_cp_regs(cpu, ats1e1_reginfo);
8382 if (cpu_isar_feature(aa32_ats1e1, cpu)) {
8383 define_arm_cp_regs(cpu, ats1cp_reginfo);
8385 #endif
8386 if (cpu_isar_feature(aa64_uao, cpu)) {
8387 define_one_arm_cp_reg(cpu, &uao_reginfo);
8390 if (cpu_isar_feature(aa64_dit, cpu)) {
8391 define_one_arm_cp_reg(cpu, &dit_reginfo);
8393 if (cpu_isar_feature(aa64_ssbs, cpu)) {
8394 define_one_arm_cp_reg(cpu, &ssbs_reginfo);
8397 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
8398 define_arm_cp_regs(cpu, vhe_reginfo);
8401 if (cpu_isar_feature(aa64_sve, cpu)) {
8402 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
8403 if (arm_feature(env, ARM_FEATURE_EL2)) {
8404 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
8405 } else {
8406 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
8408 if (arm_feature(env, ARM_FEATURE_EL3)) {
8409 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
8413 #ifdef TARGET_AARCH64
8414 if (cpu_isar_feature(aa64_pauth, cpu)) {
8415 define_arm_cp_regs(cpu, pauth_reginfo);
8417 if (cpu_isar_feature(aa64_rndr, cpu)) {
8418 define_arm_cp_regs(cpu, rndr_reginfo);
8420 if (cpu_isar_feature(aa64_tlbirange, cpu)) {
8421 define_arm_cp_regs(cpu, tlbirange_reginfo);
8423 if (cpu_isar_feature(aa64_tlbios, cpu)) {
8424 define_arm_cp_regs(cpu, tlbios_reginfo);
8426 #ifndef CONFIG_USER_ONLY
8427 /* Data Cache clean instructions up to PoP */
8428 if (cpu_isar_feature(aa64_dcpop, cpu)) {
8429 define_one_arm_cp_reg(cpu, dcpop_reg);
8431 if (cpu_isar_feature(aa64_dcpodp, cpu)) {
8432 define_one_arm_cp_reg(cpu, dcpodp_reg);
8435 #endif /*CONFIG_USER_ONLY*/
8438 * If full MTE is enabled, add all of the system registers.
8439 * If only "instructions available at EL0" are enabled,
8440 * then define only a RAZ/WI version of PSTATE.TCO.
8442 if (cpu_isar_feature(aa64_mte, cpu)) {
8443 define_arm_cp_regs(cpu, mte_reginfo);
8444 define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
8445 } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
8446 define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
8447 define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
8449 #endif
8451 if (cpu_isar_feature(any_predinv, cpu)) {
8452 define_arm_cp_regs(cpu, predinv_reginfo);
8455 if (cpu_isar_feature(any_ccidx, cpu)) {
8456 define_arm_cp_regs(cpu, ccsidr2_reginfo);
8459 #ifndef CONFIG_USER_ONLY
8461 * Register redirections and aliases must be done last,
8462 * after the registers from the other extensions have been defined.
8464 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
8465 define_arm_vh_e2h_redirects_aliases(cpu);
8467 #endif
8470 /* Sort alphabetically by type name, except for "any". */
8471 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
8473 ObjectClass *class_a = (ObjectClass *)a;
8474 ObjectClass *class_b = (ObjectClass *)b;
8475 const char *name_a, *name_b;
8477 name_a = object_class_get_name(class_a);
8478 name_b = object_class_get_name(class_b);
8479 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
8480 return 1;
8481 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
8482 return -1;
8483 } else {
8484 return strcmp(name_a, name_b);
8488 static void arm_cpu_list_entry(gpointer data, gpointer user_data)
8490 ObjectClass *oc = data;
8491 const char *typename;
8492 char *name;
8494 typename = object_class_get_name(oc);
8495 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
8496 qemu_printf(" %s\n", name);
8497 g_free(name);
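/*
 * For example, the class name "cortex-a57-" TYPE_ARM_CPU, i.e.
 * "cortex-a57-arm-cpu", is printed as just "cortex-a57".
 */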
8500 void arm_cpu_list(void)
8502 GSList *list;
8504 list = object_class_get_list(TYPE_ARM_CPU, false);
8505 list = g_slist_sort(list, arm_cpu_list_compare);
8506 qemu_printf("Available CPUs:\n");
8507 g_slist_foreach(list, arm_cpu_list_entry, NULL);
8508 g_slist_free(list);
8511 static void arm_cpu_add_definition(gpointer data, gpointer user_data)
8513 ObjectClass *oc = data;
8514 CpuDefinitionInfoList **cpu_list = user_data;
8515 CpuDefinitionInfo *info;
8516 const char *typename;
8518 typename = object_class_get_name(oc);
8519 info = g_malloc0(sizeof(*info));
8520 info->name = g_strndup(typename,
8521 strlen(typename) - strlen("-" TYPE_ARM_CPU));
8522 info->q_typename = g_strdup(typename);
8524 QAPI_LIST_PREPEND(*cpu_list, info);
8527 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
8529 CpuDefinitionInfoList *cpu_list = NULL;
8530 GSList *list;
8532 list = object_class_get_list(TYPE_ARM_CPU, false);
8533 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
8534 g_slist_free(list);
8536 return cpu_list;
8539 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
8540 void *opaque, int state, int secstate,
8541 int crm, int opc1, int opc2,
8542 const char *name)
8544 /* Private utility function for define_one_arm_cp_reg_with_opaque():
8545 * add a single reginfo struct to the hash table.
8547 uint32_t *key = g_new(uint32_t, 1);
8548 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
8549 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
8550 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
8552 r2->name = g_strdup(name);
8553 /* Reset the secure state to the specific incoming state. This is
8554 * necessary as the register may have been defined with both states.
8556 r2->secure = secstate;
8558 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
8559 /* Register is banked (using both entries in array).
8560 * Overwrite fieldoffset, as the array is only used to define
8561 * banked registers; afterwards only fieldoffset is used.
8563 r2->fieldoffset = r->bank_fieldoffsets[ns];
8566 if (state == ARM_CP_STATE_AA32) {
8567 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
8568 /* If the register is banked then we don't need to migrate or
8569 * reset the 32-bit instance in certain cases:
8571 * 1) If the register has both 32-bit and 64-bit instances then we
8572 * can count on the 64-bit instance taking care of the
8573 * non-secure bank.
8574 * 2) If ARMv8 is enabled then we can count on a 64-bit version
8575 * taking care of the secure bank. This requires that separate
8576 * 32 and 64-bit definitions are provided.
8578 if ((r->state == ARM_CP_STATE_BOTH && ns) ||
8579 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
8580 r2->type |= ARM_CP_ALIAS;
8582 } else if ((secstate != r->secure) && !ns) {
8583 /* The register is not banked so we only want to allow migration of
8584 * the non-secure instance.
8586 r2->type |= ARM_CP_ALIAS;
8589 if (r->state == ARM_CP_STATE_BOTH) {
8590 /* We assume it is a cp15 register if the .cp field is left unset.
8592 if (r2->cp == 0) {
8593 r2->cp = 15;
8596 #ifdef HOST_WORDS_BIGENDIAN
8597 if (r2->fieldoffset) {
8598 r2->fieldoffset += sizeof(uint32_t);
8600 #endif
8603 if (state == ARM_CP_STATE_AA64) {
8604 /* To allow abbreviation of ARMCPRegInfo
8605 * definitions, we treat cp == 0 as equivalent to
8606 * the value for "standard guest-visible sysreg".
8607 * STATE_BOTH definitions are also always "standard
8608 * sysreg" in their AArch64 view (the .cp value may
8609 * be non-zero for the benefit of the AArch32 view).
8611 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
8612 r2->cp = CP_REG_ARM64_SYSREG_CP;
8614 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
8615 r2->opc0, opc1, opc2);
8616 } else {
8617 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
8619 if (opaque) {
8620 r2->opaque = opaque;
8622 /* reginfo passed to helpers is correct for the actual access,
8623 * and is never ARM_CP_STATE_BOTH:
8625 r2->state = state;
8626 /* Make sure reginfo passed to helpers for wildcarded regs
8627 * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
8629 r2->crm = crm;
8630 r2->opc1 = opc1;
8631 r2->opc2 = opc2;
8632 /* By convention, for wildcarded registers only the first
8633 * entry is used for migration; the others are marked as
8634 * ALIAS so we don't try to transfer the register
8635 * multiple times. Special registers (ie NOP/WFI) are
8636 * never migratable and not even raw-accessible.
8638 if ((r->type & ARM_CP_SPECIAL)) {
8639 r2->type |= ARM_CP_NO_RAW;
8641 if (((r->crm == CP_ANY) && crm != 0) ||
8642 ((r->opc1 == CP_ANY) && opc1 != 0) ||
8643 ((r->opc2 == CP_ANY) && opc2 != 0)) {
8644 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
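    /*
     * E.g. a definition with .crm = CP_ANY expands to 16 concrete
     * entries (crm = 0..15); only the crm == 0 instance stays
     * migratable, the other 15 become ALIAS | NO_GDB here.
     */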
8647 /* Check that raw accesses are either forbidden or handled. Note that
8648 * we can't assert this earlier because the setup of fieldoffset for
8649 * banked registers has to be done first.
8651 if (!(r2->type & ARM_CP_NO_RAW)) {
8652 assert(!raw_accessors_invalid(r2));
8655 /* Overriding of an existing definition must be explicitly
8656 * requested.
8658 if (!(r->type & ARM_CP_OVERRIDE)) {
8659 ARMCPRegInfo *oldreg;
8660 oldreg = g_hash_table_lookup(cpu->cp_regs, key);
8661 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
8662 fprintf(stderr, "Register redefined: cp=%d %d bit "
8663 "crn=%d crm=%d opc1=%d opc2=%d, "
8664 "was %s, now %s\n", r2->cp, 32 + 32 * is64,
8665 r2->crn, r2->crm, r2->opc1, r2->opc2,
8666 oldreg->name, r2->name);
8667 g_assert_not_reached();
8670 g_hash_table_insert(cpu->cp_regs, key, r2);
8674 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
8675 const ARMCPRegInfo *r, void *opaque)
8677 /* Define implementations of coprocessor registers.
8678 * We store these in a hashtable because typically
8679 * there are fewer than 150 registers in a space which
8680 * is 16*16*16*8*8 = 262144 in size.
8681 * Wildcarding is supported for the crm, opc1 and opc2 fields.
8682 * If a register is defined twice then the second definition is
8683 * used, so this can be used to define some generic registers and
8684 * then override them with implementation specific variations.
8685 * At least one of the original and the second definition should
8686 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
8687 * against accidental use.
8689 * The state field defines whether the register is to be
8690 * visible in the AArch32 or AArch64 execution state. If the
8691 * state is set to ARM_CP_STATE_BOTH then we synthesise a
8692 * reginfo structure for the AArch32 view, which sees the lower
8693 * 32 bits of the 64 bit register.
8695 * Only registers visible in AArch64 may set r->opc0; opc0 cannot
8696 * be wildcarded. AArch64 registers are always considered to be 64
8697 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
8698 * the register, if any.
8700 int crm, opc1, opc2, state;
8701 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
8702 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
8703 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
8704 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
8705 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
8706 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
8707 /* 64 bit registers have only CRm and Opc1 fields */
8708 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
8709 /* op0 only exists in the AArch64 encodings */
8710 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
8711 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
8712 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
8714 * This API is only for Arm's system coprocessors (14 and 15) or
8715 * (M-profile or v7A-and-earlier only) for implementation defined
8716 * coprocessors in the range 0..7. Our decode assumes this, since
8717 * 8..13 can be used for other insns including VFP and Neon. See
8718 * valid_cp() in translate.c. Assert here that we haven't tried
8719 * to use an invalid coprocessor number.
8721 switch (r->state) {
8722 case ARM_CP_STATE_BOTH:
8723 /* 0 has a special meaning, but otherwise the same rules as AA32. */
8724 if (r->cp == 0) {
8725 break;
8727 /* fall through */
8728 case ARM_CP_STATE_AA32:
8729 if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
8730 !arm_feature(&cpu->env, ARM_FEATURE_M)) {
8731 assert(r->cp >= 14 && r->cp <= 15);
8732 } else {
8733 assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
8735 break;
8736 case ARM_CP_STATE_AA64:
8737 assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
8738 break;
8739 default:
8740 g_assert_not_reached();
8742 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
8743 * encodes a minimum access level for the register. We roll this
8744 * runtime check into our general permission check code, so check
8745 * here that the reginfo's specified permissions are strict enough
8746 * to encompass the generic architectural permission check.
8748 if (r->state != ARM_CP_STATE_AA32) {
8749 int mask = 0;
8750 switch (r->opc1) {
8751 case 0:
8752 /* min_EL EL1, but some accessible to EL0 via kernel ABI */
8753 mask = PL0U_R | PL1_RW;
8754 break;
8755 case 1: case 2:
8756 /* min_EL EL1 */
8757 mask = PL1_RW;
8758 break;
8759 case 3:
8760 /* min_EL EL0 */
8761 mask = PL0_RW;
8762 break;
8763 case 4:
8764 case 5:
8765 /* min_EL EL2 */
8766 mask = PL2_RW;
8767 break;
8768 case 6:
8769 /* min_EL EL3 */
8770 mask = PL3_RW;
8771 break;
8772 case 7:
8773 /* min_EL EL1, secure mode only (we don't check the latter) */
8774 mask = PL1_RW;
8775 break;
8776 default:
8777 /* broken reginfo with out-of-range opc1 */
8778 assert(false);
8779 break;
8781 /* assert our permissions are not too lax (stricter is fine) */
8782 assert((r->access & ~mask) == 0);
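        /*
         * E.g. an opc1 == 3 register (min_EL EL0) may declare at most
         * PL0_RW; declaring only PL0_R also passes the assert, since
         * permissions stricter than the architectural minimum are fine.
         */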
8785 /* Check that the register definition has enough info to handle
8786 * reads and writes if they are permitted.
8788 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
8789 if (r->access & PL3_R) {
8790 assert((r->fieldoffset ||
8791 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
8792 r->readfn);
8794 if (r->access & PL3_W) {
8795 assert((r->fieldoffset ||
8796 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
8797 r->writefn);
8800 /* Bad type field probably means missing sentinel at end of reg list */
8801 assert(cptype_valid(r->type));
8802 for (crm = crmmin; crm <= crmmax; crm++) {
8803 for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
8804 for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
8805 for (state = ARM_CP_STATE_AA32;
8806 state <= ARM_CP_STATE_AA64; state++) {
8807 if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
8808 continue;
8810 if (state == ARM_CP_STATE_AA32) {
8811 /* Under AArch32 CP registers can be common
8812 * (same for secure and non-secure world) or banked.
8814 char *name;
8816 switch (r->secure) {
8817 case ARM_CP_SECSTATE_S:
8818 case ARM_CP_SECSTATE_NS:
8819 add_cpreg_to_hashtable(cpu, r, opaque, state,
8820 r->secure, crm, opc1, opc2,
8821 r->name);
8822 break;
8823 default:
8824 name = g_strdup_printf("%s_S", r->name);
8825 add_cpreg_to_hashtable(cpu, r, opaque, state,
8826 ARM_CP_SECSTATE_S,
8827 crm, opc1, opc2, name);
8828 g_free(name);
8829 add_cpreg_to_hashtable(cpu, r, opaque, state,
8830 ARM_CP_SECSTATE_NS,
8831 crm, opc1, opc2, r->name);
8832 break;
8834 } else {
8835 /* AArch64 registers get mapped to non-secure instance
8836 * of AArch32 */
8837 add_cpreg_to_hashtable(cpu, r, opaque, state,
8838 ARM_CP_SECSTATE_NS,
8839 crm, opc1, opc2, r->name);
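    /*
     * Putting it together (sketch): an ARM_CP_STATE_BOTH banked
     * register with no explicit .secure therefore yields three
     * hash-table entries for a hypothetical "FOO": "FOO_S" and "FOO"
     * for the AArch32 secure/non-secure banks, plus an AArch64 "FOO"
     * mapped onto the non-secure instance.
     */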
8847 void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
8848 const ARMCPRegInfo *regs, void *opaque)
8850 /* Define a whole list of registers */
8851 const ARMCPRegInfo *r;
8852 for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
8853 define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
8858 * Modify ARMCPRegInfo for access from userspace.
8860 * This is a data driven modification directed by
8861 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
8862 * user-space cannot alter any values and dynamic values pertaining to
8863 * execution state are hidden from user space view anyway.
8865 void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
8867 const ARMCPRegUserSpaceInfo *m;
8868 ARMCPRegInfo *r;
8870 for (m = mods; m->name; m++) {
8871 GPatternSpec *pat = NULL;
8872 if (m->is_glob) {
8873 pat = g_pattern_spec_new(m->name);
8875 for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
8876 if (pat && g_pattern_match_string(pat, r->name)) {
8877 r->type = ARM_CP_CONST;
8878 r->access = PL0U_R;
8879 r->resetvalue = 0;
8880 /* continue */
8881 } else if (strcmp(r->name, m->name) == 0) {
8882 r->type = ARM_CP_CONST;
8883 r->access = PL0U_R;
8884 r->resetvalue &= m->exported_bits;
8885 r->resetvalue |= m->fixed_bits;
8886 break;
8889 if (pat) {
8890 g_pattern_spec_free(pat);
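/*
 * For instance, the MPIDR_EL1 user-space mod earlier in this file sets
 * .fixed_bits = 0x80000000, so a user-mode guest reading MPIDR_EL1 gets
 * a constant value with only bit 31 (RES1 in the multiprocessing
 * extensions register format) set.
 */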
8895 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
8897 return g_hash_table_lookup(cpregs, &encoded_cp);
8900 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
8901 uint64_t value)
8903 /* Helper coprocessor write function for write-ignore registers */
8906 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
8908 /* Helper coprocessor read function for read-as-zero registers */
8909 return 0;
8912 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
8914 /* Helper coprocessor reset function for do-nothing-on-reset registers */
8917 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
8919 /* Return true if it is not valid for us to switch to
8920 * this CPU mode (ie all the UNPREDICTABLE cases in
8921 * the ARM ARM CPSRWriteByInstr pseudocode).
8924 /* Changes to or from Hyp via MSR and CPS are illegal. */
8925 if (write_type == CPSRWriteByInstr &&
8926 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
8927 mode == ARM_CPU_MODE_HYP)) {
8928 return 1;
8931 switch (mode) {
8932 case ARM_CPU_MODE_USR:
8933 return 0;
8934 case ARM_CPU_MODE_SYS:
8935 case ARM_CPU_MODE_SVC:
8936 case ARM_CPU_MODE_ABT:
8937 case ARM_CPU_MODE_UND:
8938 case ARM_CPU_MODE_IRQ:
8939 case ARM_CPU_MODE_FIQ:
8940 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
8941 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
8943 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
8944 * and CPS are treated as illegal mode changes.
8946 if (write_type == CPSRWriteByInstr &&
8947 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
8948 (arm_hcr_el2_eff(env) & HCR_TGE)) {
8949 return 1;
8951 return 0;
8952 case ARM_CPU_MODE_HYP:
8953 return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
8954 case ARM_CPU_MODE_MON:
8955 return arm_current_el(env) < 3;
8956 default:
8957 return 1;
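/*
 * Example: an MSR CPSR_c attempting to switch from SVC to HYP returns
 * nonzero above, so cpsr_write() below leaves CPSR.M untouched and (on
 * v8) sets PSTATE.IL instead of performing the mode switch.
 */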
8961 uint32_t cpsr_read(CPUARMState *env)
8963 int ZF;
8964 ZF = (env->ZF == 0);
8965 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
8966 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
8967 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
8968 | ((env->condexec_bits & 0xfc) << 8)
8969 | (env->GE << 16) | (env->daif & CPSR_AIF);
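/*
 * Bit layout reassembled above: N=31, Z=30, C=29, V=28, Q=27,
 * IT[1:0]=26:25, GE=19:16, IT[7:2]=15:10, T=5, A/I/F=8:6 from
 * env->daif; M[4:0], E and the rest come in via uncached_cpsr.
 * E.g. NF with bit 31 set and ZF != 0 reads back as N=1, Z=0.
 */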
8972 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
8973 CPSRWriteType write_type)
8975 uint32_t changed_daif;
8976 bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
8977 (mask & (CPSR_M | CPSR_E | CPSR_IL));
8979 if (mask & CPSR_NZCV) {
8980 env->ZF = (~val) & CPSR_Z;
8981 env->NF = val;
8982 env->CF = (val >> 29) & 1;
8983 env->VF = (val << 3) & 0x80000000;
8985 if (mask & CPSR_Q)
8986 env->QF = ((val & CPSR_Q) != 0);
8987 if (mask & CPSR_T)
8988 env->thumb = ((val & CPSR_T) != 0);
8989 if (mask & CPSR_IT_0_1) {
8990 env->condexec_bits &= ~3;
8991 env->condexec_bits |= (val >> 25) & 3;
8993 if (mask & CPSR_IT_2_7) {
8994 env->condexec_bits &= 3;
8995 env->condexec_bits |= (val >> 8) & 0xfc;
8997 if (mask & CPSR_GE) {
8998 env->GE = (val >> 16) & 0xf;
9001 /* In a V7 implementation that includes the security extensions but does
9002 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
9003 * whether non-secure software is allowed to change the CPSR_F and CPSR_A
9004 * bits respectively.
9006 * In a V8 implementation, it is permitted for privileged software to
9007 * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
9009 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
9010 arm_feature(env, ARM_FEATURE_EL3) &&
9011 !arm_feature(env, ARM_FEATURE_EL2) &&
9012 !arm_is_secure(env)) {
9014 changed_daif = (env->daif ^ val) & mask;
9016 if (changed_daif & CPSR_A) {
9017 /* Check to see if we are allowed to change the masking of async
9018 * abort exceptions from a non-secure state.
9020 if (!(env->cp15.scr_el3 & SCR_AW)) {
9021 qemu_log_mask(LOG_GUEST_ERROR,
9022 "Ignoring attempt to switch CPSR_A flag from "
9023 "non-secure world with SCR.AW bit clear\n");
9024 mask &= ~CPSR_A;
9028 if (changed_daif & CPSR_F) {
9029 /* Check to see if we are allowed to change the masking of FIQ
9030 * exceptions from a non-secure state.
9032 if (!(env->cp15.scr_el3 & SCR_FW)) {
9033 qemu_log_mask(LOG_GUEST_ERROR,
9034 "Ignoring attempt to switch CPSR_F flag from "
9035 "non-secure world with SCR.FW bit clear\n");
9036 mask &= ~CPSR_F;
9039 /* Check whether non-maskable FIQ (NMFI) support is enabled.
9040 * If this bit is set software is not allowed to mask
9041 * FIQs, but is allowed to set CPSR_F to 0.
9043 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
9044 (val & CPSR_F)) {
9045 qemu_log_mask(LOG_GUEST_ERROR,
9046 "Ignoring attempt to enable CPSR_F flag "
9047 "(non-maskable FIQ [NMFI] support enabled)\n");
9048 mask &= ~CPSR_F;
9053 env->daif &= ~(CPSR_AIF & mask);
9054 env->daif |= val & CPSR_AIF & mask;
9056 if (write_type != CPSRWriteRaw &&
9057 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
9058 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
9059 /* Note that we can only get here in USR mode if this is a
9060 * gdb stub write; for this case we follow the architectural
9061 * behaviour for guest writes in USR mode of ignoring an attempt
9062 * to switch mode. (Those are caught by translate.c for writes
9063 * triggered by guest instructions.)
9065 mask &= ~CPSR_M;
9066 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
9067 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
9068 * v7, and has defined behaviour in v8:
9069 * + leave CPSR.M untouched
9070 * + allow changes to the other CPSR fields
9071 * + set PSTATE.IL
9072 * For user changes via the GDB stub, we don't set PSTATE.IL,
9073 * as this would be unnecessarily harsh for a user error.
9075 mask &= ~CPSR_M;
9076 if (write_type != CPSRWriteByGDBStub &&
9077 arm_feature(env, ARM_FEATURE_V8)) {
9078 mask |= CPSR_IL;
9079 val |= CPSR_IL;
9081 qemu_log_mask(LOG_GUEST_ERROR,
9082 "Illegal AArch32 mode switch attempt from %s to %s\n",
9083 aarch32_mode_name(env->uncached_cpsr),
9084 aarch32_mode_name(val));
9085 } else {
9086 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
9087 write_type == CPSRWriteExceptionReturn ?
9088 "Exception return from AArch32" :
9089 "AArch32 mode switch from",
9090 aarch32_mode_name(env->uncached_cpsr),
9091 aarch32_mode_name(val), env->regs[15]);
9092 switch_mode(env, val & CPSR_M);
9095 mask &= ~CACHED_CPSR_BITS;
9096 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
9097 if (rebuild_hflags) {
9098 arm_rebuild_hflags(env);
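/*
 * Typical uses (sketch): cpsr_write(env, val, CPSR_NZCV, CPSRWriteRaw)
 * updates only the flag bits and skips all of the mode-switch and
 * SCR/NMFI policy above, while a CPSRWriteByInstr with CPSR_M in the
 * mask goes through bad_mode_switch() and possibly switch_mode().
 */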
9102 /* Sign/zero extend */
9103 uint32_t HELPER(sxtb16)(uint32_t x)
9105 uint32_t res;
9106 res = (uint16_t)(int8_t)x;
9107 res |= (uint32_t)(int8_t)(x >> 16) << 16;
9108 return res;
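/*
 * Worked example: sxtb16(0x00ff007f) sign-extends byte 0 (0x7f -> 0x007f)
 * and byte 2 (0xff -> 0xffff), giving 0xffff007f.
 */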
9111 static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
9114 * Take a division-by-zero exception if necessary; otherwise return
9115 * to get the usual non-trapping division behaviour (result of 0)
9117 if (arm_feature(env, ARM_FEATURE_M)
9118 && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
9119 raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
9123 uint32_t HELPER(uxtb16)(uint32_t x)
9125 uint32_t res;
9126 res = (uint16_t)(uint8_t)x;
9127 res |= (uint32_t)(uint8_t)(x >> 16) << 16;
9128 return res;
9131 int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
9133 if (den == 0) {
9134 handle_possible_div0_trap(env, GETPC());
9135 return 0;
9137 if (num == INT_MIN && den == -1) {
9138 return INT_MIN;
9140 return num / den;
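/*
 * The two special cases above mirror the architecture: SDIV by zero
 * yields 0 (or a DIVBYZERO UsageFault on M-profile with CCR.DIV_0_TRP
 * set), and INT_MIN / -1 yields INT_MIN rather than trapping on
 * overflow as host C division would.
 */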
9143 uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
9145 if (den == 0) {
9146 handle_possible_div0_trap(env, GETPC());
9147 return 0;
9149 return num / den;
9152 uint32_t HELPER(rbit)(uint32_t x)
9154 return revbit32(x);
9157 #ifdef CONFIG_USER_ONLY
9159 static void switch_mode(CPUARMState *env, int mode)
9161 ARMCPU *cpu = env_archcpu(env);
9163 if (mode != ARM_CPU_MODE_USR) {
9164 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
9168 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
9169 uint32_t cur_el, bool secure)
9171 return 1;
9174 void aarch64_sync_64_to_32(CPUARMState *env)
9176 g_assert_not_reached();
9179 #else
9181 static void switch_mode(CPUARMState *env, int mode)
9183 int old_mode;
9184 int i;
9186 old_mode = env->uncached_cpsr & CPSR_M;
9187 if (mode == old_mode)
9188 return;
9190 if (old_mode == ARM_CPU_MODE_FIQ) {
9191 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
9192 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
9193 } else if (mode == ARM_CPU_MODE_FIQ) {
9194 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
9195 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
9198 i = bank_number(old_mode);
9199 env->banked_r13[i] = env->regs[13];
9200 env->banked_spsr[i] = env->spsr;
9202 i = bank_number(mode);
9203 env->regs[13] = env->banked_r13[i];
9204 env->spsr = env->banked_spsr[i];
9206 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
9207 env->regs[14] = env->banked_r14[r14_bank_number(mode)];
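/*
 * Example: switching SVC -> FIQ stores r13/r14/SPSR into the SVC bank,
 * saves r8-r12 to usr_regs, and loads r8-r14 and SPSR from the FIQ
 * banks; only FIQ banks r8-r12, so the memcpy pair above runs just for
 * transitions into or out of FIQ.
 */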
9210 /* Physical Interrupt Target EL Lookup Table
9212 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
9214 * The below multi-dimensional table is used for looking up the target
9215 * exception level given numerous condition criteria. Specifically, the
9216 * target EL is based on SCR and HCR routing controls as well as the
9217 * currently executing EL and secure state.
9219 * Dimensions:
9220 *  target_el_table[2][2][2][2][2][4]
9221 *                    |  |  |  |  |  +--- Current EL
9222 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
9223 *                    |  |  |  +--------- HCR mask override
9224 *                    |  |  +------------ SCR exec state control
9225 *                    |  +--------------- SCR mask override
9226 *                    +------------------ 32-bit(0)/64-bit(1) EL3
9228 * The table values are as such:
9229 * 0-3 = EL0-EL3
9230 * -1 = Cannot occur
9232 * The ARM ARM target EL table includes entries indicating that an "exception
9233 * is not taken". The two cases where this is applicable are:
9234 * 1) An exception is taken from EL3 but the SCR does not have the exception
9235 * routed to EL3.
9236 * 2) An exception is taken from EL2 but the HCR does not have the exception
9237 * routed to EL2.
9238 * In these two cases, the below table contains a target of EL1. This value is
9239 * returned as it is expected that the consumer of the table data will check
9240 * for "target EL >= current EL" to ensure the exception is not taken.
9242 *  SCR     HCR
9243 *   64  EA     AMO From
9244 *  BIT IRQ IMO Non-secure         Secure
9245 *  EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3    EL0 EL1 EL2 EL3
9247 static const int8_t target_el_table[2][2][2][2][2][4] = {
9248 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
9249 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
9250 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
9251 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
9252 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
9253 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
9254 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
9255 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
9256 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
9257 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 2, 2, -1, 1 },},},
9258 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, 1, 1 },},
9259 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 2, 2, 2, 1 },},},},
9260 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
9261 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
9262 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},
9263 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},},},},
9267 * Determine the target EL for physical exceptions
9269 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
9270 uint32_t cur_el, bool secure)
9272 CPUARMState *env = cs->env_ptr;
9273 bool rw;
9274 bool scr;
9275 bool hcr;
9276 int target_el;
9277 /* Is the highest EL AArch64? */
9278 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
9279 uint64_t hcr_el2;
9281 if (arm_feature(env, ARM_FEATURE_EL3)) {
9282 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
9283 } else {
9284 /* Either EL2 is the highest EL (and so the EL2 register width
9285 * is given by is64); or there is no EL2 or EL3, in which case
9286 * the value of 'rw' does not affect the table lookup anyway.
9288 rw = is64;
9291 hcr_el2 = arm_hcr_el2_eff(env);
9292 switch (excp_idx) {
9293 case EXCP_IRQ:
9294 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
9295 hcr = hcr_el2 & HCR_IMO;
9296 break;
9297 case EXCP_FIQ:
9298 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
9299 hcr = hcr_el2 & HCR_FMO;
9300 break;
9301 default:
9302 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
9303 hcr = hcr_el2 & HCR_AMO;
9304 break;
9308 * For these purposes, TGE and AMO/IMO/FMO both force the
9309 * interrupt to EL2. Fold TGE into the bit extracted above.
9311 hcr |= (hcr_el2 & HCR_TGE) != 0;
9313 /* Perform a table-lookup for the target EL given the current state */
9314 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
9316 assert(target_el > 0);
9318 return target_el;
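/*
 * Worked lookup: an IRQ from NS EL0 with 64-bit EL3, SCR.{IRQ,RW} =
 * {0,1} and HCR_EL2.{IMO,TGE} = 0 indexes
 * target_el_table[1][0][1][0][0][0] == 1, i.e. the exception is taken
 * to EL1; setting IMO (or TGE) selects the hcr == 1 row and routes it
 * to EL2 instead.
 */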
9321 void arm_log_exception(CPUState *cs)
9323 int idx = cs->exception_index;
9325 if (qemu_loglevel_mask(CPU_LOG_INT)) {
9326 const char *exc = NULL;
9327 static const char * const excnames[] = {
9328 [EXCP_UDEF] = "Undefined Instruction",
9329 [EXCP_SWI] = "SVC",
9330 [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
9331 [EXCP_DATA_ABORT] = "Data Abort",
9332 [EXCP_IRQ] = "IRQ",
9333 [EXCP_FIQ] = "FIQ",
9334 [EXCP_BKPT] = "Breakpoint",
9335 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
9336 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
9337 [EXCP_HVC] = "Hypervisor Call",
9338 [EXCP_HYP_TRAP] = "Hypervisor Trap",
9339 [EXCP_SMC] = "Secure Monitor Call",
9340 [EXCP_VIRQ] = "Virtual IRQ",
9341 [EXCP_VFIQ] = "Virtual FIQ",
9342 [EXCP_SEMIHOST] = "Semihosting call",
9343 [EXCP_NOCP] = "v7M NOCP UsageFault",
9344 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
9345 [EXCP_STKOF] = "v8M STKOF UsageFault",
9346 [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
9347 [EXCP_LSERR] = "v8M LSERR UsageFault",
9348 [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
9349 [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
9352 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
9353 exc = excnames[idx];
9355 if (!exc) {
9356 exc = "unknown";
9358 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
9359 idx, exc, cs->cpu_index);
9364 * Function used to synchronize QEMU's AArch64 register set with the AArch32
9365 * register set. This is necessary when switching between AArch32 and AArch64
9366 * execution state.
9368 void aarch64_sync_32_to_64(CPUARMState *env)
9370 int i;
9371 uint32_t mode = env->uncached_cpsr & CPSR_M;
9373 /* We can blanket copy R[0:7] to X[0:7] */
9374 for (i = 0; i < 8; i++) {
9375 env->xregs[i] = env->regs[i];
9379 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
9380 * Otherwise, they come from the banked user regs.
9382 if (mode == ARM_CPU_MODE_FIQ) {
9383 for (i = 8; i < 13; i++) {
9384 env->xregs[i] = env->usr_regs[i - 8];
9386 } else {
9387 for (i = 8; i < 13; i++) {
9388 env->xregs[i] = env->regs[i];
9393 * Registers x13-x23 are the various mode SP and FP registers. Registers
9394 * r13 and r14 are only copied if we are in that mode, otherwise we copy
9395 * from the mode banked register.
9397 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
9398 env->xregs[13] = env->regs[13];
9399 env->xregs[14] = env->regs[14];
9400 } else {
9401 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
9402 /* HYP is an exception in that it is copied from r14 */
9403 if (mode == ARM_CPU_MODE_HYP) {
9404 env->xregs[14] = env->regs[14];
9405 } else {
9406 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
9410 if (mode == ARM_CPU_MODE_HYP) {
9411 env->xregs[15] = env->regs[13];
9412 } else {
9413 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
9416 if (mode == ARM_CPU_MODE_IRQ) {
9417 env->xregs[16] = env->regs[14];
9418 env->xregs[17] = env->regs[13];
9419 } else {
9420 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
9421 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
9424 if (mode == ARM_CPU_MODE_SVC) {
9425 env->xregs[18] = env->regs[14];
9426 env->xregs[19] = env->regs[13];
9427 } else {
9428 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
9429 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
9432 if (mode == ARM_CPU_MODE_ABT) {
9433 env->xregs[20] = env->regs[14];
9434 env->xregs[21] = env->regs[13];
9435 } else {
9436 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
9437 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
9440 if (mode == ARM_CPU_MODE_UND) {
9441 env->xregs[22] = env->regs[14];
9442 env->xregs[23] = env->regs[13];
9443 } else {
9444 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
9445 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
9449 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
9450 * mode, then we can copy from r8-r14. Otherwise, we copy from the
9451 * FIQ bank for r8-r14.
9453 if (mode == ARM_CPU_MODE_FIQ) {
9454 for (i = 24; i < 31; i++) {
9455 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */
9457 } else {
9458 for (i = 24; i < 29; i++) {
9459 env->xregs[i] = env->fiq_regs[i - 24];
9461 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
9462 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
9465 env->pc = env->regs[15];
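/*
 * Summary of the fixed mapping built above: x0-x7 = r0-r7,
 * x8-x12 = (user) r8-r12, x13/x14 = SP/LR_usr, x15 = SP_hyp,
 * x16/x17 = LR/SP_irq, x18/x19 = LR/SP_svc, x20/x21 = LR/SP_abt,
 * x22/x23 = LR/SP_und, x24-x30 = r8-r14_fiq, pc = r15.
 */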
9469 * Function used to synchronize QEMU's AArch32 register set with the AArch64
9470 * register set. This is necessary when switching between AArch32 and AArch64
9471 * execution state.
9473 void aarch64_sync_64_to_32(CPUARMState *env)
9475 int i;
9476 uint32_t mode = env->uncached_cpsr & CPSR_M;
9478 /* We can blanket copy X[0:7] to R[0:7] */
9479 for (i = 0; i < 8; i++) {
9480 env->regs[i] = env->xregs[i];
9484 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
9485 * Otherwise, we copy x8-x12 into the banked user regs.
9487 if (mode == ARM_CPU_MODE_FIQ) {
9488 for (i = 8; i < 13; i++) {
9489 env->usr_regs[i - 8] = env->xregs[i];
9491 } else {
9492 for (i = 8; i < 13; i++) {
9493 env->regs[i] = env->xregs[i];
9498 * Registers r13 & r14 depend on the current mode.
9499 * If we are in a given mode, we copy the corresponding x registers to r13
9500 * and r14. Otherwise, we copy the x register to the banked r13 and r14
9501 * for the mode.
9503 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
9504 env->regs[13] = env->xregs[13];
9505 env->regs[14] = env->xregs[14];
9506 } else {
9507 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
9510 * HYP is an exception in that it does not have its own banked r14 but
9511 * shares the USR r14
9513 if (mode == ARM_CPU_MODE_HYP) {
9514 env->regs[14] = env->xregs[14];
9515 } else {
9516 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
9520 if (mode == ARM_CPU_MODE_HYP) {
9521 env->regs[13] = env->xregs[15];
9522 } else {
9523 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
9526 if (mode == ARM_CPU_MODE_IRQ) {
9527 env->regs[14] = env->xregs[16];
9528 env->regs[13] = env->xregs[17];
9529 } else {
9530 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
9531 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
9534 if (mode == ARM_CPU_MODE_SVC) {
9535 env->regs[14] = env->xregs[18];
9536 env->regs[13] = env->xregs[19];
9537 } else {
9538 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
9539 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
9542 if (mode == ARM_CPU_MODE_ABT) {
9543 env->regs[14] = env->xregs[20];
9544 env->regs[13] = env->xregs[21];
9545 } else {
9546 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
9547 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
9550 if (mode == ARM_CPU_MODE_UND) {
9551 env->regs[14] = env->xregs[22];
9552 env->regs[13] = env->xregs[23];
9553 } else {
9554 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
9555 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
9558 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
9559 * mode, then we can copy to r8-r14. Otherwise, we copy to the
9560 * FIQ bank for r8-r14.
9562 if (mode == ARM_CPU_MODE_FIQ) {
9563 for (i = 24; i < 31; i++) {
9564 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */
9566 } else {
9567 for (i = 24; i < 29; i++) {
9568 env->fiq_regs[i - 24] = env->xregs[i];
9570 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
9571 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
9574 env->regs[15] = env->pc;
9577 static void take_aarch32_exception(CPUARMState *env, int new_mode,
9578 uint32_t mask, uint32_t offset,
9579 uint32_t newpc)
9581 int new_el;
9583 /* Change the CPU state so as to actually take the exception. */
9584 switch_mode(env, new_mode);
9587 * For exceptions taken to AArch32 we must clear the SS bit in both
9588 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
9590 env->pstate &= ~PSTATE_SS;
9591 env->spsr = cpsr_read(env);
9592 /* Clear IT bits. */
9593 env->condexec_bits = 0;
9594 /* Switch to the new mode, and to the correct instruction set. */
9595 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
9597 /* This must be after mode switching. */
9598 new_el = arm_current_el(env);
9600 /* Set new mode endianness */
9601 env->uncached_cpsr &= ~CPSR_E;
9602 if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
9603 env->uncached_cpsr |= CPSR_E;
9605 /* J and IL must always be cleared for exception entry */
9606 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
9607 env->daif |= mask;
9609 if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
9610 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
9611 env->uncached_cpsr |= CPSR_SSBS;
9612 } else {
9613 env->uncached_cpsr &= ~CPSR_SSBS;
9617 if (new_mode == ARM_CPU_MODE_HYP) {
9618 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
9619 env->elr_el[2] = env->regs[15];
9620 } else {
9621 /* CPSR.PAN is normally preserved unless... */
9622 if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
9623 switch (new_el) {
9624 case 3:
9625 if (!arm_is_secure_below_el3(env)) {
9626 /* ... the target is EL3, from non-secure state. */
9627 env->uncached_cpsr &= ~CPSR_PAN;
9628 break;
9630 /* ... the target is EL3, from secure state ... */
9631 /* fall through */
9632 case 1:
9633 /* ... the target is EL1 and SCTLR.SPAN is 0. */
9634 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
9635 env->uncached_cpsr |= CPSR_PAN;
9637 break;
9641 * This is a lie, as there was no c1_sys on V4T/V5, but who cares?
9642 * We should just guard the Thumb-mode check on V4.
9644 if (arm_feature(env, ARM_FEATURE_V4T)) {
9645 env->thumb =
9646 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
9648 env->regs[14] = env->regs[15] + offset;
9650 env->regs[15] = newpc;
9651 arm_rebuild_hflags(env);
9654 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
9657 * Handle exception entry to Hyp mode; this is sufficiently
9658 * different to entry to other AArch32 modes that we handle it
9659 * separately here.
9661 * The vector table entry used is always the 0x14 Hyp mode entry point,
9662 * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
9663 * The offset applied to the preferred return address is always zero
9664 * (see DDI0487C.a section G1.12.3).
9665 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
9667 uint32_t addr, mask;
9668 ARMCPU *cpu = ARM_CPU(cs);
9669 CPUARMState *env = &cpu->env;
9671 switch (cs->exception_index) {
9672 case EXCP_UDEF:
9673 addr = 0x04;
9674 break;
9675 case EXCP_SWI:
9676 addr = 0x08;
9677 break;
9678 case EXCP_BKPT:
9679 /* Fall through to prefetch abort. */
9680 case EXCP_PREFETCH_ABORT:
9681 env->cp15.ifar_s = env->exception.vaddress;
9682 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
9683 (uint32_t)env->exception.vaddress);
9684 addr = 0x0c;
9685 break;
9686 case EXCP_DATA_ABORT:
9687 env->cp15.dfar_s = env->exception.vaddress;
9688 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
9689 (uint32_t)env->exception.vaddress);
9690 addr = 0x10;
9691 break;
9692 case EXCP_IRQ:
9693 addr = 0x18;
9694 break;
9695 case EXCP_FIQ:
9696 addr = 0x1c;
9697 break;
9698 case EXCP_HVC:
9699 addr = 0x08;
9700 break;
9701 case EXCP_HYP_TRAP:
9702 addr = 0x14;
9703 break;
9704 default:
9705 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9708 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
9709 if (!arm_feature(env, ARM_FEATURE_V8)) {
9711 * QEMU syndrome values are v8-style. v7 has the IL bit
9712 * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
9713 * If this is a v7 CPU, squash the IL bit in those cases.
9715 if (cs->exception_index == EXCP_PREFETCH_ABORT ||
9716 (cs->exception_index == EXCP_DATA_ABORT &&
9717 !(env->exception.syndrome & ARM_EL_ISV)) ||
9718 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
9719 env->exception.syndrome &= ~ARM_EL_IL;
9722 env->cp15.esr_el[2] = env->exception.syndrome;
9725 if (arm_current_el(env) != 2 && addr < 0x14) {
9726 addr = 0x14;
9729 mask = 0;
9730 if (!(env->cp15.scr_el3 & SCR_EA)) {
9731 mask |= CPSR_A;
9733 if (!(env->cp15.scr_el3 & SCR_IRQ)) {
9734 mask |= CPSR_I;
9736 if (!(env->cp15.scr_el3 & SCR_FIQ)) {
9737 mask |= CPSR_F;
9740 addr += env->cp15.hvbar;
9742 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
9745 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
9747 ARMCPU *cpu = ARM_CPU(cs);
9748 CPUARMState *env = &cpu->env;
9749 uint32_t addr;
9750 uint32_t mask;
9751 int new_mode;
9752 uint32_t offset;
9753 uint32_t moe;
9755 /* If this is a debug exception we must update the DBGDSCR.MOE bits */
9756 switch (syn_get_ec(env->exception.syndrome)) {
9757 case EC_BREAKPOINT:
9758 case EC_BREAKPOINT_SAME_EL:
9759 moe = 1;
9760 break;
9761 case EC_WATCHPOINT:
9762 case EC_WATCHPOINT_SAME_EL:
9763 moe = 10;
9764 break;
9765 case EC_AA32_BKPT:
9766 moe = 3;
9767 break;
9768 case EC_VECTORCATCH:
9769 moe = 5;
9770 break;
9771 default:
9772 moe = 0;
9773 break;
9776 if (moe) {
9777 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
9780 if (env->exception.target_el == 2) {
9781 arm_cpu_do_interrupt_aarch32_hyp(cs);
9782 return;
9785 switch (cs->exception_index) {
9786 case EXCP_UDEF:
9787 new_mode = ARM_CPU_MODE_UND;
9788 addr = 0x04;
9789 mask = CPSR_I;
9790 if (env->thumb)
9791 offset = 2;
9792 else
9793 offset = 4;
9794 break;
9795 case EXCP_SWI:
9796 new_mode = ARM_CPU_MODE_SVC;
9797 addr = 0x08;
9798 mask = CPSR_I;
9799 /* The PC already points to the next instruction. */
9800 offset = 0;
9801 break;
9802 case EXCP_BKPT:
9803 /* Fall through to prefetch abort. */
9804 case EXCP_PREFETCH_ABORT:
9805 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
9806 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
9807 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
9808 env->exception.fsr, (uint32_t)env->exception.vaddress);
9809 new_mode = ARM_CPU_MODE_ABT;
9810 addr = 0x0c;
9811 mask = CPSR_A | CPSR_I;
9812 offset = 4;
9813 break;
9814 case EXCP_DATA_ABORT:
9815 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
9816 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
9817 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
9818 env->exception.fsr,
9819 (uint32_t)env->exception.vaddress);
9820 new_mode = ARM_CPU_MODE_ABT;
9821 addr = 0x10;
9822 mask = CPSR_A | CPSR_I;
9823 offset = 8;
9824 break;
9825 case EXCP_IRQ:
9826 new_mode = ARM_CPU_MODE_IRQ;
9827 addr = 0x18;
9828 /* Disable IRQ and imprecise data aborts. */
9829 mask = CPSR_A | CPSR_I;
9830 offset = 4;
9831 if (env->cp15.scr_el3 & SCR_IRQ) {
9832 /* IRQ routed to monitor mode */
9833 new_mode = ARM_CPU_MODE_MON;
9834 mask |= CPSR_F;
9836 break;
9837 case EXCP_FIQ:
9838 new_mode = ARM_CPU_MODE_FIQ;
9839 addr = 0x1c;
9840 /* Disable FIQ, IRQ and imprecise data aborts. */
9841 mask = CPSR_A | CPSR_I | CPSR_F;
9842 if (env->cp15.scr_el3 & SCR_FIQ) {
9843 /* FIQ routed to monitor mode */
9844 new_mode = ARM_CPU_MODE_MON;
9846 offset = 4;
9847 break;
9848 case EXCP_VIRQ:
9849 new_mode = ARM_CPU_MODE_IRQ;
9850 addr = 0x18;
9851 /* Disable IRQ and imprecise data aborts. */
9852 mask = CPSR_A | CPSR_I;
9853 offset = 4;
9854 break;
9855 case EXCP_VFIQ:
9856 new_mode = ARM_CPU_MODE_FIQ;
9857 addr = 0x1c;
9858 /* Disable FIQ, IRQ and imprecise data aborts. */
9859 mask = CPSR_A | CPSR_I | CPSR_F;
9860 offset = 4;
9861 break;
9862 case EXCP_SMC:
9863 new_mode = ARM_CPU_MODE_MON;
9864 addr = 0x08;
9865 mask = CPSR_A | CPSR_I | CPSR_F;
9866 offset = 0;
9867 break;
9868 default:
9869 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9870 return; /* Never happens. Keep compiler happy. */
9873 if (new_mode == ARM_CPU_MODE_MON) {
9874 addr += env->cp15.mvbar;
9875 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
9876 /* High vectors. When enabled, base address cannot be remapped. */
9877 addr += 0xffff0000;
9878 } else {
9879         /* The ARMv7 architecture provides a vector base address register
9880          * to remap the interrupt vector table.
9881          * This register is only honoured in non-monitor mode, and is banked.
9882          * Note: only bits 31:5 are valid.
9884 addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
9887 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
9888 env->cp15.scr_el3 &= ~SCR_NS;
9891 take_aarch32_exception(env, new_mode, mask, offset, addr);
9894 static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
9897 * Return the register number of the AArch64 view of the AArch32
9898 * register @aarch32_reg. The CPUARMState CPSR is assumed to still
9899 * be that of the AArch32 mode the exception came from.
9901 int mode = env->uncached_cpsr & CPSR_M;
9903 switch (aarch32_reg) {
9904 case 0 ... 7:
9905 return aarch32_reg;
9906 case 8 ... 12:
9907 return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
9908 case 13:
9909 switch (mode) {
9910 case ARM_CPU_MODE_USR:
9911 case ARM_CPU_MODE_SYS:
9912 return 13;
9913 case ARM_CPU_MODE_HYP:
9914 return 15;
9915 case ARM_CPU_MODE_IRQ:
9916 return 17;
9917 case ARM_CPU_MODE_SVC:
9918 return 19;
9919 case ARM_CPU_MODE_ABT:
9920 return 21;
9921 case ARM_CPU_MODE_UND:
9922 return 23;
9923 case ARM_CPU_MODE_FIQ:
9924 return 29;
9925 default:
9926 g_assert_not_reached();
9928 case 14:
9929 switch (mode) {
9930 case ARM_CPU_MODE_USR:
9931 case ARM_CPU_MODE_SYS:
9932 case ARM_CPU_MODE_HYP:
9933 return 14;
9934 case ARM_CPU_MODE_IRQ:
9935 return 16;
9936 case ARM_CPU_MODE_SVC:
9937 return 18;
9938 case ARM_CPU_MODE_ABT:
9939 return 20;
9940 case ARM_CPU_MODE_UND:
9941 return 22;
9942 case ARM_CPU_MODE_FIQ:
9943 return 30;
9944 default:
9945 g_assert_not_reached();
9947 case 15:
9948 return 31;
9949 default:
9950 g_assert_not_reached();
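/*
 * Illustrative example (not part of the original file): with
 * CPSR.M == ARM_CPU_MODE_IRQ, aarch64_regnum(env, 13) returns 17 and
 * aarch64_regnum(env, 14) returns 16, i.e. SP_irq and LR_irq appear as
 * x17 and x16 in the AArch64 register view used for syndrome reporting.
 */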
9954 static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
9956 uint32_t ret = cpsr_read(env);
9958 /* Move DIT to the correct location for SPSR_ELx */
9959 if (ret & CPSR_DIT) {
9960 ret &= ~CPSR_DIT;
9961 ret |= PSTATE_DIT;
9963 /* Merge PSTATE.SS into SPSR_ELx */
9964 ret |= env->pstate & PSTATE_SS;
9966 return ret;
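/*
 * Worked example (assuming QEMU's CPSR_DIT is bit 21 and PSTATE_DIT is
 * bit 24, matching the architectural encodings): a CPSR value of
 * 0x00200000 (DIT set) is returned as 0x01000000, which frees bit 21
 * for the SPSR_ELx.SS bit merged in above.
 */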
9969 /* Handle exception entry to a target EL which is using AArch64 */
9970 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
9972 ARMCPU *cpu = ARM_CPU(cs);
9973 CPUARMState *env = &cpu->env;
9974 unsigned int new_el = env->exception.target_el;
9975 target_ulong addr = env->cp15.vbar_el[new_el];
9976 unsigned int new_mode = aarch64_pstate_mode(new_el, true);
9977 unsigned int old_mode;
9978 unsigned int cur_el = arm_current_el(env);
9979 int rt;
9982 * Note that new_el can never be 0. If cur_el is 0, then
9983 * el0_a64 is is_a64(), else el0_a64 is ignored.
9985 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
9987 if (cur_el < new_el) {
9988 /* Entry vector offset depends on whether the implemented EL
9989 * immediately lower than the target level is using AArch32 or AArch64
9991 bool is_aa64;
9992 uint64_t hcr;
9994 switch (new_el) {
9995 case 3:
9996 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
9997 break;
9998 case 2:
9999 hcr = arm_hcr_el2_eff(env);
10000 if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
10001 is_aa64 = (hcr & HCR_RW) != 0;
10002 break;
10004 /* fall through */
10005 case 1:
10006 is_aa64 = is_a64(env);
10007 break;
10008 default:
10009 g_assert_not_reached();
10012 if (is_aa64) {
10013 addr += 0x400;
10014 } else {
10015 addr += 0x600;
10017 } else if (pstate_read(env) & PSTATE_SP) {
10018 addr += 0x200;
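/*
 * The resulting vector entry, relative to VBAR_ELx (the ARM ARM
 * exception vector layout):
 *   +0x000 current EL with SP_EL0    +0x200 current EL with SP_ELx
 *   +0x400 lower EL using AArch64    +0x600 lower EL using AArch32
 * The per-exception-type offset (+0x80 IRQ, +0x100 FIQ) is added by
 * the switch below.
 */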
10021 switch (cs->exception_index) {
10022 case EXCP_PREFETCH_ABORT:
10023 case EXCP_DATA_ABORT:
10024 env->cp15.far_el[new_el] = env->exception.vaddress;
10025 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
10026 env->cp15.far_el[new_el]);
10027 /* fall through */
10028 case EXCP_BKPT:
10029 case EXCP_UDEF:
10030 case EXCP_SWI:
10031 case EXCP_HVC:
10032 case EXCP_HYP_TRAP:
10033 case EXCP_SMC:
10034 switch (syn_get_ec(env->exception.syndrome)) {
10035 case EC_ADVSIMDFPACCESSTRAP:
10037 * QEMU internal FP/SIMD syndromes from AArch32 include the
10038 * TA and coproc fields which are only exposed if the exception
10039 * is taken to AArch32 Hyp mode. Mask them out to get a valid
10040 * AArch64 format syndrome.
10042 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
10043 break;
10044 case EC_CP14RTTRAP:
10045 case EC_CP15RTTRAP:
10046 case EC_CP14DTTRAP:
10048 * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
10049 * the raw register field from the insn; when taking this to
10050 * AArch64 we must convert it to the AArch64 view of the register
10051 * number. Notice that we read a 4-bit AArch32 register number and
10052 * write back a 5-bit AArch64 one.
10054 rt = extract32(env->exception.syndrome, 5, 4);
10055 rt = aarch64_regnum(env, rt);
10056 env->exception.syndrome = deposit32(env->exception.syndrome,
10057 5, 5, rt);
10058 break;
10059 case EC_CP15RRTTRAP:
10060 case EC_CP14RRTTRAP:
10061 /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
10062 rt = extract32(env->exception.syndrome, 5, 4);
10063 rt = aarch64_regnum(env, rt);
10064 env->exception.syndrome = deposit32(env->exception.syndrome,
10065 5, 5, rt);
10066 rt = extract32(env->exception.syndrome, 10, 4);
10067 rt = aarch64_regnum(env, rt);
10068 env->exception.syndrome = deposit32(env->exception.syndrome,
10069 10, 5, rt);
10070 break;
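/*
 * Example (illustrative): an MRRC trapped from AArch32 FIQ mode with
 * Rt == 13 has that 4-bit field rewritten to the 5-bit AArch64 view
 * aarch64_regnum(env, 13) == 29 before the syndrome is reported.
 */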
10072 env->cp15.esr_el[new_el] = env->exception.syndrome;
10073 break;
10074 case EXCP_IRQ:
10075 case EXCP_VIRQ:
10076 addr += 0x80;
10077 break;
10078 case EXCP_FIQ:
10079 case EXCP_VFIQ:
10080 addr += 0x100;
10081 break;
10082 default:
10083 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10086 if (is_a64(env)) {
10087 old_mode = pstate_read(env);
10088 aarch64_save_sp(env, arm_current_el(env));
10089 env->elr_el[new_el] = env->pc;
10090 } else {
10091 old_mode = cpsr_read_for_spsr_elx(env);
10092 env->elr_el[new_el] = env->regs[15];
10094 aarch64_sync_32_to_64(env);
10096 env->condexec_bits = 0;
10098 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
10100 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
10101 env->elr_el[new_el]);
10103 if (cpu_isar_feature(aa64_pan, cpu)) {
10104 /* The value of PSTATE.PAN is normally preserved, except when ... */
10105 new_mode |= old_mode & PSTATE_PAN;
10106 switch (new_el) {
10107 case 2:
10108 /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */
10109 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
10110 != (HCR_E2H | HCR_TGE)) {
10111 break;
10113 /* fall through */
10114 case 1:
10115 /* ... the target is EL1 ... */
10116 /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */
10117 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
10118 new_mode |= PSTATE_PAN;
10120 break;
10123 if (cpu_isar_feature(aa64_mte, cpu)) {
10124 new_mode |= PSTATE_TCO;
10127 if (cpu_isar_feature(aa64_ssbs, cpu)) {
10128 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
10129 new_mode |= PSTATE_SSBS;
10130 } else {
10131 new_mode &= ~PSTATE_SSBS;
10135 pstate_write(env, PSTATE_DAIF | new_mode);
10136 env->aarch64 = 1;
10137 aarch64_restore_sp(env, new_el);
10138 helper_rebuild_hflags_a64(env, new_el);
10140 env->pc = addr;
10142 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
10143 new_el, env->pc, pstate_read(env));
10147 * Do semihosting call and set the appropriate return value. All the
10148 * permission and validity checks have been done at translate time.
10150  * We only see semihosting exceptions in TCG, as they are not
10151  * trapped to the hypervisor in KVM.
10153 #ifdef CONFIG_TCG
10154 static void handle_semihosting(CPUState *cs)
10156 ARMCPU *cpu = ARM_CPU(cs);
10157 CPUARMState *env = &cpu->env;
10159 if (is_a64(env)) {
10160 qemu_log_mask(CPU_LOG_INT,
10161 "...handling as semihosting call 0x%" PRIx64 "\n",
10162 env->xregs[0]);
10163 env->xregs[0] = do_common_semihosting(cs);
10164 env->pc += 4;
10165 } else {
10166 qemu_log_mask(CPU_LOG_INT,
10167 "...handling as semihosting call 0x%x\n",
10168 env->regs[0]);
10169 env->regs[0] = do_common_semihosting(cs);
10170 env->regs[15] += env->thumb ? 2 : 4;
10173 #endif
10175 /* Handle a CPU exception for A and R profile CPUs.
10176 * Do any appropriate logging, handle PSCI calls, and then hand off
10177 * to the AArch64-entry or AArch32-entry function depending on the
10178 * target exception level's register width.
10180  * Note: this is used both by TCG (as the do_interrupt tcg op)
10181  * and by KVM, to re-inject guest debug exceptions and to
10182  * inject a Synchronous External Abort.
10184 void arm_cpu_do_interrupt(CPUState *cs)
10186 ARMCPU *cpu = ARM_CPU(cs);
10187 CPUARMState *env = &cpu->env;
10188 unsigned int new_el = env->exception.target_el;
10190 assert(!arm_feature(env, ARM_FEATURE_M));
10192 arm_log_exception(cs);
10193 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
10194 new_el);
10195 if (qemu_loglevel_mask(CPU_LOG_INT)
10196 && !excp_is_internal(cs->exception_index)) {
10197 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
10198 syn_get_ec(env->exception.syndrome),
10199 env->exception.syndrome);
10202 if (arm_is_psci_call(cpu, cs->exception_index)) {
10203 arm_handle_psci_call(cpu);
10204 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
10205 return;
10209 * Semihosting semantics depend on the register width of the code
10210 * that caused the exception, not the target exception level, so
10211 * must be handled here.
10213 #ifdef CONFIG_TCG
10214 if (cs->exception_index == EXCP_SEMIHOST) {
10215 handle_semihosting(cs);
10216 return;
10218 #endif
10220     /* Hooks may change global state, so the BQL must be held; it is
10221      * likewise required for any modification of
10222      * cs->interrupt_request.
10224 g_assert(qemu_mutex_iothread_locked());
10226 arm_call_pre_el_change_hook(cpu);
10228 assert(!excp_is_internal(cs->exception_index));
10229 if (arm_el_is_aa64(env, new_el)) {
10230 arm_cpu_do_interrupt_aarch64(cs);
10231 } else {
10232 arm_cpu_do_interrupt_aarch32(cs);
10235 arm_call_el_change_hook(cpu);
10237 if (!kvm_enabled()) {
10238 cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
10241 #endif /* !CONFIG_USER_ONLY */
10243 uint64_t arm_sctlr(CPUARMState *env, int el)
10245 /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
10246 if (el == 0) {
10247 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
10248 el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
10249 ? 2 : 1;
10251 return env->cp15.sctlr_el[el];
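/*
 * Usage sketch: when HCR_EL2.{E2H,TGE} == '11' an EL0 access runs in
 * the EL2&0 regime (ARMMMUIdx_E20_0), so arm_sctlr(env, 0) returns
 * SCTLR_EL2 rather than SCTLR_EL1.
 */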
10254 /* Return the SCTLR value which controls this address translation regime */
10255 static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
10257 return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
10260 #ifndef CONFIG_USER_ONLY
10262 /* Return true if the specified stage of address translation is disabled */
10263 static inline bool regime_translation_disabled(CPUARMState *env,
10264 ARMMMUIdx mmu_idx)
10266 uint64_t hcr_el2;
10268 if (arm_feature(env, ARM_FEATURE_M)) {
10269 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
10270 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
10271 case R_V7M_MPU_CTRL_ENABLE_MASK:
10272 /* Enabled, but not for HardFault and NMI */
10273 return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
10274 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
10275 /* Enabled for all cases */
10276 return false;
10277 case 0:
10278 default:
10279 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
10280 * we warned about that in armv7m_nvic.c when the guest set it.
10282 return true;
10286 hcr_el2 = arm_hcr_el2_eff(env);
10288 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
10289 /* HCR.DC means HCR.VM behaves as 1 */
10290 return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
10293 if (hcr_el2 & HCR_TGE) {
10294 /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
10295 if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
10296 return true;
10300 if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
10301 /* HCR.DC means SCTLR_EL1.M behaves as 0 */
10302 return true;
10305 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
10308 static inline bool regime_translation_big_endian(CPUARMState *env,
10309 ARMMMUIdx mmu_idx)
10311 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
10314 /* Return the TTBR associated with this translation regime */
10315 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
10316 int ttbrn)
10318 if (mmu_idx == ARMMMUIdx_Stage2) {
10319 return env->cp15.vttbr_el2;
10321 if (mmu_idx == ARMMMUIdx_Stage2_S) {
10322 return env->cp15.vsttbr_el2;
10324 if (ttbrn == 0) {
10325 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
10326 } else {
10327 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
10331 #endif /* !CONFIG_USER_ONLY */
10333 /* Convert a possible stage1+2 MMU index into the appropriate
10334 * stage 1 MMU index
10336 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
10338 switch (mmu_idx) {
10339 case ARMMMUIdx_SE10_0:
10340 return ARMMMUIdx_Stage1_SE0;
10341 case ARMMMUIdx_SE10_1:
10342 return ARMMMUIdx_Stage1_SE1;
10343 case ARMMMUIdx_SE10_1_PAN:
10344 return ARMMMUIdx_Stage1_SE1_PAN;
10345 case ARMMMUIdx_E10_0:
10346 return ARMMMUIdx_Stage1_E0;
10347 case ARMMMUIdx_E10_1:
10348 return ARMMMUIdx_Stage1_E1;
10349 case ARMMMUIdx_E10_1_PAN:
10350 return ARMMMUIdx_Stage1_E1_PAN;
10351 default:
10352 return mmu_idx;
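/*
 * Example: a combined stage 1+2 walk begun with ARMMMUIdx_E10_1 does
 * its stage 1 table lookups as stage_1_mmu_idx(ARMMMUIdx_E10_1) ==
 * ARMMMUIdx_Stage1_E1; indexes that are already single-stage pass
 * through unchanged.
 */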
10356 /* Return true if the translation regime is using LPAE format page tables */
10357 static inline bool regime_using_lpae_format(CPUARMState *env,
10358 ARMMMUIdx mmu_idx)
10360 int el = regime_el(env, mmu_idx);
10361 if (el == 2 || arm_el_is_aa64(env, el)) {
10362 return true;
10364 if (arm_feature(env, ARM_FEATURE_LPAE)
10365 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
10366 return true;
10368 return false;
10371 /* Returns true if the stage 1 translation regime is using LPAE format page
10372 * tables. Used when raising alignment exceptions, whose FSR changes depending
10373 * on whether the long or short descriptor format is in use. */
10374 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
10376 mmu_idx = stage_1_mmu_idx(mmu_idx);
10378 return regime_using_lpae_format(env, mmu_idx);
10381 #ifndef CONFIG_USER_ONLY
10382 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
10384 switch (mmu_idx) {
10385 case ARMMMUIdx_SE10_0:
10386 case ARMMMUIdx_E20_0:
10387 case ARMMMUIdx_SE20_0:
10388 case ARMMMUIdx_Stage1_E0:
10389 case ARMMMUIdx_Stage1_SE0:
10390 case ARMMMUIdx_MUser:
10391 case ARMMMUIdx_MSUser:
10392 case ARMMMUIdx_MUserNegPri:
10393 case ARMMMUIdx_MSUserNegPri:
10394 return true;
10395 default:
10396 return false;
10397 case ARMMMUIdx_E10_0:
10398 case ARMMMUIdx_E10_1:
10399 case ARMMMUIdx_E10_1_PAN:
10400 g_assert_not_reached();
10404 /* Translate section/page access permissions to page
10405 * R/W protection flags
10407 * @env: CPUARMState
10408 * @mmu_idx: MMU index indicating required translation regime
10409 * @ap: The 3-bit access permissions (AP[2:0])
10410 * @domain_prot: The 2-bit domain access permissions
10412 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
10413 int ap, int domain_prot)
10415 bool is_user = regime_is_user(env, mmu_idx);
10417 if (domain_prot == 3) {
10418 return PAGE_READ | PAGE_WRITE;
10421 switch (ap) {
10422 case 0:
10423 if (arm_feature(env, ARM_FEATURE_V7)) {
10424 return 0;
10426 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
10427 case SCTLR_S:
10428 return is_user ? 0 : PAGE_READ;
10429 case SCTLR_R:
10430 return PAGE_READ;
10431 default:
10432 return 0;
10434 case 1:
10435 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
10436 case 2:
10437 if (is_user) {
10438 return PAGE_READ;
10439 } else {
10440 return PAGE_READ | PAGE_WRITE;
10442 case 3:
10443 return PAGE_READ | PAGE_WRITE;
10444 case 4: /* Reserved. */
10445 return 0;
10446 case 5:
10447 return is_user ? 0 : PAGE_READ;
10448 case 6:
10449 return PAGE_READ;
10450 case 7:
10451 if (!arm_feature(env, ARM_FEATURE_V6K)) {
10452 return 0;
10454 return PAGE_READ;
10455 default:
10456 g_assert_not_reached();
10460 /* Translate section/page access permissions to page
10461 * R/W protection flags.
10463 * @ap: The 2-bit simple AP (AP[2:1])
10464 * @is_user: TRUE if accessing from PL0
10466 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
10468 switch (ap) {
10469 case 0:
10470 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
10471 case 1:
10472 return PAGE_READ | PAGE_WRITE;
10473 case 2:
10474 return is_user ? 0 : PAGE_READ;
10475 case 3:
10476 return PAGE_READ;
10477 default:
10478 g_assert_not_reached();
10482 static inline int
10483 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
10485 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
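/*
 * The simple AP[2:1] decode above, as a truth table (PL0 / PL1+):
 *   0: no access / RW    1: RW / RW
 *   2: no access / RO    3: RO / RO
 */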
10488 /* Translate S2 section/page access permissions to protection flags
10490 * @env: CPUARMState
10491 * @s2ap: The 2-bit stage2 access permissions (S2AP)
10492 * @xn: XN (execute-never) bits
10493 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
10495 static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
10497 int prot = 0;
10499 if (s2ap & 1) {
10500 prot |= PAGE_READ;
10502 if (s2ap & 2) {
10503 prot |= PAGE_WRITE;
10506 if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
10507 switch (xn) {
10508 case 0:
10509 prot |= PAGE_EXEC;
10510 break;
10511 case 1:
10512 if (s1_is_el0) {
10513 prot |= PAGE_EXEC;
10515 break;
10516 case 2:
10517 break;
10518 case 3:
10519 if (!s1_is_el0) {
10520 prot |= PAGE_EXEC;
10522 break;
10523 default:
10524 g_assert_not_reached();
10526 } else {
10527 if (!extract32(xn, 1, 1)) {
10528 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
10529 prot |= PAGE_EXEC;
10533 return prot;
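/*
 * Example (with the any_tts2uxn case above, i.e. FEAT_TTS2UXN):
 * S2AP == 0b01 and XN == 0b01 on stage 2 of an EL0 stage 1 access
 * yield PAGE_READ | PAGE_EXEC; the page is executable only because
 * s1_is_el0 is true.
 */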
10536 /* Translate section/page access permissions to protection flags
10538 * @env: CPUARMState
10539 * @mmu_idx: MMU index indicating required translation regime
10540 * @is_aa64: TRUE if AArch64
10541 * @ap: The 2-bit simple AP (AP[2:1])
10542 * @ns: NS (non-secure) bit
10543 * @xn: XN (execute-never) bit
10544 * @pxn: PXN (privileged execute-never) bit
10546 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
10547 int ap, int ns, int xn, int pxn)
10549 bool is_user = regime_is_user(env, mmu_idx);
10550 int prot_rw, user_rw;
10551 bool have_wxn;
10552 int wxn = 0;
10554 assert(mmu_idx != ARMMMUIdx_Stage2);
10555 assert(mmu_idx != ARMMMUIdx_Stage2_S);
10557 user_rw = simple_ap_to_rw_prot_is_user(ap, true);
10558 if (is_user) {
10559 prot_rw = user_rw;
10560 } else {
10561 if (user_rw && regime_is_pan(env, mmu_idx)) {
10562 /* PAN forbids data accesses but doesn't affect insn fetch */
10563 prot_rw = 0;
10564 } else {
10565 prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
10569 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
10570 return prot_rw;
10573 /* TODO have_wxn should be replaced with
10574 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
10575 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
10576 * compatible processors have EL2, which is required for [U]WXN.
10578 have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
10580 if (have_wxn) {
10581 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
10584 if (is_aa64) {
10585 if (regime_has_2_ranges(mmu_idx) && !is_user) {
10586 xn = pxn || (user_rw & PAGE_WRITE);
10588 } else if (arm_feature(env, ARM_FEATURE_V7)) {
10589 switch (regime_el(env, mmu_idx)) {
10590 case 1:
10591 case 3:
10592 if (is_user) {
10593 xn = xn || !(user_rw & PAGE_READ);
10594 } else {
10595 int uwxn = 0;
10596 if (have_wxn) {
10597 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
10599 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
10600 (uwxn && (user_rw & PAGE_WRITE));
10602 break;
10603 case 2:
10604 break;
10606 } else {
10607 xn = wxn = 0;
10610 if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
10611 return prot_rw;
10613 return prot_rw | PAGE_EXEC;
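/*
 * Example: with SCTLR.WXN set, a writable mapping (prot_rw includes
 * PAGE_WRITE) never gains PAGE_EXEC here, whatever the descriptor's
 * XN bit says.
 */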
10616 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
10617 uint32_t *table, uint32_t address)
10619 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
10620 TCR *tcr = regime_tcr(env, mmu_idx);
10622 if (address & tcr->mask) {
10623 if (tcr->raw_tcr & TTBCR_PD1) {
10624 /* Translation table walk disabled for TTBR1 */
10625 return false;
10627 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
10628 } else {
10629 if (tcr->raw_tcr & TTBCR_PD0) {
10630 /* Translation table walk disabled for TTBR0 */
10631 return false;
10633 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
10635 *table |= (address >> 18) & 0x3ffc;
10636 return true;
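/*
 * Worked example: with TTBCR.N == 0, address 0x12345678 indexes the
 * level 1 table by bits [31:20] (0x123), and indeed
 * (0x12345678 >> 18) & 0x3ffc == 0x48c == 0x123 * 4.
 */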
10639 /* Translate a S1 pagetable walk through S2 if needed. */
10640 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
10641 hwaddr addr, bool *is_secure,
10642 ARMMMUFaultInfo *fi)
10644 if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
10645 !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
10646 target_ulong s2size;
10647 hwaddr s2pa;
10648 int s2prot;
10649 int ret;
10650 ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
10651 : ARMMMUIdx_Stage2;
10652 ARMCacheAttrs cacheattrs = {};
10653 MemTxAttrs txattrs = {};
10655 ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
10656 &s2pa, &txattrs, &s2prot, &s2size, fi,
10657 &cacheattrs);
10658 if (ret) {
10659 assert(fi->type != ARMFault_None);
10660 fi->s2addr = addr;
10661 fi->stage2 = true;
10662 fi->s1ptw = true;
10663 fi->s1ns = !*is_secure;
10664 return ~0;
10666 if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
10667 (cacheattrs.attrs & 0xf0) == 0) {
10669 * PTW set and S1 walk touched S2 Device memory:
10670 * generate Permission fault.
10672 fi->type = ARMFault_Permission;
10673 fi->s2addr = addr;
10674 fi->stage2 = true;
10675 fi->s1ptw = true;
10676 fi->s1ns = !*is_secure;
10677 return ~0;
10680 if (arm_is_secure_below_el3(env)) {
10681 /* Check if page table walk is to secure or non-secure PA space. */
10682 if (*is_secure) {
10683 *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
10684 } else {
10685 *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
10687 } else {
10688 assert(!*is_secure);
10691 addr = s2pa;
10693 return addr;
10696 /* All loads done in the course of a page table walk go through here. */
10697 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
10698 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
10700 ARMCPU *cpu = ARM_CPU(cs);
10701 CPUARMState *env = &cpu->env;
10702 MemTxAttrs attrs = {};
10703 MemTxResult result = MEMTX_OK;
10704 AddressSpace *as;
10705 uint32_t data;
10707 addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
10708 attrs.secure = is_secure;
10709 as = arm_addressspace(cs, attrs);
10710 if (fi->s1ptw) {
10711 return 0;
10713 if (regime_translation_big_endian(env, mmu_idx)) {
10714 data = address_space_ldl_be(as, addr, attrs, &result);
10715 } else {
10716 data = address_space_ldl_le(as, addr, attrs, &result);
10718 if (result == MEMTX_OK) {
10719 return data;
10721 fi->type = ARMFault_SyncExternalOnWalk;
10722 fi->ea = arm_extabort_type(result);
10723 return 0;
10726 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
10727 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
10729 ARMCPU *cpu = ARM_CPU(cs);
10730 CPUARMState *env = &cpu->env;
10731 MemTxAttrs attrs = {};
10732 MemTxResult result = MEMTX_OK;
10733 AddressSpace *as;
10734 uint64_t data;
10736 addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
10737 attrs.secure = is_secure;
10738 as = arm_addressspace(cs, attrs);
10739 if (fi->s1ptw) {
10740 return 0;
10742 if (regime_translation_big_endian(env, mmu_idx)) {
10743 data = address_space_ldq_be(as, addr, attrs, &result);
10744 } else {
10745 data = address_space_ldq_le(as, addr, attrs, &result);
10747 if (result == MEMTX_OK) {
10748 return data;
10750 fi->type = ARMFault_SyncExternalOnWalk;
10751 fi->ea = arm_extabort_type(result);
10752 return 0;
10755 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
10756 MMUAccessType access_type, ARMMMUIdx mmu_idx,
10757 hwaddr *phys_ptr, int *prot,
10758 target_ulong *page_size,
10759 ARMMMUFaultInfo *fi)
10761 CPUState *cs = env_cpu(env);
10762 int level = 1;
10763 uint32_t table;
10764 uint32_t desc;
10765 int type;
10766 int ap;
10767 int domain = 0;
10768 int domain_prot;
10769 hwaddr phys_addr;
10770 uint32_t dacr;
10772 /* Pagetable walk. */
10773 /* Lookup l1 descriptor. */
10774 if (!get_level1_table_address(env, mmu_idx, &table, address)) {
10775 /* Section translation fault if page walk is disabled by PD0 or PD1 */
10776 fi->type = ARMFault_Translation;
10777 goto do_fault;
10779 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10780 mmu_idx, fi);
10781 if (fi->type != ARMFault_None) {
10782 goto do_fault;
10784 type = (desc & 3);
10785 domain = (desc >> 5) & 0x0f;
10786 if (regime_el(env, mmu_idx) == 1) {
10787 dacr = env->cp15.dacr_ns;
10788 } else {
10789 dacr = env->cp15.dacr_s;
10791 domain_prot = (dacr >> (domain * 2)) & 3;
10792 if (type == 0) {
10793 /* Section translation fault. */
10794 fi->type = ARMFault_Translation;
10795 goto do_fault;
10797 if (type != 2) {
10798 level = 2;
10800 if (domain_prot == 0 || domain_prot == 2) {
10801 fi->type = ARMFault_Domain;
10802 goto do_fault;
10804 if (type == 2) {
10805 /* 1Mb section. */
10806 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
10807 ap = (desc >> 10) & 3;
10808 *page_size = 1024 * 1024;
10809 } else {
10810 /* Lookup l2 entry. */
10811 if (type == 1) {
10812 /* Coarse pagetable. */
10813 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
10814 } else {
10815 /* Fine pagetable. */
10816 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
10818 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10819 mmu_idx, fi);
10820 if (fi->type != ARMFault_None) {
10821 goto do_fault;
10823 switch (desc & 3) {
10824 case 0: /* Page translation fault. */
10825 fi->type = ARMFault_Translation;
10826 goto do_fault;
10827 case 1: /* 64k page. */
10828 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
10829 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
10830 *page_size = 0x10000;
10831 break;
10832 case 2: /* 4k page. */
10833 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10834 ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
10835 *page_size = 0x1000;
10836 break;
10837 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
10838 if (type == 1) {
10839 /* ARMv6/XScale extended small page format */
10840 if (arm_feature(env, ARM_FEATURE_XSCALE)
10841 || arm_feature(env, ARM_FEATURE_V6)) {
10842 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10843 *page_size = 0x1000;
10844 } else {
10845 /* UNPREDICTABLE in ARMv5; we choose to take a
10846 * page translation fault.
10848 fi->type = ARMFault_Translation;
10849 goto do_fault;
10851 } else {
10852 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
10853 *page_size = 0x400;
10855 ap = (desc >> 4) & 3;
10856 break;
10857 default:
10858 /* Never happens, but compiler isn't smart enough to tell. */
10859 abort();
10862 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
10863 *prot |= *prot ? PAGE_EXEC : 0;
10864 if (!(*prot & (1 << access_type))) {
10865 /* Access permission fault. */
10866 fi->type = ARMFault_Permission;
10867 goto do_fault;
10869 *phys_ptr = phys_addr;
10870 return false;
10871 do_fault:
10872 fi->domain = domain;
10873 fi->level = level;
10874 return true;
10877 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
10878 MMUAccessType access_type, ARMMMUIdx mmu_idx,
10879 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
10880 target_ulong *page_size, ARMMMUFaultInfo *fi)
10882 CPUState *cs = env_cpu(env);
10883 ARMCPU *cpu = env_archcpu(env);
10884 int level = 1;
10885 uint32_t table;
10886 uint32_t desc;
10887 uint32_t xn;
10888 uint32_t pxn = 0;
10889 int type;
10890 int ap;
10891 int domain = 0;
10892 int domain_prot;
10893 hwaddr phys_addr;
10894 uint32_t dacr;
10895 bool ns;
10897 /* Pagetable walk. */
10898 /* Lookup l1 descriptor. */
10899 if (!get_level1_table_address(env, mmu_idx, &table, address)) {
10900 /* Section translation fault if page walk is disabled by PD0 or PD1 */
10901 fi->type = ARMFault_Translation;
10902 goto do_fault;
10904 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10905 mmu_idx, fi);
10906 if (fi->type != ARMFault_None) {
10907 goto do_fault;
10909 type = (desc & 3);
10910 if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
10911 /* Section translation fault, or attempt to use the encoding
10912 * which is Reserved on implementations without PXN.
10914 fi->type = ARMFault_Translation;
10915 goto do_fault;
10917 if ((type == 1) || !(desc & (1 << 18))) {
10918 /* Page or Section. */
10919 domain = (desc >> 5) & 0x0f;
10921 if (regime_el(env, mmu_idx) == 1) {
10922 dacr = env->cp15.dacr_ns;
10923 } else {
10924 dacr = env->cp15.dacr_s;
10926 if (type == 1) {
10927 level = 2;
10929 domain_prot = (dacr >> (domain * 2)) & 3;
10930 if (domain_prot == 0 || domain_prot == 2) {
10931 /* Section or Page domain fault */
10932 fi->type = ARMFault_Domain;
10933 goto do_fault;
10935 if (type != 1) {
10936 if (desc & (1 << 18)) {
10937 /* Supersection. */
10938 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
10939 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
10940 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
10941 *page_size = 0x1000000;
10942 } else {
10943 /* Section. */
10944 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
10945 *page_size = 0x100000;
10947 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
10948 xn = desc & (1 << 4);
10949 pxn = desc & 1;
10950 ns = extract32(desc, 19, 1);
10951 } else {
10952 if (cpu_isar_feature(aa32_pxn, cpu)) {
10953 pxn = (desc >> 2) & 1;
10955 ns = extract32(desc, 3, 1);
10956 /* Lookup l2 entry. */
10957 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
10958 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10959 mmu_idx, fi);
10960 if (fi->type != ARMFault_None) {
10961 goto do_fault;
10963 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
10964 switch (desc & 3) {
10965 case 0: /* Page translation fault. */
10966 fi->type = ARMFault_Translation;
10967 goto do_fault;
10968 case 1: /* 64k page. */
10969 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
10970 xn = desc & (1 << 15);
10971 *page_size = 0x10000;
10972 break;
10973 case 2: case 3: /* 4k page. */
10974 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10975 xn = desc & 1;
10976 *page_size = 0x1000;
10977 break;
10978 default:
10979 /* Never happens, but compiler isn't smart enough to tell. */
10980 abort();
10983 if (domain_prot == 3) {
10984 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
10985 } else {
10986 if (pxn && !regime_is_user(env, mmu_idx)) {
10987 xn = 1;
10989 if (xn && access_type == MMU_INST_FETCH) {
10990 fi->type = ARMFault_Permission;
10991 goto do_fault;
10994 if (arm_feature(env, ARM_FEATURE_V6K) &&
10995 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
10996 /* The simplified model uses AP[0] as an access control bit. */
10997 if ((ap & 1) == 0) {
10998 /* Access flag fault. */
10999 fi->type = ARMFault_AccessFlag;
11000 goto do_fault;
11002 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
11003 } else {
11004 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
11006 if (*prot && !xn) {
11007 *prot |= PAGE_EXEC;
11009 if (!(*prot & (1 << access_type))) {
11010 /* Access permission fault. */
11011 fi->type = ARMFault_Permission;
11012 goto do_fault;
11015 if (ns) {
11016 /* The NS bit will (as required by the architecture) have no effect if
11017 * the CPU doesn't support TZ or this is a non-secure translation
11018 * regime, because the attribute will already be non-secure.
11020 attrs->secure = false;
11022 *phys_ptr = phys_addr;
11023 return false;
11024 do_fault:
11025 fi->domain = domain;
11026 fi->level = level;
11027 return true;
11031 * check_s2_mmu_setup
11032 * @cpu: ARMCPU
11033 * @is_aa64: True if the translation regime is in AArch64 state
11034 * @startlevel: Suggested starting level
11035 * @inputsize: Bitsize of IPAs
11036 * @stride: Page-table stride (See the ARM ARM)
11038 * Returns true if the suggested S2 translation parameters are OK and
11039 * false otherwise.
11041 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
11042 int inputsize, int stride)
11044 const int grainsize = stride + 3;
11045 int startsizecheck;
11047 /* Negative levels are never allowed. */
11048 if (level < 0) {
11049 return false;
11052 startsizecheck = inputsize - ((3 - level) * stride + grainsize);
11053 if (startsizecheck < 1 || startsizecheck > stride + 4) {
11054 return false;
11057 if (is_aa64) {
11058 CPUARMState *env = &cpu->env;
11059 unsigned int pamax = arm_pamax(cpu);
11061 switch (stride) {
11062 case 13: /* 64KB Pages. */
11063 if (level == 0 || (level == 1 && pamax <= 42)) {
11064 return false;
11066 break;
11067 case 11: /* 16KB Pages. */
11068 if (level == 0 || (level == 1 && pamax <= 40)) {
11069 return false;
11071 break;
11072 case 9: /* 4KB Pages. */
11073 if (level == 0 && pamax <= 42) {
11074 return false;
11076 break;
11077 default:
11078 g_assert_not_reached();
11081 /* Inputsize checks. */
11082 if (inputsize > pamax &&
11083 (arm_el_is_aa64(env, 1) || inputsize > 40)) {
11084 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
11085 return false;
11087 } else {
11088 /* AArch32 only supports 4KB pages. Assert on that. */
11089 assert(stride == 9);
11091 if (level == 0) {
11092 return false;
11095 return true;
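/*
 * Worked example (4KB granule, so stride == 9 and grainsize == 12):
 * for a 40-bit inputsize and a suggested starting level of 1,
 * startsizecheck = 40 - ((3 - 1) * 9 + 12) = 10, which lies within
 * [1, stride + 4] == [1, 13], so the SL0 programming is accepted.
 */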
11098 /* Translate from the 4-bit stage 2 representation of
11099 * memory attributes (without cache-allocation hints) to
11100 * the 8-bit representation of the stage 1 MAIR registers
11101 * (which includes allocation hints).
11103 * ref: shared/translation/attrs/S2AttrDecode()
11104 * .../S2ConvertAttrsHints()
11106 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
11108 uint8_t hiattr = extract32(s2attrs, 2, 2);
11109 uint8_t loattr = extract32(s2attrs, 0, 2);
11110 uint8_t hihint = 0, lohint = 0;
11112 if (hiattr != 0) { /* normal memory */
11113 if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
11114 hiattr = loattr = 1; /* non-cacheable */
11115 } else {
11116 if (hiattr != 1) { /* Write-through or write-back */
11117 hihint = 3; /* RW allocate */
11119 if (loattr != 1) { /* Write-through or write-back */
11120 lohint = 3; /* RW allocate */
11125 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
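/*
 * Examples: S2 attrs 0b1111 (write-back) become MAIR byte 0xff, both
 * halves gaining the 0b11 "RW allocate" hint; 0b0101 (non-cacheable)
 * stays hint-free and becomes 0x44.
 */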
11127 #endif /* !CONFIG_USER_ONLY */
11129 static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
11131 if (regime_has_2_ranges(mmu_idx)) {
11132 return extract64(tcr, 37, 2);
11133 } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
11134 return 0; /* VTCR_EL2 */
11135 } else {
11136 /* Replicate the single TBI bit so we always have 2 bits. */
11137 return extract32(tcr, 20, 1) * 3;
11141 static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
11143 if (regime_has_2_ranges(mmu_idx)) {
11144 return extract64(tcr, 51, 2);
11145 } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
11146 return 0; /* VTCR_EL2 */
11147 } else {
11148 /* Replicate the single TBID bit so we always have 2 bits. */
11149 return extract32(tcr, 29, 1) * 3;
11153 static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
11155 if (regime_has_2_ranges(mmu_idx)) {
11156 return extract64(tcr, 57, 2);
11157 } else {
11158 /* Replicate the single TCMA bit so we always have 2 bits. */
11159 return extract32(tcr, 30, 1) * 3;
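/*
 * The "* 3" replication means a single-range regime with, say, TBI == 1
 * yields 0b11, so callers can evaluate "(field >> select) & 1" without
 * caring whether the regime has one translation range or two.
 */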
11163 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
11164 ARMMMUIdx mmu_idx, bool data)
11166 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
11167 bool epd, hpd, using16k, using64k;
11168 int select, tsz, tbi, max_tsz;
11170 if (!regime_has_2_ranges(mmu_idx)) {
11171 select = 0;
11172 tsz = extract32(tcr, 0, 6);
11173 using64k = extract32(tcr, 14, 1);
11174 using16k = extract32(tcr, 15, 1);
11175 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
11176 /* VTCR_EL2 */
11177 hpd = false;
11178 } else {
11179 hpd = extract32(tcr, 24, 1);
11181 epd = false;
11182 } else {
11184 * Bit 55 is always between the two regions, and is canonical for
11185 * determining if address tagging is enabled.
11187 select = extract64(va, 55, 1);
11188 if (!select) {
11189 tsz = extract32(tcr, 0, 6);
11190 epd = extract32(tcr, 7, 1);
11191 using64k = extract32(tcr, 14, 1);
11192 using16k = extract32(tcr, 15, 1);
11193 hpd = extract64(tcr, 41, 1);
11194 } else {
11195 int tg = extract32(tcr, 30, 2);
11196 using16k = tg == 1;
11197 using64k = tg == 3;
11198 tsz = extract32(tcr, 16, 6);
11199 epd = extract32(tcr, 23, 1);
11200 hpd = extract64(tcr, 42, 1);
11204 if (cpu_isar_feature(aa64_st, env_archcpu(env))) {
11205 max_tsz = 48 - using64k;
11206 } else {
11207 max_tsz = 39;
11210 tsz = MIN(tsz, max_tsz);
11211 tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */
11213 /* Present TBI as a composite with TBID. */
11214 tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
11215 if (!data) {
11216 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
11218 tbi = (tbi >> select) & 1;
11220 return (ARMVAParameters) {
11221 .tsz = tsz,
11222 .select = select,
11223 .tbi = tbi,
11224 .epd = epd,
11225 .hpd = hpd,
11226 .using16k = using16k,
11227 .using64k = using64k,
11231 #ifndef CONFIG_USER_ONLY
11232 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
11233 ARMMMUIdx mmu_idx)
11235 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
11236 uint32_t el = regime_el(env, mmu_idx);
11237 int select, tsz;
11238 bool epd, hpd;
11240 assert(mmu_idx != ARMMMUIdx_Stage2_S);
11242 if (mmu_idx == ARMMMUIdx_Stage2) {
11243 /* VTCR */
11244 bool sext = extract32(tcr, 4, 1);
11245 bool sign = extract32(tcr, 3, 1);
11248 * If the sign-extend bit is not the same as t0sz[3], the result
11249 * is unpredictable. Flag this as a guest error.
11251 if (sign != sext) {
11252 qemu_log_mask(LOG_GUEST_ERROR,
11253 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
11255 tsz = sextract32(tcr, 0, 4) + 8;
11256 select = 0;
11257 hpd = false;
11258 epd = false;
11259 } else if (el == 2) {
11260 /* HTCR */
11261 tsz = extract32(tcr, 0, 3);
11262 select = 0;
11263 hpd = extract64(tcr, 24, 1);
11264 epd = false;
11265 } else {
11266 int t0sz = extract32(tcr, 0, 3);
11267 int t1sz = extract32(tcr, 16, 3);
11269 if (t1sz == 0) {
11270 select = va > (0xffffffffu >> t0sz);
11271 } else {
11272 /* Note that we will detect errors later. */
11273 select = va >= ~(0xffffffffu >> t1sz);
11275 if (!select) {
11276 tsz = t0sz;
11277 epd = extract32(tcr, 7, 1);
11278 hpd = extract64(tcr, 41, 1);
11279 } else {
11280 tsz = t1sz;
11281 epd = extract32(tcr, 23, 1);
11282 hpd = extract64(tcr, 42, 1);
11284 /* For aarch32, hpd0 is not enabled without t2e as well. */
11285 hpd &= extract32(tcr, 6, 1);
11288 return (ARMVAParameters) {
11289 .tsz = tsz,
11290 .select = select,
11291 .epd = epd,
11292 .hpd = hpd,
11297 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
11299 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
11300 * prot and page_size may not be filled in, and the populated fsr value provides
11301 * information on why the translation aborted, in the format of a long-format
11302 * DFSR/IFSR fault register, with the following caveats:
11303 * * the WnR bit is never set (the caller must do this).
11305 * @env: CPUARMState
11306 * @address: virtual address to get physical address for
11307 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
11308 * @mmu_idx: MMU index indicating required translation regime
11309  * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page
11310  *             table walk), this must be true if this is stage 2 of a stage
11311  *             1+2 walk for an EL0 access. If @mmu_idx is anything else, @s1_is_el0 is ignored.
11312 * @phys_ptr: set to the physical address corresponding to the virtual address
11313 * @attrs: set to the memory transaction attributes to use
11314 * @prot: set to the permissions for the page containing phys_ptr
11315 * @page_size_ptr: set to the size of the page containing phys_ptr
11316 * @fi: set to fault info if the translation fails
11317 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
11319 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
11320 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11321 bool s1_is_el0,
11322 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
11323 target_ulong *page_size_ptr,
11324 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
11326 ARMCPU *cpu = env_archcpu(env);
11327 CPUState *cs = CPU(cpu);
11328 /* Read an LPAE long-descriptor translation table. */
11329 ARMFaultType fault_type = ARMFault_Translation;
11330 uint32_t level;
11331 ARMVAParameters param;
11332 uint64_t ttbr;
11333 hwaddr descaddr, indexmask, indexmask_grainsize;
11334 uint32_t tableattrs;
11335 target_ulong page_size;
11336 uint32_t attrs;
11337 int32_t stride;
11338 int addrsize, inputsize;
11339 TCR *tcr = regime_tcr(env, mmu_idx);
11340 int ap, ns, xn, pxn;
11341 uint32_t el = regime_el(env, mmu_idx);
11342 uint64_t descaddrmask;
11343 bool aarch64 = arm_el_is_aa64(env, el);
11344 bool guarded = false;
11346 /* TODO: This code does not support shareability levels. */
11347 if (aarch64) {
11348 param = aa64_va_parameters(env, address, mmu_idx,
11349 access_type != MMU_INST_FETCH);
11350 level = 0;
11351 addrsize = 64 - 8 * param.tbi;
11352 inputsize = 64 - param.tsz;
11353 } else {
11354 param = aa32_va_parameters(env, address, mmu_idx);
11355 level = 1;
11356 addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
11357 inputsize = addrsize - param.tsz;
11361 * We determined the region when collecting the parameters, but we
11362 * have not yet validated that the address is valid for the region.
11363 * Extract the top bits and verify that they all match select.
11365 * For aa32, if inputsize == addrsize, then we have selected the
11366 * region by exclusion in aa32_va_parameters and there is no more
11367 * validation to do here.
11369 if (inputsize < addrsize) {
11370 target_ulong top_bits = sextract64(address, inputsize,
11371 addrsize - inputsize);
11372 if (-top_bits != param.select) {
11373 /* The gap between the two regions is a Translation fault */
11374 fault_type = ARMFault_Translation;
11375 goto do_fault;
11379 if (param.using64k) {
11380 stride = 13;
11381 } else if (param.using16k) {
11382 stride = 11;
11383 } else {
11384 stride = 9;
11387 /* Note that QEMU ignores shareability and cacheability attributes,
11388 * so we don't need to do anything with the SH, ORGN, IRGN fields
11389 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
11390 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
11391 * implement any ASID-like capability so we can ignore it (instead
11392 * we will always flush the TLB any time the ASID is changed).
11394 ttbr = regime_ttbr(env, mmu_idx, param.select);
11396 /* Here we should have set up all the parameters for the translation:
11397 * inputsize, ttbr, epd, stride, tbi
11400 if (param.epd) {
11401 /* Translation table walk disabled => Translation fault on TLB miss
11402 * Note: This is always 0 on 64-bit EL2 and EL3.
11404 goto do_fault;
11407 if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
11408 /* The starting level depends on the virtual address size (which can
11409 * be up to 48 bits) and the translation granule size. It indicates
11410 * the number of strides (stride bits at a time) needed to
11411 * consume the bits of the input address. In the pseudocode this is:
11412 * level = 4 - RoundUp((inputsize - grainsize) / stride)
11413 * where their 'inputsize' is our 'inputsize', 'grainsize' is
11414 * our 'stride + 3' and 'stride' is our 'stride'.
11415 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
11416 * = 4 - (inputsize - stride - 3 + stride - 1) / stride
11417 * = 4 - (inputsize - 4) / stride;
11419 level = 4 - (inputsize - 4) / stride;
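        /*
         * Worked example (4KB granule, stride == 9): a 48-bit input range
         * gives level = 4 - (48 - 4) / 9 = 0, while a 39-bit range
         * (T0SZ == 25) gives level = 4 - (39 - 4) / 9 = 1.
         */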
11420 } else {
11421 /* For stage 2 translations the starting level is specified by the
11422 * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
11424 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
11425 uint32_t startlevel;
11426 bool ok;
11428 if (!aarch64 || stride == 9) {
11429 /* AArch32 or 4KB pages */
11430 startlevel = 2 - sl0;
11432 if (cpu_isar_feature(aa64_st, cpu)) {
11433 startlevel &= 3;
11435 } else {
11436 /* 16KB or 64KB pages */
11437 startlevel = 3 - sl0;
11440 /* Check that the starting level is valid. */
11441 ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
11442 inputsize, stride);
11443 if (!ok) {
11444 fault_type = ARMFault_Translation;
11445 goto do_fault;
11447 level = startlevel;
11450 indexmask_grainsize = (1ULL << (stride + 3)) - 1;
11451 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
11453 /* Now we can extract the actual base address from the TTBR */
11454 descaddr = extract64(ttbr, 0, 48);
11456 * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
11457 * and also to mask out CnP (bit 0) which could validly be non-zero.
11459 descaddr &= ~indexmask;
11461     /* The address field in the descriptor goes up to bit 39 for ARMv7
11462      * and up to bit 47 for ARMv8; we use a descaddrmask that only reaches
11463      * bit 39 for AArch32, because the higher bits are not needed there to
11464      * construct the next descriptor address (they should all be zeroes anyway).
11466 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
11467 ~indexmask_grainsize;
11469 /* Secure accesses start with the page table in secure memory and
11470 * can be downgraded to non-secure at any step. Non-secure accesses
11471 * remain non-secure. We implement this by just ORing in the NSTable/NS
11472 * bits at each step.
11474 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
11475 for (;;) {
11476 uint64_t descriptor;
11477 bool nstable;
11479 descaddr |= (address >> (stride * (4 - level))) & indexmask;
11480 descaddr &= ~7ULL;
11481 nstable = extract32(tableattrs, 4, 1);
11482 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
11483 if (fi->type != ARMFault_None) {
11484 goto do_fault;
11487 if (!(descriptor & 1) ||
11488 (!(descriptor & 2) && (level == 3))) {
11489 /* Invalid, or the Reserved level 3 encoding */
11490 goto do_fault;
11492 descaddr = descriptor & descaddrmask;
11494 if ((descriptor & 2) && (level < 3)) {
11495 /* Table entry. The top five bits are attributes which may
11496 * propagate down through lower levels of the table (and
11497 * which are all arranged so that 0 means "no effect", so
11498 * we can gather them up by ORing in the bits at each level).
11500 tableattrs |= extract64(descriptor, 59, 5);
11501 level++;
11502 indexmask = indexmask_grainsize;
11503 continue;
11505 /* Block entry at level 1 or 2, or page entry at level 3.
11506 * These are basically the same thing, although the number
11507 * of bits we pull in from the vaddr varies.
11509 page_size = (1ULL << ((stride * (4 - level)) + 3));
11510 descaddr |= (address & (page_size - 1));
11511 /* Extract attributes from the descriptor */
11512 attrs = extract64(descriptor, 2, 10)
11513 | (extract64(descriptor, 52, 12) << 10);
11515 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
11516 /* Stage 2 table descriptors do not include any attribute fields */
11517 break;
11519 /* Merge in attributes from table descriptors */
11520 attrs |= nstable << 3; /* NS */
11521 guarded = extract64(descriptor, 50, 1); /* GP */
11522 if (param.hpd) {
11523 /* HPD disables all the table attributes except NSTable. */
11524 break;
11526 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
11527 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
11528 * means "force PL1 access only", which means forcing AP[1] to 0.
11530 attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */
11531 attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */
11532 break;
11534 /* Here descaddr is the final physical address, and attributes
11535 * are all in attrs.
11537 fault_type = ARMFault_AccessFlag;
11538 if ((attrs & (1 << 8)) == 0) {
11539 /* Access flag */
11540 goto do_fault;
11543 ap = extract32(attrs, 4, 2);
11545 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
11546 ns = mmu_idx == ARMMMUIdx_Stage2;
11547 xn = extract32(attrs, 11, 2);
11548 *prot = get_S2prot(env, ap, xn, s1_is_el0);
11549 } else {
11550 ns = extract32(attrs, 3, 1);
11551 xn = extract32(attrs, 12, 1);
11552 pxn = extract32(attrs, 11, 1);
11553 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
11556 fault_type = ARMFault_Permission;
11557 if (!(*prot & (1 << access_type))) {
11558 goto do_fault;
11561 if (ns) {
11562 /* The NS bit will (as required by the architecture) have no effect if
11563 * the CPU doesn't support TZ or this is a non-secure translation
11564 * regime, because the attribute will already be non-secure.
11566 txattrs->secure = false;
11568 /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
11569 if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
11570 arm_tlb_bti_gp(txattrs) = true;
11573 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
11574 cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
11575 } else {
11576 /* Index into MAIR registers for cache attributes */
11577 uint8_t attrindx = extract32(attrs, 0, 3);
11578 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
11579 assert(attrindx <= 7);
11580 cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
11582 cacheattrs->shareability = extract32(attrs, 6, 2);
11584 *phys_ptr = descaddr;
11585 *page_size_ptr = page_size;
11586 return false;
11588 do_fault:
11589 fi->type = fault_type;
11590 fi->level = level;
11591 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
11592 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
11593 mmu_idx == ARMMMUIdx_Stage2_S);
11594 fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
11595 return true;
11598 static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
11599 ARMMMUIdx mmu_idx,
11600 int32_t address, int *prot)
11602 if (!arm_feature(env, ARM_FEATURE_M)) {
11603 *prot = PAGE_READ | PAGE_WRITE;
11604 switch (address) {
11605 case 0xF0000000 ... 0xFFFFFFFF:
11606 if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
11607 /* hivecs execing is ok */
11608 *prot |= PAGE_EXEC;
11610 break;
11611 case 0x00000000 ... 0x7FFFFFFF:
11612 *prot |= PAGE_EXEC;
11613 break;
11615 } else {
11616 /* Default system address map for M profile cores.
11617 * The architecture specifies which regions are execute-never;
11618 * at the MPU level no other checks are defined.
11620 switch (address) {
11621 case 0x00000000 ... 0x1fffffff: /* ROM */
11622 case 0x20000000 ... 0x3fffffff: /* SRAM */
11623 case 0x60000000 ... 0x7fffffff: /* RAM */
11624 case 0x80000000 ... 0x9fffffff: /* RAM */
11625 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
11626 break;
11627 case 0x40000000 ... 0x5fffffff: /* Peripheral */
11628 case 0xa0000000 ... 0xbfffffff: /* Device */
11629 case 0xc0000000 ... 0xdfffffff: /* Device */
11630 case 0xe0000000 ... 0xffffffff: /* System */
11631 *prot = PAGE_READ | PAGE_WRITE;
11632 break;
11633 default:
11634 g_assert_not_reached();
11639 static bool pmsav7_use_background_region(ARMCPU *cpu,
11640 ARMMMUIdx mmu_idx, bool is_user)
11642 /* Return true if we should use the default memory map as a
11643 * "background" region if there are no hits against any MPU regions.
11645 CPUARMState *env = &cpu->env;
11647 if (is_user) {
11648 return false;
11651 if (arm_feature(env, ARM_FEATURE_M)) {
11652 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
11653 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
11654 } else {
11655 return regime_sctlr(env, mmu_idx) & SCTLR_BR;
11659 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
11661 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
11662 return arm_feature(env, ARM_FEATURE_M) &&
11663 extract32(address, 20, 12) == 0xe00;
11666 static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
11668 /* True if address is in the M profile system region
11669 * 0xe0000000 - 0xffffffff
11671 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
11674 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
11675 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11676 hwaddr *phys_ptr, int *prot,
11677 target_ulong *page_size,
11678 ARMMMUFaultInfo *fi)
11680 ARMCPU *cpu = env_archcpu(env);
11681 int n;
11682 bool is_user = regime_is_user(env, mmu_idx);
11684 *phys_ptr = address;
11685 *page_size = TARGET_PAGE_SIZE;
11686 *prot = 0;
11688 if (regime_translation_disabled(env, mmu_idx) ||
11689 m_is_ppb_region(env, address)) {
11690 /* MPU disabled or M profile PPB access: use default memory map.
11691 * The other case which uses the default memory map in the
11692 * v7M ARM ARM pseudocode is exception vector reads from the vector
11693 * table. In QEMU those accesses are done in arm_v7m_load_vector(),
11694 * which always does a direct read using address_space_ldl(), rather
11695 * than going via this function, so we don't need to check that here.
11697 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
11698 } else { /* MPU enabled */
11699 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
11700 /* region search */
11701 uint32_t base = env->pmsav7.drbar[n];
11702 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
11703 uint32_t rmask;
11704 bool srdis = false;
11706 if (!(env->pmsav7.drsr[n] & 0x1)) {
11707 continue;
11710 if (!rsize) {
11711 qemu_log_mask(LOG_GUEST_ERROR,
11712 "DRSR[%d]: Rsize field cannot be 0\n", n);
11713 continue;
11715 rsize++;
11716 rmask = (1ull << rsize) - 1;
11718 if (base & rmask) {
11719 qemu_log_mask(LOG_GUEST_ERROR,
11720 "DRBAR[%d]: 0x%" PRIx32 " misaligned "
11721 "to DRSR region size, mask = 0x%" PRIx32 "\n",
11722 n, base, rmask);
11723 continue;
11726 if (address < base || address > base + rmask) {
11728 * Address not in this region. We must check whether the
11729 * region covers addresses in the same page as our address.
11730 * In that case we must not report a size that covers the
11731 * whole page for a subsequent hit against a different MPU
11732 * region or the background region, because it would result in
11733 * incorrect TLB hits for subsequent accesses to addresses that
11734 * are in this MPU region.
11736 if (ranges_overlap(base, rmask,
11737 address & TARGET_PAGE_MASK,
11738 TARGET_PAGE_SIZE)) {
11739 *page_size = 1;
11741 continue;
11744 /* Region matched */
11746 if (rsize >= 8) { /* no subregions for regions < 256 bytes */
11747 int i, snd;
11748 uint32_t srdis_mask;
11750 rsize -= 3; /* sub region size (power of 2) */
11751 snd = ((address - base) >> rsize) & 0x7;
11752 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
11754 srdis_mask = srdis ? 0x3 : 0x0;
11755 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
11756 /* This will check, in groups of 2, 4 and then 8, whether
11757 * the subregion bits are consistent. rsize is incremented
11758 * back up to give the region size, considering consistent
11759 * adjacent subregions as one region. Stop testing if rsize
11760 * is already big enough for an entire QEMU page.
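/*
 * Worked example (illustrative): with DRSR.SRD == 0b00001111 and an
 * address in (enabled) subregion 5, srdis_mask starts as 0x0; the
 * 2-bit group [5:4] and the 4-bit group [7:4] both match, but the
 * 8-bit group does not, so rsize is incremented exactly twice. The
 * four contiguous enabled subregions are thus treated as one region
 * of half the programmed size.
 */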
11762 int snd_rounded = snd & ~(i - 1);
11763 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
11764 snd_rounded + 8, i);
11765 if (srdis_mask ^ srdis_multi) {
11766 break;
11768 srdis_mask = (srdis_mask << i) | srdis_mask;
11769 rsize++;
11772 if (srdis) {
11773 continue;
11775 if (rsize < TARGET_PAGE_BITS) {
11776 *page_size = 1 << rsize;
11778 break;
11781 if (n == -1) { /* no hits */
11782 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
11783 /* background fault */
11784 fi->type = ARMFault_Background;
11785 return true;
11787 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
11788 } else { /* an MPU hit! */
11789 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
11790 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
11792 if (m_is_system_region(env, address)) {
11793 /* System space is always execute never */
11794 xn = 1;
11797 if (is_user) { /* User mode AP bit decoding */
11798 switch (ap) {
11799 case 0:
11800 case 1:
11801 case 5:
11802 break; /* no access */
11803 case 3:
11804 *prot |= PAGE_WRITE;
11805 /* fall through */
11806 case 2:
11807 case 6:
11808 *prot |= PAGE_READ | PAGE_EXEC;
11809 break;
11810 case 7:
11811 /* for v7M, same as 6; for R profile a reserved value */
11812 if (arm_feature(env, ARM_FEATURE_M)) {
11813 *prot |= PAGE_READ | PAGE_EXEC;
11814 break;
11816 /* fall through */
11817 default:
11818 qemu_log_mask(LOG_GUEST_ERROR,
11819 "DRACR[%d]: Bad value for AP bits: 0x%"
11820 PRIx32 "\n", n, ap);
11822 } else { /* Priv. mode AP bits decoding */
11823 switch (ap) {
11824 case 0:
11825 break; /* no access */
11826 case 1:
11827 case 2:
11828 case 3:
11829 *prot |= PAGE_WRITE;
11830 /* fall through */
11831 case 5:
11832 case 6:
11833 *prot |= PAGE_READ | PAGE_EXEC;
11834 break;
11835 case 7:
11836 /* for v7M, same as 6; for R profile a reserved value */
11837 if (arm_feature(env, ARM_FEATURE_M)) {
11838 *prot |= PAGE_READ | PAGE_EXEC;
11839 break;
11841 /* fall through */
11842 default:
11843 qemu_log_mask(LOG_GUEST_ERROR,
11844 "DRACR[%d]: Bad value for AP bits: 0x%"
11845 PRIx32 "\n", n, ap);
11849 /* execute never */
11850 if (xn) {
11851 *prot &= ~PAGE_EXEC;
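/*
 * The final permission check relies on MMU_DATA_LOAD == 0,
 * MMU_DATA_STORE == 1 and MMU_INST_FETCH == 2 lining up with
 * PAGE_READ == 1 << 0, PAGE_WRITE == 1 << 1 and PAGE_EXEC == 1 << 2,
 * so that 1 << access_type selects the permission bit being exercised.
 */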
11856 fi->type = ARMFault_Permission;
11857 fi->level = 1;
11858 return !(*prot & (1 << access_type));
11861 static bool v8m_is_sau_exempt(CPUARMState *env,
11862 uint32_t address, MMUAccessType access_type)
11864 /* The architecture specifies that certain address ranges are
11865 * exempt from v8M SAU/IDAU checks.
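* (The ranges below correspond, we believe, to the ITM/DWT/FPB block,
* the SCS, the non-secure alias of the SCS, the TPIU/ETM region and
* the ROM table; compare the v8M pseudocode IsExempt().)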
11867 return
11868 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
11869 (address >= 0xe0000000 && address <= 0xe0002fff) ||
11870 (address >= 0xe000e000 && address <= 0xe000efff) ||
11871 (address >= 0xe002e000 && address <= 0xe002efff) ||
11872 (address >= 0xe0040000 && address <= 0xe0041fff) ||
11873 (address >= 0xe00ff000 && address <= 0xe00fffff);
11876 void v8m_security_lookup(CPUARMState *env, uint32_t address,
11877 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11878 V8M_SAttributes *sattrs)
11880 /* Look up the security attributes for this address. Compare the
11881 * pseudocode SecurityCheck() function.
11882 * We assume the caller has zero-initialized *sattrs.
11884 ARMCPU *cpu = env_archcpu(env);
11885 int r;
11886 bool idau_exempt = false, idau_ns = true, idau_nsc = true;
11887 int idau_region = IREGION_NOTVALID;
11888 uint32_t addr_page_base = address & TARGET_PAGE_MASK;
11889 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
11891 if (cpu->idau) {
11892 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
11893 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
11895 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
11896 &idau_nsc);
11899 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
11900 /* 0xf0000000..0xffffffff is always S for insn fetches */
11901 return;
11904 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
11905 sattrs->ns = !regime_is_secure(env, mmu_idx);
11906 return;
11909 if (idau_region != IREGION_NOTVALID) {
11910 sattrs->irvalid = true;
11911 sattrs->iregion = idau_region;
11914 switch (env->sau.ctrl & 3) {
11915 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
11916 break;
11917 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
11918 sattrs->ns = true;
11919 break;
11920 default: /* SAU.ENABLE == 1 */
11921 for (r = 0; r < cpu->sau_sregion; r++) {
11922 if (env->sau.rlar[r] & 1) {
11923 uint32_t base = env->sau.rbar[r] & ~0x1f;
11924 uint32_t limit = env->sau.rlar[r] | 0x1f;
11926 if (base <= address && limit >= address) {
11927 if (base > addr_page_base || limit < addr_page_limit) {
11928 sattrs->subpage = true;
11930 if (sattrs->srvalid) {
11931 /* If we hit in more than one region then we must report
11932 * as Secure, not NS-Callable, with no valid region
11933 * number info.
11935 sattrs->ns = false;
11936 sattrs->nsc = false;
11937 sattrs->sregion = 0;
11938 sattrs->srvalid = false;
11939 break;
11940 } else {
11941 if (env->sau.rlar[r] & 2) {
11942 sattrs->nsc = true;
11943 } else {
11944 sattrs->ns = true;
11946 sattrs->srvalid = true;
11947 sattrs->sregion = r;
11949 } else {
11951 * Address not in this region. We must check whether the
11952 * region covers addresses in the same page as our address.
11953 * In that case we must not report a size that covers the
11954 * whole page for a subsequent hit against a different MPU
11955 * region or the background region, because it would result
11956 * in incorrect TLB hits for subsequent accesses to
11957 * addresses that are in this MPU region.
11959 if (limit >= base &&
11960 ranges_overlap(base, limit - base + 1,
11961 addr_page_base,
11962 TARGET_PAGE_SIZE)) {
11963 sattrs->subpage = true;
11968 break;
11972 * The IDAU will override the SAU lookup results if it specifies
11973 * higher security than the SAU does.
11975 if (!idau_ns) {
11976 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
11977 sattrs->ns = false;
11978 sattrs->nsc = idau_nsc;
11983 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
11984 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11985 hwaddr *phys_ptr, MemTxAttrs *txattrs,
11986 int *prot, bool *is_subpage,
11987 ARMMMUFaultInfo *fi, uint32_t *mregion)
11989 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
11990 * that a full phys-to-virt translation does).
11991 * mregion is (if not NULL) set to the region number which matched,
11992 * or -1 if no region number is returned (MPU off, address did not
11993 * hit a region, address hit in multiple regions).
11994 * We set is_subpage to true if the region hit doesn't cover the
11995 * entire TARGET_PAGE the address is within.
11997 ARMCPU *cpu = env_archcpu(env);
11998 bool is_user = regime_is_user(env, mmu_idx);
11999 uint32_t secure = regime_is_secure(env, mmu_idx);
12000 int n;
12001 int matchregion = -1;
12002 bool hit = false;
12003 uint32_t addr_page_base = address & TARGET_PAGE_MASK;
12004 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
12006 *is_subpage = false;
12007 *phys_ptr = address;
12008 *prot = 0;
12009 if (mregion) {
12010 *mregion = -1;
12013 /* Unlike the ARM ARM pseudocode, we don't need to check whether this
12014 * was an exception vector read from the vector table (which is always
12015 * done using the default system address map), because those accesses
12016 * are done in arm_v7m_load_vector(), which always does a direct
12017 * read using address_space_ldl(), rather than going via this function.
12019 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
12020 hit = true;
12021 } else if (m_is_ppb_region(env, address)) {
12022 hit = true;
12023 } else {
12024 if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
12025 hit = true;
12028 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
12029 /* region search */
12030 /* Note that the base address is bits [31:5] from the register
12031 * with bits [4:0] all zeroes, but the limit address is bits
12032 * [31:5] from the register with bits [4:0] all ones.
12034 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
12035 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
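/*
 * e.g. RBAR == 0x20000000 with RLAR == 0x2000ffe1 describes an
 * enabled 64KB region: base == 0x20000000 and
 * limit == 0x2000ffe1 | 0x1f == 0x2000ffff.
 */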
12037 if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
12038 /* Region disabled */
12039 continue;
12042 if (address < base || address > limit) {
12044 * Address not in this region. We must check whether the
12045 * region covers addresses in the same page as our address.
12046 * In that case we must not report a size that covers the
12047 * whole page for a subsequent hit against a different MPU
12048 * region or the background region, because it would result in
12049 * incorrect TLB hits for subsequent accesses to addresses that
12050 * are in this MPU region.
12052 if (limit >= base &&
12053 ranges_overlap(base, limit - base + 1,
12054 addr_page_base,
12055 TARGET_PAGE_SIZE)) {
12056 *is_subpage = true;
12058 continue;
12061 if (base > addr_page_base || limit < addr_page_limit) {
12062 *is_subpage = true;
12065 if (matchregion != -1) {
12066 /* Multiple regions match -- always a failure (unlike
12067 * PMSAv7 where highest-numbered-region wins)
12069 fi->type = ARMFault_Permission;
12070 fi->level = 1;
12071 return true;
12074 matchregion = n;
12075 hit = true;
12079 if (!hit) {
12080 /* background fault */
12081 fi->type = ARMFault_Background;
12082 return true;
12085 if (matchregion == -1) {
12086 /* hit using the background region */
12087 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
12088 } else {
12089 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
12090 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
12091 bool pxn = false;
12093 if (arm_feature(env, ARM_FEATURE_V8_1M)) {
12094 pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
12097 if (m_is_system_region(env, address)) {
12098 /* System space is always execute never */
12099 xn = 1;
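/*
 * ap is the v8M "simple" AP[2:1] encoding: 0b00 read/write,
 * privileged only; 0b01 read/write, any; 0b10 read-only,
 * privileged only; 0b11 read-only, any.
 */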
12102 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
12103 if (*prot && !xn && !(pxn && !is_user)) {
12104 *prot |= PAGE_EXEC;
12106 /* We don't need to look the attribute up in the MAIR0/MAIR1
12107 * registers because that only tells us about cacheability.
12109 if (mregion) {
12110 *mregion = matchregion;
12114 fi->type = ARMFault_Permission;
12115 fi->level = 1;
12116 return !(*prot & (1 << access_type));
12120 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
12121 MMUAccessType access_type, ARMMMUIdx mmu_idx,
12122 hwaddr *phys_ptr, MemTxAttrs *txattrs,
12123 int *prot, target_ulong *page_size,
12124 ARMMMUFaultInfo *fi)
12126 uint32_t secure = regime_is_secure(env, mmu_idx);
12127 V8M_SAttributes sattrs = {};
12128 bool ret;
12129 bool mpu_is_subpage;
12131 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
12132 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
12133 if (access_type == MMU_INST_FETCH) {
12134 /* Instruction fetches always use the MMU bank and the
12135 * transaction attribute determined by the fetch address,
12136 * regardless of CPU state. This is painful for QEMU
12137 * to handle, because it would mean we need to encode
12138 * into the mmu_idx not just the (user, negpri) information
12139 * for the current security state but also that for the
12140 * other security state, which would balloon the number
12141 * of mmu_idx values needed alarmingly.
12142 * Fortunately we can avoid this because it's not actually
12143 * possible to arbitrarily execute code from memory with
12144 * the wrong security attribute: it will always generate
12145 * an exception of some kind or another, apart from the
12146 * special case of an NS CPU executing an SG instruction
12147 * in S&NSC memory. So we always just fail the translation
12148 * here and sort things out in the exception handler
12149 * (including possibly emulating an SG instruction).
12151 if (sattrs.ns != !secure) {
12152 if (sattrs.nsc) {
12153 fi->type = ARMFault_QEMU_NSCExec;
12154 } else {
12155 fi->type = ARMFault_QEMU_SFault;
12157 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
12158 *phys_ptr = address;
12159 *prot = 0;
12160 return true;
12162 } else {
12163 /* For data accesses we always use the MMU bank indicated
12164 * by the current CPU state, but the security attributes
12165 * might downgrade a secure access to nonsecure.
12167 if (sattrs.ns) {
12168 txattrs->secure = false;
12169 } else if (!secure) {
12170 /* NS access to S memory must fault.
12171 * Architecturally we should first check whether the
12172 * MPU information for this address indicates that we
12173 * are doing an unaligned access to Device memory, which
12174 * should generate a UsageFault instead. QEMU does not
12175 * currently check for that kind of unaligned access though.
12176 * If we added it we would need to do so as a special case
12177 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
12179 fi->type = ARMFault_QEMU_SFault;
12180 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
12181 *phys_ptr = address;
12182 *prot = 0;
12183 return true;
12188 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
12189 txattrs, prot, &mpu_is_subpage, fi, NULL);
12190 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
12191 return ret;
12194 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
12195 MMUAccessType access_type, ARMMMUIdx mmu_idx,
12196 hwaddr *phys_ptr, int *prot,
12197 ARMMMUFaultInfo *fi)
12199 int n;
12200 uint32_t mask;
12201 uint32_t base;
12202 bool is_user = regime_is_user(env, mmu_idx);
12204 if (regime_translation_disabled(env, mmu_idx)) {
12205 /* MPU disabled. */
12206 *phys_ptr = address;
12207 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
12208 return false;
12211 *phys_ptr = address;
12212 for (n = 7; n >= 0; n--) {
12213 base = env->cp15.c6_region[n];
12214 if ((base & 1) == 0) {
12215 continue;
12217 mask = 1 << ((base >> 1) & 0x1f);
12218 /* Keep this shift separate from the above to avoid an
12219 (undefined) << 32. */
12220 mask = (mask << 1) - 1;
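/*
 * e.g. a size field of 11 gives mask == 0xfff, i.e. a 4KB region:
 * PMSAv5 encodes a region of 2^(N+1) bytes as N.
 */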
12221 if (((base ^ address) & ~mask) == 0) {
12222 break;
12225 if (n < 0) {
12226 fi->type = ARMFault_Background;
12227 return true;
12230 if (access_type == MMU_INST_FETCH) {
12231 mask = env->cp15.pmsav5_insn_ap;
12232 } else {
12233 mask = env->cp15.pmsav5_data_ap;
12235 mask = (mask >> (n * 4)) & 0xf;
12236 switch (mask) {
12237 case 0:
12238 fi->type = ARMFault_Permission;
12239 fi->level = 1;
12240 return true;
12241 case 1:
12242 if (is_user) {
12243 fi->type = ARMFault_Permission;
12244 fi->level = 1;
12245 return true;
12247 *prot = PAGE_READ | PAGE_WRITE;
12248 break;
12249 case 2:
12250 *prot = PAGE_READ;
12251 if (!is_user) {
12252 *prot |= PAGE_WRITE;
12254 break;
12255 case 3:
12256 *prot = PAGE_READ | PAGE_WRITE;
12257 break;
12258 case 5:
12259 if (is_user) {
12260 fi->type = ARMFault_Permission;
12261 fi->level = 1;
12262 return true;
12264 *prot = PAGE_READ;
12265 break;
12266 case 6:
12267 *prot = PAGE_READ;
12268 break;
12269 default:
12270 /* Bad permission. */
12271 fi->type = ARMFault_Permission;
12272 fi->level = 1;
12273 return true;
12275 *prot |= PAGE_EXEC;
12276 return false;
12279 /* Combine either inner or outer cacheability attributes for normal
12280 * memory, according to table D4-42 and pseudocode procedure
12281 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
12283 * NB: only stage 1 includes allocation hints (RW bits), leading to
12284 * some asymmetry.
12286 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
12288 if (s1 == 4 || s2 == 4) {
12289 /* non-cacheable has precedence */
12290 return 4;
12291 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
12292 /* stage 1 write-through takes precedence */
12293 return s1;
12294 } else if (extract32(s2, 2, 2) == 2) {
12295 /* stage 2 write-through takes precedence, but the allocation hint
12296 * is still taken from stage 1
12298 return (2 << 2) | extract32(s1, 0, 2);
12299 } else { /* write-back */
12300 return s1;
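/*
 * Worked example for combine_cacheattr_nibble(): s1 == 0xf (WB, RWA)
 * combined with s2 == 0xa (WT, RA) yields 0xb -- stage 2's
 * write-through wins, but the allocation hints come from stage 1.
 */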
12304 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
12305 * and CombineS1S2Desc()
12307 * @s1: Attributes from stage 1 walk
12308 * @s2: Attributes from stage 2 walk
12310 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
12312 uint8_t s1lo, s2lo, s1hi, s2hi;
12313 ARMCacheAttrs ret;
12314 bool tagged = false;
12316 if (s1.attrs == 0xf0) {
12317 tagged = true;
12318 s1.attrs = 0xff;
12321 s1lo = extract32(s1.attrs, 0, 4);
12322 s2lo = extract32(s2.attrs, 0, 4);
12323 s1hi = extract32(s1.attrs, 4, 4);
12324 s2hi = extract32(s2.attrs, 4, 4);
12326 /* Combine shareability attributes (table D4-43) */
12327 if (s1.shareability == 2 || s2.shareability == 2) {
12328 /* if either are outer-shareable, the result is outer-shareable */
12329 ret.shareability = 2;
12330 } else if (s1.shareability == 3 || s2.shareability == 3) {
12331 /* if either are inner-shareable, the result is inner-shareable */
12332 ret.shareability = 3;
12333 } else {
12334 /* both non-shareable */
12335 ret.shareability = 0;
12338 /* Combine memory type and cacheability attributes */
12339 if (s1hi == 0 || s2hi == 0) {
12340 /* Device has precedence over normal */
12341 if (s1lo == 0 || s2lo == 0) {
12342 /* nGnRnE has precedence over anything */
12343 ret.attrs = 0;
12344 } else if (s1lo == 4 || s2lo == 4) {
12345 /* non-Reordering has precedence over Reordering */
12346 ret.attrs = 4; /* nGnRE */
12347 } else if (s1lo == 8 || s2lo == 8) {
12348 /* non-Gathering has precedence over Gathering */
12349 ret.attrs = 8; /* nGRE */
12350 } else {
12351 ret.attrs = 0xc; /* GRE */
12354 /* Any location for which the resultant memory type is any
12355 * type of Device memory is always treated as Outer Shareable.
12357 ret.shareability = 2;
12358 } else { /* Normal memory */
12359 /* Outer/inner cacheability combine independently */
12360 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
12361 | combine_cacheattr_nibble(s1lo, s2lo);
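/*
 * e.g. stage 1 Normal WB RWA (0xff) against stage 2 Normal NC (0x44)
 * combines, nibble by nibble, to 0x44 and is then caught by the
 * check below.
 */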
12363 if (ret.attrs == 0x44) {
12364 /* Any location for which the resultant memory type is Normal
12365 * Inner Non-cacheable, Outer Non-cacheable is always treated
12366 * as Outer Shareable.
12368 ret.shareability = 2;
12372 /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
12373 if (tagged && ret.attrs == 0xff) {
12374 ret.attrs = 0xf0;
12377 return ret;
12381 /* get_phys_addr - get the physical address for this virtual address
12383 * Find the physical address corresponding to the given virtual address,
12384 * by doing a translation table walk on MMU based systems or using the
12385 * MPU state on MPU based systems.
12387 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
12388 * prot and page_size may not be filled in, and the populated fsr value provides
12389 * information on why the translation aborted, in the format of a
12390 * DFSR/IFSR fault register, with the following caveats:
12391 * * we honour the short vs long DFSR format differences.
12392 * * the WnR bit is never set (the caller must do this).
12393 * for PMSAv5 based systems we don't bother to return a full FSR format
12394 * value.
12396 * @env: CPUARMState
12397 * @address: virtual address to get physical address for
12398 * @access_type: 0 for read, 1 for write, 2 for execute
12399 * @mmu_idx: MMU index indicating required translation regime
12400 * @phys_ptr: set to the physical address corresponding to the virtual address
12401 * @attrs: set to the memory transaction attributes to use
12402 * @prot: set to the permissions for the page containing phys_ptr
12403 * @page_size: set to the size of the page containing phys_ptr
12404 * @fi: set to fault info if the translation fails
12405 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
12407 bool get_phys_addr(CPUARMState *env, target_ulong address,
12408 MMUAccessType access_type, ARMMMUIdx mmu_idx,
12409 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
12410 target_ulong *page_size,
12411 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
12413 ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
12415 if (mmu_idx != s1_mmu_idx) {
12416 /* Call ourselves recursively to do the stage 1 and then stage 2
12417 * translations if mmu_idx is a two-stage regime.
12419 if (arm_feature(env, ARM_FEATURE_EL2)) {
12420 hwaddr ipa;
12421 int s2_prot;
12422 int ret;
12423 ARMCacheAttrs cacheattrs2 = {};
12424 ARMMMUIdx s2_mmu_idx;
12425 bool is_el0;
12427 ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
12428 attrs, prot, page_size, fi, cacheattrs);
12430 /* If S1 fails or S2 is disabled, return early. */
12431 if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
12432 *phys_ptr = ipa;
12433 return ret;
12436 s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
12437 is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
12439 /* S1 is done. Now do S2 translation. */
12440 ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
12441 phys_ptr, attrs, &s2_prot,
12442 page_size, fi, &cacheattrs2);
12443 fi->s2addr = ipa;
12444 /* Combine the S1 and S2 perms. */
12445 *prot &= s2_prot;
12447 /* If S2 fails, return early. */
12448 if (ret) {
12449 return ret;
12452 /* Combine the S1 and S2 cache attributes. */
12453 if (arm_hcr_el2_eff(env) & HCR_DC) {
12455 * HCR.DC forces the first stage attributes to
12456 * Normal Non-Shareable,
12457 * Inner Write-Back Read-Allocate Write-Allocate,
12458 * Outer Write-Back Read-Allocate Write-Allocate.
12459 * Do not overwrite Tagged within attrs.
12461 if (cacheattrs->attrs != 0xf0) {
12462 cacheattrs->attrs = 0xff;
12464 cacheattrs->shareability = 0;
12466 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
12468 /* Check if IPA translates to secure or non-secure PA space. */
12469 if (arm_is_secure_below_el3(env)) {
12470 if (attrs->secure) {
12471 attrs->secure =
12472 !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
12473 } else {
12474 attrs->secure =
12475 !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
12476 || (env->cp15.vstcr_el2.raw_tcr & VSTCR_SA));
12479 return 0;
12480 } else {
12482 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
12484 mmu_idx = stage_1_mmu_idx(mmu_idx);
12488 /* The page table entries may downgrade secure to non-secure, but
12489 * cannot upgrade a non-secure translation regime's attributes
12490 * to secure.
12492 attrs->secure = regime_is_secure(env, mmu_idx);
12493 attrs->user = regime_is_user(env, mmu_idx);
12495 /* Fast Context Switch Extension. This doesn't exist at all in v8.
12496 * In v7 and earlier it affects all stage 1 translations.
12498 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
12499 && !arm_feature(env, ARM_FEATURE_V8)) {
12500 if (regime_el(env, mmu_idx) == 3) {
12501 address += env->cp15.fcseidr_s;
12502 } else {
12503 address += env->cp15.fcseidr_ns;
12507 if (arm_feature(env, ARM_FEATURE_PMSA)) {
12508 bool ret;
12509 *page_size = TARGET_PAGE_SIZE;
12511 if (arm_feature(env, ARM_FEATURE_V8)) {
12512 /* PMSAv8 */
12513 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
12514 phys_ptr, attrs, prot, page_size, fi);
12515 } else if (arm_feature(env, ARM_FEATURE_V7)) {
12516 /* PMSAv7 */
12517 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
12518 phys_ptr, prot, page_size, fi);
12519 } else {
12520 /* Pre-v7 MPU */
12521 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
12522 phys_ptr, prot, fi);
12524 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
12525 " mmu_idx %u -> %s (prot %c%c%c)\n",
12526 access_type == MMU_DATA_LOAD ? "reading" :
12527 (access_type == MMU_DATA_STORE ? "writing" : "execute"),
12528 (uint32_t)address, mmu_idx,
12529 ret ? "Miss" : "Hit",
12530 *prot & PAGE_READ ? 'r' : '-',
12531 *prot & PAGE_WRITE ? 'w' : '-',
12532 *prot & PAGE_EXEC ? 'x' : '-');
12534 return ret;
12537 /* Definitely a real MMU, not an MPU */
12539 if (regime_translation_disabled(env, mmu_idx)) {
12540 uint64_t hcr;
12541 uint8_t memattr;
12544 * MMU disabled. S1 addresses within aa64 translation regimes are
12545 * still checked for bounds -- see AArch64.TranslateAddressS1Off.
12547 if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
12548 int r_el = regime_el(env, mmu_idx);
12549 if (arm_el_is_aa64(env, r_el)) {
12550 int pamax = arm_pamax(env_archcpu(env));
12551 uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
12552 int addrtop, tbi;
12554 tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
12555 if (access_type == MMU_INST_FETCH) {
12556 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
12558 tbi = (tbi >> extract64(address, 55, 1)) & 1;
12559 addrtop = (tbi ? 55 : 63);
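/*
 * e.g. with a 40-bit PA size and TBI in effect for this address,
 * bits [55:40] must all be zero or we take the AddressSize fault.
 */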
12561 if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
12562 fi->type = ARMFault_AddressSize;
12563 fi->level = 0;
12564 fi->stage2 = false;
12565 return 1;
12569 * When TBI is disabled, we've just validated that all of the
12570 * bits above PAMax are zero, so logically we only need to
12571 * clear the top byte for TBI. But it's clearer to follow
12572 * the pseudocode set of addrdesc.paddress.
12574 address = extract64(address, 0, 52);
12577 *phys_ptr = address;
12578 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
12579 *page_size = TARGET_PAGE_SIZE;
12581 /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
12582 hcr = arm_hcr_el2_eff(env);
12583 cacheattrs->shareability = 0;
12584 if (hcr & HCR_DC) {
12585 if (hcr & HCR_DCT) {
12586 memattr = 0xf0; /* Tagged, Normal, WB, RWA */
12587 } else {
12588 memattr = 0xff; /* Normal, WB, RWA */
12590 } else if (access_type == MMU_INST_FETCH) {
12591 if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
12592 memattr = 0xee; /* Normal, WT, RA, NT */
12593 } else {
12594 memattr = 0x44; /* Normal, NC, No */
12596 cacheattrs->shareability = 2; /* outer shareable */
12597 } else {
12598 memattr = 0x00; /* Device, nGnRnE */
12600 cacheattrs->attrs = memattr;
12601 return 0;
12604 if (regime_using_lpae_format(env, mmu_idx)) {
12605 return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
12606 phys_ptr, attrs, prot, page_size,
12607 fi, cacheattrs);
12608 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
12609 return get_phys_addr_v6(env, address, access_type, mmu_idx,
12610 phys_ptr, attrs, prot, page_size, fi);
12611 } else {
12612 return get_phys_addr_v5(env, address, access_type, mmu_idx,
12613 phys_ptr, prot, page_size, fi);
12617 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
12618 MemTxAttrs *attrs)
12620 ARMCPU *cpu = ARM_CPU(cs);
12621 CPUARMState *env = &cpu->env;
12622 hwaddr phys_addr;
12623 target_ulong page_size;
12624 int prot;
12625 bool ret;
12626 ARMMMUFaultInfo fi = {};
12627 ARMMMUIdx mmu_idx = arm_mmu_idx(env);
12628 ARMCacheAttrs cacheattrs = {};
12630 *attrs = (MemTxAttrs) {};
12632 ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
12633 attrs, &prot, &page_size, &fi, &cacheattrs);
12635 if (ret) {
12636 return -1;
12638 return phys_addr;
12641 #endif
12643 /* Note that signed overflow is undefined in C. The following routines are
12644 careful to use unsigned types where modulo arithmetic is required.
12645 Failure to do so _will_ break on newer gcc. */
12647 /* Signed saturating arithmetic. */
12649 /* Perform 16-bit signed saturating addition. */
12650 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
12652 uint16_t res;
12654 res = a + b;
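/*
 * Signed overflow occurred iff a and b have the same sign but res
 * has the opposite sign; saturate towards the sign of a. The same
 * trick is used by the other saturating helpers below.
 */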
12655 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
12656 if (a & 0x8000)
12657 res = 0x8000;
12658 else
12659 res = 0x7fff;
12661 return res;
12664 /* Perform 8-bit signed saturating addition. */
12665 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
12667 uint8_t res;
12669 res = a + b;
12670 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
12671 if (a & 0x80)
12672 res = 0x80;
12673 else
12674 res = 0x7f;
12676 return res;
12679 /* Perform 16-bit signed saturating subtraction. */
12680 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
12682 uint16_t res;
12684 res = a - b;
12685 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
12686 if (a & 0x8000)
12687 res = 0x8000;
12688 else
12689 res = 0x7fff;
12691 return res;
12694 /* Perform 8-bit signed saturating subtraction. */
12695 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
12697 uint8_t res;
12699 res = a - b;
12700 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
12701 if (a & 0x80)
12702 res = 0x80;
12703 else
12704 res = 0x7f;
12706 return res;
12709 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
12710 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
12711 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
12712 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
12713 #define PFX q
12715 #include "op_addsub.h"
12717 /* Unsigned saturating arithmetic. */
12718 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
12720 uint16_t res;
12721 res = a + b;
12722 if (res < a)
12723 res = 0xffff;
12724 return res;
12727 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
12729 if (a > b)
12730 return a - b;
12731 else
12732 return 0;
12735 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
12737 uint8_t res;
12738 res = a + b;
12739 if (res < a)
12740 res = 0xff;
12741 return res;
12744 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
12746 if (a > b)
12747 return a - b;
12748 else
12749 return 0;
12752 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
12753 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
12754 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
12755 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
12756 #define PFX uq
12758 #include "op_addsub.h"
12760 /* Signed modulo arithmetic. */
12761 #define SARITH16(a, b, n, op) do { \
12762 int32_t sum; \
12763 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
12764 RESULT(sum, n, 16); \
12765 if (sum >= 0) \
12766 ge |= 3 << (n * 2); \
12767 } while(0)
12769 #define SARITH8(a, b, n, op) do { \
12770 int32_t sum; \
12771 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
12772 RESULT(sum, n, 8); \
12773 if (sum >= 0) \
12774 ge |= 1 << n; \
12775 } while(0)
12778 #define ADD16(a, b, n) SARITH16(a, b, n, +)
12779 #define SUB16(a, b, n) SARITH16(a, b, n, -)
12780 #define ADD8(a, b, n) SARITH8(a, b, n, +)
12781 #define SUB8(a, b, n) SARITH8(a, b, n, -)
12782 #define PFX s
12783 #define ARITH_GE
12785 #include "op_addsub.h"
12787 /* Unsigned modulo arithmetic. */
12788 #define ADD16(a, b, n) do { \
12789 uint32_t sum; \
12790 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
12791 RESULT(sum, n, 16); \
12792 if ((sum >> 16) == 1) \
12793 ge |= 3 << (n * 2); \
12794 } while(0)
12796 #define ADD8(a, b, n) do { \
12797 uint32_t sum; \
12798 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
12799 RESULT(sum, n, 8); \
12800 if ((sum >> 8) == 1) \
12801 ge |= 1 << n; \
12802 } while(0)
12804 #define SUB16(a, b, n) do { \
12805 uint32_t sum; \
12806 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
12807 RESULT(sum, n, 16); \
12808 if ((sum >> 16) == 0) \
12809 ge |= 3 << (n * 2); \
12810 } while(0)
12812 #define SUB8(a, b, n) do { \
12813 uint32_t sum; \
12814 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
12815 RESULT(sum, n, 8); \
12816 if ((sum >> 8) == 0) \
12817 ge |= 1 << n; \
12818 } while(0)
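/*
 * For the unsigned forms the GE bits record carry-out on addition
 * ((sum >> N) == 1) and the absence of borrow on subtraction
 * ((sum >> N) == 0), which is what the SEL instruction consumes.
 */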
12820 #define PFX u
12821 #define ARITH_GE
12823 #include "op_addsub.h"
12825 /* Halved signed arithmetic. */
12826 #define ADD16(a, b, n) \
12827 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
12828 #define SUB16(a, b, n) \
12829 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
12830 #define ADD8(a, b, n) \
12831 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
12832 #define SUB8(a, b, n) \
12833 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
12834 #define PFX sh
12836 #include "op_addsub.h"
12838 /* Halved unsigned arithmetic. */
12839 #define ADD16(a, b, n) \
12840 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12841 #define SUB16(a, b, n) \
12842 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12843 #define ADD8(a, b, n) \
12844 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
12845 #define SUB8(a, b, n) \
12846 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
12847 #define PFX uh
12849 #include "op_addsub.h"
12851 static inline uint8_t do_usad(uint8_t a, uint8_t b)
12853 if (a > b)
12854 return a - b;
12855 else
12856 return b - a;
12859 /* Unsigned sum of absolute byte differences. */
12860 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
12862 uint32_t sum;
12863 sum = do_usad(a, b);
12864 sum += do_usad(a >> 8, b >> 8);
12865 sum += do_usad(a >> 16, b >> 16);
12866 sum += do_usad(a >> 24, b >> 24);
12867 return sum;
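/* e.g. the helper above gives usad8(0x01020304, 0x04030201) == 3 + 1 + 1 + 3 == 8. */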
12870 /* For ARMv6 SEL instruction. */
12871 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
12873 uint32_t mask;
12875 mask = 0;
12876 if (flags & 1)
12877 mask |= 0xff;
12878 if (flags & 2)
12879 mask |= 0xff00;
12880 if (flags & 4)
12881 mask |= 0xff0000;
12882 if (flags & 8)
12883 mask |= 0xff000000;
12884 return (a & mask) | (b & ~mask);
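/*
 * e.g. flags == 0b0101 gives mask == 0x00ff00ff, so bytes 0 and 2 of
 * the result come from a and bytes 1 and 3 from b.
 */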
12887 /* CRC helpers.
12888 * The upper bytes of val (above the number specified by 'bytes') must have
12889 * been zeroed out by the caller.
12891 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
12893 uint8_t buf[4];
12895 stl_le_p(buf, val);
12897 /* zlib crc32 converts the accumulator and output to one's complement. */
12898 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
12901 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
12903 uint8_t buf[4];
12905 stl_le_p(buf, val);
12907 /* Linux crc32c converts the output to one's complement. */
12908 return crc32c(acc, buf, bytes) ^ 0xffffffff;
12911 /* Return the exception level to which FP-disabled exceptions should
12912 * be taken, or 0 if FP is enabled.
12914 int fp_exception_el(CPUARMState *env, int cur_el)
12916 #ifndef CONFIG_USER_ONLY
12917 /* CPACR and the CPTR registers don't exist before v6, so FP is
12918 * always accessible
12920 if (!arm_feature(env, ARM_FEATURE_V6)) {
12921 return 0;
12924 if (arm_feature(env, ARM_FEATURE_M)) {
12925 /* CPACR can cause a NOCP UsageFault taken to current security state */
12926 if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
12927 return 1;
12930 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
12931 if (!extract32(env->v7m.nsacr, 10, 1)) {
12932 /* FP insns cause a NOCP UsageFault taken to Secure */
12933 return 3;
12937 return 0;
12940 /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
12941 * 0, 2 : trap EL0 and EL1/PL1 accesses
12942 * 1 : trap only EL0 accesses
12943 * 3 : trap no accesses
12944 * This register is ignored if E2H+TGE are both set.
12946 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
12947 int fpen = extract32(env->cp15.cpacr_el1, 20, 2);
12949 switch (fpen) {
12950 case 0:
12951 case 2:
12952 if (cur_el == 0 || cur_el == 1) {
12953 /* Trap to PL1, which might be EL1 or EL3 */
12954 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
12955 return 3;
12957 return 1;
12959 if (cur_el == 3 && !is_a64(env)) {
12960 /* Secure PL1 running at EL3 */
12961 return 3;
12963 break;
12964 case 1:
12965 if (cur_el == 0) {
12966 return 1;
12968 break;
12969 case 3:
12970 break;
12975 * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
12976 * to control non-secure access to the FPU. It doesn't have any
12977 * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
12979 if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
12980 cur_el <= 2 && !arm_is_secure_below_el3(env))) {
12981 if (!extract32(env->cp15.nsacr, 10, 1)) {
12982 /* FP insns act as UNDEF */
12983 return cur_el == 2 ? 2 : 1;
12987 /* For the CPTR registers we don't need to guard with an ARM_FEATURE
12988 * check because zero bits in the registers mean "don't trap".
12991 /* CPTR_EL2 : present in v7VE or v8 */
12992 if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
12993 && arm_is_el2_enabled(env)) {
12994 /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
12995 return 2;
12998 /* CPTR_EL3 : present in v8 */
12999 if (extract32(env->cp15.cptr_el[3], 10, 1)) {
13000 /* Trap all FP ops to EL3 */
13001 return 3;
13003 #endif
13004 return 0;
13007 /* Return the exception level we're running at if this is our mmu_idx */
13008 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
13010 if (mmu_idx & ARM_MMU_IDX_M) {
13011 return mmu_idx & ARM_MMU_IDX_M_PRIV;
13014 switch (mmu_idx) {
13015 case ARMMMUIdx_E10_0:
13016 case ARMMMUIdx_E20_0:
13017 case ARMMMUIdx_SE10_0:
13018 case ARMMMUIdx_SE20_0:
13019 return 0;
13020 case ARMMMUIdx_E10_1:
13021 case ARMMMUIdx_E10_1_PAN:
13022 case ARMMMUIdx_SE10_1:
13023 case ARMMMUIdx_SE10_1_PAN:
13024 return 1;
13025 case ARMMMUIdx_E2:
13026 case ARMMMUIdx_E20_2:
13027 case ARMMMUIdx_E20_2_PAN:
13028 case ARMMMUIdx_SE2:
13029 case ARMMMUIdx_SE20_2:
13030 case ARMMMUIdx_SE20_2_PAN:
13031 return 2;
13032 case ARMMMUIdx_SE3:
13033 return 3;
13034 default:
13035 g_assert_not_reached();
13039 #ifndef CONFIG_TCG
13040 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
13042 g_assert_not_reached();
13044 #endif
13046 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
13048 ARMMMUIdx idx;
13049 uint64_t hcr;
13051 if (arm_feature(env, ARM_FEATURE_M)) {
13052 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
13055 /* See ARM pseudo-function ELIsInHost. */
13056 switch (el) {
13057 case 0:
13058 hcr = arm_hcr_el2_eff(env);
13059 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
13060 idx = ARMMMUIdx_E20_0;
13061 } else {
13062 idx = ARMMMUIdx_E10_0;
13064 break;
13065 case 1:
13066 if (env->pstate & PSTATE_PAN) {
13067 idx = ARMMMUIdx_E10_1_PAN;
13068 } else {
13069 idx = ARMMMUIdx_E10_1;
13071 break;
13072 case 2:
13073 /* Note that TGE does not apply at EL2. */
13074 if (arm_hcr_el2_eff(env) & HCR_E2H) {
13075 if (env->pstate & PSTATE_PAN) {
13076 idx = ARMMMUIdx_E20_2_PAN;
13077 } else {
13078 idx = ARMMMUIdx_E20_2;
13080 } else {
13081 idx = ARMMMUIdx_E2;
13083 break;
13084 case 3:
13085 return ARMMMUIdx_SE3;
13086 default:
13087 g_assert_not_reached();
13090 if (arm_is_secure_below_el3(env)) {
13091 idx &= ~ARM_MMU_IDX_A_NS;
13094 return idx;
13097 ARMMMUIdx arm_mmu_idx(CPUARMState *env)
13099 return arm_mmu_idx_el(env, arm_current_el(env));
13102 #ifndef CONFIG_USER_ONLY
13103 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
13105 return stage_1_mmu_idx(arm_mmu_idx(env));
13107 #endif
13109 static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
13110 ARMMMUIdx mmu_idx,
13111 CPUARMTBFlags flags)
13113 DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
13114 DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
13116 if (arm_singlestep_active(env)) {
13117 DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
13119 return flags;
13122 static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
13123 ARMMMUIdx mmu_idx,
13124 CPUARMTBFlags flags)
13126 bool sctlr_b = arm_sctlr_b(env);
13128 if (sctlr_b) {
13129 DP_TBFLAG_A32(flags, SCTLR__B, 1);
13131 if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
13132 DP_TBFLAG_ANY(flags, BE_DATA, 1);
13134 DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));
13136 return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
13139 static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
13140 ARMMMUIdx mmu_idx)
13142 CPUARMTBFlags flags = {};
13143 uint32_t ccr = env->v7m.ccr[env->v7m.secure];
13145 /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
13146 if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
13147 DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
13150 if (arm_v7m_is_handler_mode(env)) {
13151 DP_TBFLAG_M32(flags, HANDLER, 1);
13155 * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
13156 * is suppressing them because the requested execution priority
13157 * is less than 0.
13159 if (arm_feature(env, ARM_FEATURE_V8) &&
13160 !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
13161 (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
13162 DP_TBFLAG_M32(flags, STACKCHECK, 1);
13165 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
13168 static CPUARMTBFlags rebuild_hflags_aprofile(CPUARMState *env)
13170 CPUARMTBFlags flags = {};
13172 DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env));
13173 return flags;
13176 static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
13177 ARMMMUIdx mmu_idx)
13179 CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
13180 int el = arm_current_el(env);
13182 if (arm_sctlr(env, el) & SCTLR_A) {
13183 DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
13186 if (arm_el_is_aa64(env, 1)) {
13187 DP_TBFLAG_A32(flags, VFPEN, 1);
13190 if (el < 2 && env->cp15.hstr_el2 &&
13191 (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
13192 DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
13195 if (env->uncached_cpsr & CPSR_IL) {
13196 DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
13199 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
13202 static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
13203 ARMMMUIdx mmu_idx)
13205 CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
13206 ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
13207 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
13208 uint64_t sctlr;
13209 int tbii, tbid;
13211 DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);
13213 /* Get control bits for tagged addresses. */
13214 tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
13215 tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
13217 DP_TBFLAG_A64(flags, TBII, tbii);
13218 DP_TBFLAG_A64(flags, TBID, tbid);
13220 if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
13221 int sve_el = sve_exception_el(env, el);
13222 uint32_t zcr_len;
13225 * If SVE is disabled, but FP is enabled,
13226 * then the effective len is 0.
13228 if (sve_el != 0 && fp_el == 0) {
13229 zcr_len = 0;
13230 } else {
13231 zcr_len = sve_zcr_len_for_el(env, el);
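/*
 * zcr_len is the effective vector length expressed as
 * (quadwords - 1), e.g. 3 for a 512-bit vector length.
 */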
13233 DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
13234 DP_TBFLAG_A64(flags, ZCR_LEN, zcr_len);
13237 sctlr = regime_sctlr(env, stage1);
13239 if (sctlr & SCTLR_A) {
13240 DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
13243 if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
13244 DP_TBFLAG_ANY(flags, BE_DATA, 1);
13247 if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
13249 * In order to save space in flags, we record only whether
13250 * pauth is "inactive", meaning all insns are implemented as
13251 * a nop, or "active" when some action must be performed.
13252 * The decision of which action to take is left to a helper.
13254 if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
13255 DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
13259 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
13260 /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
13261 if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
13262 DP_TBFLAG_A64(flags, BT, 1);
13266 /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
13267 if (!(env->pstate & PSTATE_UAO)) {
13268 switch (mmu_idx) {
13269 case ARMMMUIdx_E10_1:
13270 case ARMMMUIdx_E10_1_PAN:
13271 case ARMMMUIdx_SE10_1:
13272 case ARMMMUIdx_SE10_1_PAN:
13273 /* TODO: ARMv8.3-NV */
13274 DP_TBFLAG_A64(flags, UNPRIV, 1);
13275 break;
13276 case ARMMMUIdx_E20_2:
13277 case ARMMMUIdx_E20_2_PAN:
13278 case ARMMMUIdx_SE20_2:
13279 case ARMMMUIdx_SE20_2_PAN:
13281 * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
13282 * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
13284 if (env->cp15.hcr_el2 & HCR_TGE) {
13285 DP_TBFLAG_A64(flags, UNPRIV, 1);
13287 break;
13288 default:
13289 break;
13293 if (env->pstate & PSTATE_IL) {
13294 DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
13297 if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
13299 * Set MTE_ACTIVE if any access may be Checked, and leave clear
13300 * if all accesses must be Unchecked:
13301 * 1) If no TBI, then there are no tags in the address to check,
13302 * 2) If Tag Check Override, then all accesses are Unchecked,
13303 * 3) If Tag Check Fail == 0, then Checked access have no effect,
13303 * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
13306 if (allocation_tag_access_enabled(env, el, sctlr)) {
13307 DP_TBFLAG_A64(flags, ATA, 1);
13308 if (tbid
13309 && !(env->pstate & PSTATE_TCO)
13310 && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
13311 DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
13314 /* And again for unprivileged accesses, if required. */
13315 if (EX_TBFLAG_A64(flags, UNPRIV)
13316 && tbid
13317 && !(env->pstate & PSTATE_TCO)
13318 && (sctlr & SCTLR_TCF0)
13319 && allocation_tag_access_enabled(env, 0, sctlr)) {
13320 DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
13322 /* Cache TCMA as well as TBI. */
13323 DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
13326 return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
13329 static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
13331 int el = arm_current_el(env);
13332 int fp_el = fp_exception_el(env, el);
13333 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
13335 if (is_a64(env)) {
13336 return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
13337 } else if (arm_feature(env, ARM_FEATURE_M)) {
13338 return rebuild_hflags_m32(env, fp_el, mmu_idx);
13339 } else {
13340 return rebuild_hflags_a32(env, fp_el, mmu_idx);
13344 void arm_rebuild_hflags(CPUARMState *env)
13346 env->hflags = rebuild_hflags_internal(env);
13350 * If we have triggered an EL state change we can't rely on the
13351 * translator having passed it to us; we need to recompute.
13353 void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
13355 int el = arm_current_el(env);
13356 int fp_el = fp_exception_el(env, el);
13357 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
13359 env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
13362 void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
13364 int fp_el = fp_exception_el(env, el);
13365 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
13367 env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
13371 * If we have triggered an EL state change we can't rely on the
13372 * translator having passed it to us; we need to recompute.
13374 void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
13376 int el = arm_current_el(env);
13377 int fp_el = fp_exception_el(env, el);
13378 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
13379 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
13382 void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
13384 int fp_el = fp_exception_el(env, el);
13385 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
13387 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
13390 void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
13392 int fp_el = fp_exception_el(env, el);
13393 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
13395 env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
13398 static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
13400 #ifdef CONFIG_DEBUG_TCG
13401 CPUARMTBFlags c = env->hflags;
13402 CPUARMTBFlags r = rebuild_hflags_internal(env);
13404 if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
13405 fprintf(stderr, "TCG hflags mismatch "
13406 "(current:(0x%08x,0x" TARGET_FMT_lx ")"
13407 " rebuilt:(0x%08x,0x" TARGET_FMT_lx "))\n",
13408 c.flags, c.flags2, r.flags, r.flags2);
13409 abort();
13411 #endif
13414 static bool mve_no_pred(CPUARMState *env)
13417 * Return true if there is definitely no predication of MVE
13418 * instructions by VPR or LTPSIZE. (Returning false even if there
13419 * isn't any predication is OK; generated code will just be
13420 * a little worse.)
13421 * If the CPU does not implement MVE then this TB flag is always 0.
13423 * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
13424 * logic in gen_update_fp_context() needs to be updated to match.
13426 * We do not include the effect of the ECI bits here -- they are
13427 * tracked in other TB flags. This simplifies the logic for
13428 * "when did we emit code that changes the MVE_NO_PRED TB flag
13429 * and thus need to end the TB?".
13431 if (cpu_isar_feature(aa32_mve, env_archcpu(env))) {
13432 return false;
13434 if (env->v7m.vpr) {
13435 return false;
13437 if (env->v7m.ltpsize < 4) {
13438 return false;
13440 return true;
13443 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
13444 target_ulong *cs_base, uint32_t *pflags)
13446 CPUARMTBFlags flags;
13448 assert_hflags_rebuild_correctly(env);
13449 flags = env->hflags;
13451 if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
13452 *pc = env->pc;
13453 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
13454 DP_TBFLAG_A64(flags, BTYPE, env->btype);
13456 } else {
13457 *pc = env->regs[15];
13459 if (arm_feature(env, ARM_FEATURE_M)) {
13460 if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
13461 FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
13462 != env->v7m.secure) {
13463 DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
13466 if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
13467 (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
13468 (env->v7m.secure &&
13469 !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
13471 * ASPEN is set, but FPCA/SFPA indicate that there is no
13472 * active FP context; we must create a new FP context before
13473 * executing any FP insn.
13475 DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
13478 bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
13479 if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
13480 DP_TBFLAG_M32(flags, LSPACT, 1);
13483 if (mve_no_pred(env)) {
13484 DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
13486 } else {
13488 * Note that XSCALE_CPAR shares bits with VECSTRIDE.
13489 * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
13491 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
13492 DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
13493 } else {
13494 DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
13495 DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
13497 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
13498 DP_TBFLAG_A32(flags, VFPEN, 1);
13502 DP_TBFLAG_AM32(flags, THUMB, env->thumb);
13503 DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
13507 * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
13508 * states defined in the ARM ARM for software singlestep:
13509 * SS_ACTIVE PSTATE.SS State
13510 * 0 x Inactive (the TB flag for SS is always 0)
13511 * 1 0 Active-pending
13512 * 1 1 Active-not-pending
13513 * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
13515 if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
13516 DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
13519 *pflags = flags.flags;
13520 *cs_base = flags.flags2;
13523 #ifdef TARGET_AARCH64
13525 * The manual says that when SVE is enabled and VQ is widened the
13526 * implementation is allowed to zero the previously inaccessible
13527 * portion of the registers. The corollary to that is that when
13528 * SVE is enabled and VQ is narrowed we are also allowed to zero
13529 * the now inaccessible portion of the registers.
13531 * The intent of this is that no predicate bit beyond VQ is ever set.
13532 * Which means that some operations on predicate registers themselves
13533 * may operate on full uint64_t or even unrolled across the maximum
13534 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
13535 * may well be cheaper than conditionals to restrict the operation
13536 * to the relevant portion of a uint16_t[16].
13538 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
13540 int i, j;
13541 uint64_t pmask;
13543 assert(vq >= 1 && vq <= ARM_MAX_VQ);
13544 assert(vq <= env_archcpu(env)->sve_max_vq);
13546 /* Zap the high bits of the zregs. */
13547 for (i = 0; i < 32; i++) {
13548 memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
13551 /* Zap the high bits of the pregs and ffr. */
13552 pmask = 0;
13553 if (vq & 3) {
13554 pmask = ~(-1ULL << (16 * (vq & 3)));
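/*
 * Each uint64_t of a predicate register holds 4 quadwords' worth of
 * bits (16 per quadword), so e.g. vq == 5 gives pmask == 0xffff:
 * keep the low 16 bits of p[1], then zero p[2] and p[3] entirely.
 */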
13556 for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
13557 for (i = 0; i < 17; ++i) {
13558 env->vfp.pregs[i].p[j] &= pmask;
13560 pmask = 0;
13565 * Notice a change in SVE vector size when changing EL.
13567 void aarch64_sve_change_el(CPUARMState *env, int old_el,
13568 int new_el, bool el0_a64)
13570 ARMCPU *cpu = env_archcpu(env);
13571 int old_len, new_len;
13572 bool old_a64, new_a64;
13574 /* Nothing to do if no SVE. */
13575 if (!cpu_isar_feature(aa64_sve, cpu)) {
13576 return;
13579 /* Nothing to do if FP is disabled in either EL. */
13580 if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
13581 return;
13585 * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
13586 * at ELx, or not available because the EL is in AArch32 state, then
13587 * for all purposes other than a direct read, the ZCR_ELx.LEN field
13588 * has an effective value of 0".
13590 * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
13591 * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
13592 * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
13593 * we already have the correct register contents when encountering the
13594 * vq0->vq0 transition between EL0->EL1.
13596 old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
13597 old_len = (old_a64 && !sve_exception_el(env, old_el)
13598 ? sve_zcr_len_for_el(env, old_el) : 0);
13599 new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
13600 new_len = (new_a64 && !sve_exception_el(env, new_el)
13601 ? sve_zcr_len_for_el(env, new_el) : 0);
13603 /* When changing vector length, clear inaccessible state. */
13604 if (new_len < old_len) {
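/* sve_zcr_len_for_el() returns vq - 1, hence the "+ 1" below. */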
13605 aarch64_sve_narrow_vq(env, new_len + 1);
13608 #endif