/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"
/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_read;

	/*
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	switch (reg) {
	case CSSELR_EL1:	return read_sysreg_s(SYS_CSSELR_EL1);
	case SCTLR_EL1:		return read_sysreg_s(sctlr_EL12);
	case ACTLR_EL1:		return read_sysreg_s(SYS_ACTLR_EL1);
	case CPACR_EL1:		return read_sysreg_s(cpacr_EL12);
	case TTBR0_EL1:		return read_sysreg_s(ttbr0_EL12);
	case TTBR1_EL1:		return read_sysreg_s(ttbr1_EL12);
	case TCR_EL1:		return read_sysreg_s(tcr_EL12);
	case ESR_EL1:		return read_sysreg_s(esr_EL12);
	case AFSR0_EL1:		return read_sysreg_s(afsr0_EL12);
	case AFSR1_EL1:		return read_sysreg_s(afsr1_EL12);
	case FAR_EL1:		return read_sysreg_s(far_EL12);
	case MAIR_EL1:		return read_sysreg_s(mair_EL12);
	case VBAR_EL1:		return read_sysreg_s(vbar_EL12);
	case CONTEXTIDR_EL1:	return read_sysreg_s(contextidr_EL12);
	case TPIDR_EL0:		return read_sysreg_s(SYS_TPIDR_EL0);
	case TPIDRRO_EL0:	return read_sysreg_s(SYS_TPIDRRO_EL0);
	case TPIDR_EL1:		return read_sysreg_s(SYS_TPIDR_EL1);
	case AMAIR_EL1:		return read_sysreg_s(amair_EL12);
	case CNTKCTL_EL1:	return read_sysreg_s(cntkctl_EL12);
	case PAR_EL1:		return read_sysreg_s(SYS_PAR_EL1);
	case DACR32_EL2:	return read_sysreg_s(SYS_DACR32_EL2);
	case IFSR32_EL2:	return read_sysreg_s(SYS_IFSR32_EL2);
	case DBGVCR32_EL2:	return read_sysreg_s(SYS_DBGVCR32_EL2);
	}

immediate_read:
	return __vcpu_sys_reg(vcpu, reg);
}
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_write;

	/*
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be
	 * set once, before running the VCPU, and never changed later.
	 */
	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	return;
	case SCTLR_EL1:		write_sysreg_s(val, sctlr_EL12);	return;
	case ACTLR_EL1:		write_sysreg_s(val, SYS_ACTLR_EL1);	return;
	case CPACR_EL1:		write_sysreg_s(val, cpacr_EL12);	return;
	case TTBR0_EL1:		write_sysreg_s(val, ttbr0_EL12);	return;
	case TTBR1_EL1:		write_sysreg_s(val, ttbr1_EL12);	return;
	case TCR_EL1:		write_sysreg_s(val, tcr_EL12);		return;
	case ESR_EL1:		write_sysreg_s(val, esr_EL12);		return;
	case AFSR0_EL1:		write_sysreg_s(val, afsr0_EL12);	return;
	case AFSR1_EL1:		write_sysreg_s(val, afsr1_EL12);	return;
	case FAR_EL1:		write_sysreg_s(val, far_EL12);		return;
	case MAIR_EL1:		write_sysreg_s(val, mair_EL12);		return;
	case VBAR_EL1:		write_sysreg_s(val, vbar_EL12);		return;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, contextidr_EL12);	return;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	return;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	return;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	return;
	case AMAIR_EL1:		write_sysreg_s(val, amair_EL12);	return;
	case CNTKCTL_EL1:	write_sysreg_s(val, cntkctl_EL12);	return;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	return;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	return;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	return;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	return;
	}

immediate_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}
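/*
 * Illustrative note on the CSSELR encoding used here: bit 0 selects
 * instruction vs. data/unified, and bits [3:1] select the cache level.
 * For example, csselr == 0 selects the L1 data/unified cache and
 * csselr == 1 the L1 instruction cache; values run up to CSSELR_MAX.
 */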
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val;
	int reg = r->reg;

	BUG_ON(!p->is_write);

	/* See the 32bit mapping in kvm_host.h */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (!p->is_aarch32 || !p->is_32bit) {
		val = p->regval;
	} else {
		val = vcpu_read_sys_reg(vcpu, reg);
		if (r->reg % 2)
			val = (p->regval << 32) | (u64)lower_32_bits(val);
		else
			val = ((u64)upper_32_bits(val) << 32) |
				lower_32_bits(p->regval);
	}

	vcpu_write_sys_reg(vcpu, val, reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->is_aarch32) {
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}
static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}
static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance.
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps.
 *
 * Then, on each vcpu_put:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone.
 * A 32 bit read from a debug register only returns the bottom bits.
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}
static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}
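/*
 * Worked example: with *dbg_reg == 0xffffffff00000000, a 32bit guest
 * write of 0xdeadbeef goes through reg_to_dbg() and leaves
 * *dbg_reg == 0xffffffffdeadbeef (top bits preserved); a 32bit read
 * through dbg_to_reg() then returns just 0xdeadbeef.
 */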
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}
static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}
static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}
static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}
static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}
static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}
static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}
static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}
static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}
static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}
static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}
static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}
static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
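/*
 * Worked example of the mapping above: vcpu_id == 0x123 gives
 * Aff0 = 0x123 & 0xf = 3, Aff1 = (0x123 >> 4) & 0xff = 0x12 and
 * Aff2 = (0x123 >> 12) & 0xff = 0, so the guest reads back
 * MPIDR_EL1 == (1ULL << 31) | (0x12 << 8) | 3.
 */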
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}
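/*
 * In other words: bits outside ARMV8_PMU_PMCR_MASK keep their hardware
 * value, the writable bits take an arbitrary UNKNOWN pattern
 * (0xdecafbad), and PMCR_EL0.E is then forced to zero so the vPMU
 * starts life disabled.
 */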
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}
static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (!(p->Op2 & 1))
		pmceid = read_sysreg(pmceid0_el0);
	else
		pmceid = read_sysreg(pmceid1_el0);

	p->regval = pmceid;

	return true;
}
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}
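/*
 * For example, on a host whose PMCR_EL0.N is 6, guest indices 0-5
 * address the event counters and ARMV8_PMU_CYCLE_IDX addresses the
 * cycle counter; an access to index 6 or above (other than the cycle
 * counter) is rejected with an UNDEF.
 */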
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter(vcpu, val);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),				\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
static bool access_cntp_tval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	u64 now = kvm_phys_timer_read();
	u64 cval;

	if (p->is_write) {
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
				      p->regval + now);
	} else {
		cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
		p->regval = cval - now;
	}

	return true;
}
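/*
 * TVAL is the signed downcounter view of the absolute compare value:
 * writing TVAL = 100 with now == 1000 stores CVAL = 1100, and a later
 * read at now == 1050 returns 1100 - 1050 = 50.
 */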
static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
	else
		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);

	return true;
}
static bool access_cntp_cval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
	else
		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);

	return true;
}
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
{
	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

	if (id == SYS_ID_AA64PFR0_EL1) {
		if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
			kvm_debug("SVE unsupported for guests, suppressing\n");

		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
	} else if (id == SYS_ID_AA64MMFR1_EL1) {
		if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
			kvm_debug("LORegions unsupported for guests, suppressing\n");

		val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
	}

	return val;
}
/* cpufeature ID register access trap handlers */

static bool __access_id_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r,
			    bool raz)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(r, raz);
	return true;
}
static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, false);
}

static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = read_id_reg(rd, raz);

	return reg_to_user(uaddr, &val, id);
}
static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(rd, raz))
		return -EINVAL;

	return 0;
}
*vcpu
, const struct sys_reg_desc
*rd
,
1120 const struct kvm_one_reg
*reg
, void __user
*uaddr
)
1122 return __get_id_reg(rd
, uaddr
, false);
1125 static int set_id_reg(struct kvm_vcpu
*vcpu
, const struct sys_reg_desc
*rd
,
1126 const struct kvm_one_reg
*reg
, void __user
*uaddr
)
1128 return __set_id_reg(rd
, uaddr
, false);
1131 static int get_raz_id_reg(struct kvm_vcpu
*vcpu
, const struct sys_reg_desc
*rd
,
1132 const struct kvm_one_reg
*reg
, void __user
*uaddr
)
1134 return __get_id_reg(rd
, uaddr
, true);
1137 static int set_raz_id_reg(struct kvm_vcpu
*vcpu
, const struct sys_reg_desc
*rd
,
1138 const struct kvm_one_reg
*reg
, void __user
*uaddr
)
1140 return __set_id_reg(rd
, uaddr
, true);
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_id_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_id_reg,		\
	.set_user = set_raz_id_reg,		\
}
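/*
 * For instance, ID_SANITISED(ID_PFR0_EL1) expands to a sys_reg_desc for
 * SYS_ID_PFR0_EL1 whose guest reads come from the sanitised cpufeature
 * view (read_id_reg() above) and whose userspace accessors accept only
 * that same value, while ID_UNALLOCATED(3,4) covers the unallocated
 * encoding Op0=3, Op1=0, CRn=0, CRm=3, Op2=4 as RAZ.
 */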
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all, debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture, nor
 * the OSlock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	ID_SANITISED(ID_PFR0_EL1),
	ID_SANITISED(ID_PFR1_EL1),
	ID_SANITISED(ID_DFR0_EL1),
	ID_HIDDEN(ID_AFR0_EL1),
	ID_SANITISED(ID_MMFR0_EL1),
	ID_SANITISED(ID_MMFR1_EL1),
	ID_SANITISED(ID_MMFR2_EL1),
	ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	ID_SANITISED(ID_ISAR0_EL1),
	ID_SANITISED(ID_ISAR1_EL1),
	ID_SANITISED(ID_ISAR2_EL1),
	ID_SANITISED(ID_ISAR3_EL1),
	ID_SANITISED(ID_ISAR4_EL1),
	ID_SANITISED(ID_ISAR5_EL1),
	ID_SANITISED(ID_MMFR4_EL1),
	ID_UNALLOCATED(2,7),

	/* CRm=3 */
	ID_SANITISED(MVFR0_EL1),
	ID_SANITISED(MVFR1_EL1),
	ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	ID_UNALLOCATED(3,4),
	ID_UNALLOCATED(3,5),
	ID_UNALLOCATED(3,6),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	ID_SANITISED(ID_AA64PFR0_EL1),
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_UNALLOCATED(4,4),
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	ID_SANITISED(ID_AA64DFR0_EL1),
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_UNALLOCATED(6,2),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_undef },
	{ SYS_DESC(SYS_LOREA_EL1), trap_undef },
	{ SYS_DESC(SYS_LORN_EL1), trap_undef },
	{ SYS_DESC(SYS_LORC_EL1), trap_undef },
	{ SYS_DESC(SYS_LORID_EL1), trap_undef },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },

	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
};
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}
/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */

static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, would this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }
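/*
 * Example expansion: PMU_PMEVCNTR(10) produces the cp15 encoding
 * CRn(0b1110), CRm(0b1001), Op2(2), i.e. PMEVCNTR10 -- the counter
 * index is split as ((n >> 3) & 0x3) into CRm and (n & 0x7) into Op2,
 * matching the demux in access_pmu_evcntr().
 */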
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* CNTP_TVAL */
	{ Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval },
	/* CNTP_CTL */
	{ Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
};
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}
/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}
#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val  = (x)->Op0 << 14;					\
		val |= (x)->Op1 << 11;					\
		val |= (x)->CRn << 7;					\
		val |= (x)->CRm << 3;					\
		val |= (x)->Op2;					\
		val;							\
	 })
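/*
 * Example: PMCR_EL0 (Op0 3, Op1 3, CRn 9, CRm 12, Op2 0) packs to
 * (3 << 14) | (3 << 11) | (9 << 7) | (12 << 3) = 0xdce0. Because the
 * fields are packed most-significant-first, ordering by this value is
 * exactly the Op0, Op1, CRn, CRm, Op2 sort order that bsearch() below
 * relies on.
 */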
static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_match_value(r);
}
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}
/*
 * emulate_cp --  tries to match a sys_reg access in a handling table, and
 *                call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return 0;
	}

	/* Not handled */
	return -1;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (hsr >> 10) & 0x1f;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * Try to emulate the coprocessor access using the target
	 * specific table first, and using the global table afterwards.
	 * If either of the tables contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt  = kvm_vcpu_sys_get_rt(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
		kvm_inject_undefined(vcpu);
	}
	return 1;
}
static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}
/******************************************************************************
 * Userspace API
 *****************************************************************************/
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg_by_id(id, &params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && !(r->reg || r->get_user))
		r = NULL;

	return r;
}
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};
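/*
 * Like every other table in this file, invariant_sys_regs must stay
 * sorted by encoding; kvm_sys_reg_table_init() BUG_ONs otherwise.
 */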
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}
static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
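/*
 * For example, a CSSELR value of 0 (InD == 0, Level == 0) selects the
 * L1 data or unified cache, while a value of 1 (InD == 1, Level == 0)
 * selects the L1 instruction cache.
 */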
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}
static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
}
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}
static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
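/*
 * Each valid CSSELR value thus becomes one 32-bit demux index of the
 * form KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
 * KVM_REG_ARM_DEMUX_ID_CCSIDR | <CSSELR value>, read back through
 * demux_c15_get() above.
 */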
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
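/*
 * Worked example, using the shift definitions from
 * arch/arm64/include/uapi/asm/kvm.h (Op0 at bit 14, Op1 at 11, CRn at
 * 7, CRm at 3, Op2 at 0): SCTLR_EL1 is op0=3, op1=0, CRn=1, CRm=0,
 * op2=0, so its index is 0x6030000000130000 | (3 << 14) | (1 << 7)
 * == 0x603000000013c080.
 */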
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}
static int walk_one_sys_reg(const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;
	int err;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0)
			err = walk_one_sys_reg(i1, &uind, &total);
		else
			err = walk_one_sys_reg(i2, &uind, &total);

		if (err)
			return err;

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
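/*
 * Userspace reaches the two functions above through the
 * KVM_GET_REG_LIST vcpu ioctl. A minimal consumer (sketch only, error
 * handling elided) might look like:
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe); // -E2BIG, sets probe.n
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 *	// list->reg[] now holds indices for KVM_GET/SET_ONE_REG
 */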
static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n",
				table, i - 1);
			return 1;
		}
	}

	return 0;
}
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs,
				  ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
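/*
 * For example, a CLIDR reporting separate L1 caches (Ctype1 == 0b011),
 * a unified L2 (Ctype2 == 0b100) and nothing beyond (Ctype3 == 0b000)
 * leaves cache_levels == 0b100011, and is_valid_cache() then accepts
 * CSSELR values 0, 1 and 2 only.
 */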
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42,
	       sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset __vcpu_sys_reg(%zi)", num);
}