/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <trace/events/kvm.h>

#include "sys_regs.h"
/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
        u32 ccsidr;

        /* Make sure no one else changes CSSELR during this! */
        local_irq_disable();
        /* Put value into CSSELR */
        asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
        isb();
        /* Read result out of CCSIDR */
        asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
        local_irq_enable();

        return ccsidr;
}
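/*
 * For illustration (not from the original source): CSSELR_EL1 bit 0
 * selects between instruction (1) and data/unified (0) caches, and
 * bits [3:1] hold the cache level minus one. So get_ccsidr(0) returns
 * the CCSIDR of the L1 data/unified cache, and get_ccsidr(1) that of
 * the L1 instruction cache, if one exists.
 */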
static void do_dc_cisw(u32 val)
{
        asm volatile("dc cisw, %x0" : : "r" (val));
        dsb(ish);
}

static void do_dc_csw(u32 val)
{
        asm volatile("dc csw, %x0" : : "r" (val));
        dsb(ish);
}
/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        unsigned long val;
        int cpu;

        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        cpu = get_cpu();

        cpumask_setall(&vcpu->arch.require_dcache_flush);
        cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

        /* If we were already preempted, take the long way around */
        if (cpu != vcpu->arch.last_pcpu) {
                flush_cache_all();
                goto done;
        }

        val = *vcpu_reg(vcpu, p->Rt);

        switch (p->CRm) {
        case 6:                 /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
        case 14:                /* DCCISW */
                do_dc_cisw(val);
                break;

        case 10:                /* DCCSW */
                do_dc_csw(val);
                break;
        }

done:
        put_cpu();

        return true;
}
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
                          const struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        unsigned long val;

        BUG_ON(!p->is_write);

        val = *vcpu_reg(vcpu, p->Rt);
        if (!p->is_aarch32 || !p->is_32bit)
                vcpu_sys_reg(vcpu, r->reg) = val;
        else
                vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;

        return true;
}
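/*
 * Illustrative note (not in the original): a 32-bit guest write such
 * as "mcr p15, 0, r0, c3, c0, 0" (DACR) arrives here with both
 * p->is_aarch32 and p->is_32bit set, so only the low 32 bits of the
 * 64-bit shadow register are updated; an AArch64 MSR to the same
 * state takes the first branch and writes the whole value.
 */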
/*
 * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set.  If the
 * guest enables the MMU, we stop trapping the VM sys_regs and leave
 * it in complete control of the caches.
 */
static bool access_sctlr(struct kvm_vcpu *vcpu,
                         const struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        access_vm_reg(vcpu, p, r);

        if (vcpu_has_cache_enabled(vcpu)) {     /* MMU+Caches enabled? */
                vcpu->arch.hcr_el2 &= ~HCR_TVM;
                stage2_flush_vm(vcpu->kvm);
        }

        return true;
}
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
                        const struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);
        else
                return read_zero(vcpu, p);
}
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
                           const struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                *vcpu_reg(vcpu, p->Rt) = (1 << 3);
                return true;
        }
}
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
                                   const struct sys_reg_params *p,
                                   const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                u32 val;
                asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
                *vcpu_reg(vcpu, p->Rt) = val;
                return true;
        }
}
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   expects them to be left alone.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
                            const struct sys_reg_params *p,
                            const struct sys_reg_desc *r)
{
        if (p->is_write) {
                vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
                vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
        } else {
                *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
        }

        return true;
}
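/*
 * Sketch of the resulting world-switch decision (illustrative only;
 * the actual logic lives in the hyp switch code):
 *
 *      entry:  if (dirty || (mdscr & (KDE|MDE)))
 *                      { set dirty; disable traps; save host, load guest; }
 *              else
 *                      { enable traps; }
 *      exit:   if (dirty)
 *                      { save guest, load host; clear dirty; }
 */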
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 amair;

        asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
        vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        /*
         * Simply map the vcpu_id into the Aff0 field of the MPIDR.
         */
        vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
}
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)                                      \
        /* DBGBVRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),     \
          trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 },         \
        /* DBGBCRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),     \
          trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 },         \
        /* DBGWVRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),     \
          trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 },         \
        /* DBGWCRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),     \
          trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
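/*
 * For illustration: DBG_BCR_BVR_WCR_WVR_EL1(1) expands to four table
 * entries sharing CRm = 1 and differing only in Op2, covering
 * DBGBVR1_EL1 (Op2 = 0b100), DBGBCR1_EL1 (0b101), DBGWVR1_EL1 (0b110)
 * and DBGWCR1_EL1 (0b111), all routed through trap_debug_regs.
 */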
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug nor the OSlock
 * protocol. This should be revisited if we ever encounter a more
 * demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
        /* DC ISW */
        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
          access_dcsw },
        /* DC CSW */
        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
          access_dcsw },
        /* DC CISW */
        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
          access_dcsw },

        DBG_BCR_BVR_WCR_WVR_EL1(0),
        DBG_BCR_BVR_WCR_WVR_EL1(1),
        /* MDCCINT_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
          trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
        /* MDSCR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
          trap_debug_regs, reset_val, MDSCR_EL1, 0 },
        DBG_BCR_BVR_WCR_WVR_EL1(2),
        DBG_BCR_BVR_WCR_WVR_EL1(3),
        DBG_BCR_BVR_WCR_WVR_EL1(4),
        DBG_BCR_BVR_WCR_WVR_EL1(5),
        DBG_BCR_BVR_WCR_WVR_EL1(6),
        DBG_BCR_BVR_WCR_WVR_EL1(7),
        DBG_BCR_BVR_WCR_WVR_EL1(8),
        DBG_BCR_BVR_WCR_WVR_EL1(9),
        DBG_BCR_BVR_WCR_WVR_EL1(10),
        DBG_BCR_BVR_WCR_WVR_EL1(11),
        DBG_BCR_BVR_WCR_WVR_EL1(12),
        DBG_BCR_BVR_WCR_WVR_EL1(13),
        DBG_BCR_BVR_WCR_WVR_EL1(14),
        DBG_BCR_BVR_WCR_WVR_EL1(15),

        /* MDRAR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
          trap_raz_wi },
        /* OSLAR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
          trap_raz_wi },
        /* OSLSR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
          trap_oslsr_el1 },
        /* OSDLR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
          trap_raz_wi },
        /* DBGPRCR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
          trap_raz_wi },
        /* DBGCLAIMSET_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
          trap_raz_wi },
        /* DBGCLAIMCLR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
          trap_raz_wi },
        /* DBGAUTHSTATUS_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
          trap_dbgauthstatus_el1 },

        /* TEECR32_EL1 */
        { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
          NULL, reset_val, TEECR32_EL1, 0 },
        /* TEEHBR32_EL1 */
        { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
          NULL, reset_val, TEEHBR32_EL1, 0 },

        /* MDCCSR_EL1 */
        { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
          trap_raz_wi },
        /* DBGDTR_EL0 */
        { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
          trap_raz_wi },
        /* DBGDTR[TR]X_EL0 */
        { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
          trap_raz_wi },

        /* DBGVCR32_EL2 */
        { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
          NULL, reset_val, DBGVCR32_EL2, 0 },

        /* MPIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
          NULL, reset_mpidr, MPIDR_EL1 },
        /* SCTLR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
          access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
        /* CPACR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
          NULL, reset_val, CPACR_EL1, 0 },
        /* TTBR0_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
          access_vm_reg, reset_unknown, TTBR0_EL1 },
        /* TTBR1_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
          access_vm_reg, reset_unknown, TTBR1_EL1 },
        /* TCR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
          access_vm_reg, reset_val, TCR_EL1, 0 },

        /* AFSR0_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
          access_vm_reg, reset_unknown, AFSR0_EL1 },
        /* AFSR1_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
          access_vm_reg, reset_unknown, AFSR1_EL1 },
        /* ESR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
          access_vm_reg, reset_unknown, ESR_EL1 },
        /* FAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
          access_vm_reg, reset_unknown, FAR_EL1 },
        /* PAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
          NULL, reset_unknown, PAR_EL1 },

        /* PMINTENSET_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
          trap_raz_wi },
        /* PMINTENCLR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
          trap_raz_wi },

        /* MAIR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
          access_vm_reg, reset_unknown, MAIR_EL1 },
        /* AMAIR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
          access_vm_reg, reset_amair_el1, AMAIR_EL1 },

        /* VBAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
          NULL, reset_val, VBAR_EL1, 0 },
        /* CONTEXTIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
          access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
        /* TPIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
          NULL, reset_unknown, TPIDR_EL1 },

        /* CNTKCTL_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
          NULL, reset_val, CNTKCTL_EL1, 0},

        /* CSSELR_EL1 */
        { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
          NULL, reset_unknown, CSSELR_EL1 },

        /* PMCR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
          trap_raz_wi },
        /* PMCNTENSET_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
          trap_raz_wi },
        /* PMCNTENCLR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
          trap_raz_wi },
        /* PMOVSCLR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
          trap_raz_wi },
        /* PMSWINC_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
          trap_raz_wi },
        /* PMSELR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
          trap_raz_wi },
        /* PMCEID0_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
          trap_raz_wi },
        /* PMCEID1_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
          trap_raz_wi },
        /* PMCCNTR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
          trap_raz_wi },
        /* PMXEVTYPER_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
          trap_raz_wi },
        /* PMXEVCNTR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
          trap_raz_wi },
        /* PMUSERENR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
          trap_raz_wi },
        /* PMOVSSET_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
          trap_raz_wi },

        /* TPIDR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
          NULL, reset_unknown, TPIDR_EL0 },
        /* TPIDRRO_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
          NULL, reset_unknown, TPIDRRO_EL0 },

        /* DACR32_EL2 */
        { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
          NULL, reset_unknown, DACR32_EL2 },
        /* IFSR32_EL2 */
        { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
          NULL, reset_unknown, IFSR32_EL2 },
        /* FPEXC32_EL2 */
        { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
          NULL, reset_val, FPEXC32_EL2, 0x70 },
};
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
                        const struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
                u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
                u32 el3 = !!((pfr >> 12) & 0xf);

                *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
                                          (((dfr >> 12) & 0xf) << 24) |
                                          (((dfr >> 28) & 0xf) << 20) |
                                          (6 << 16) | (el3 << 14) | (el3 << 12));
                return true;
        }
}
static bool trap_debug32(struct kvm_vcpu *vcpu,
                         const struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        if (p->is_write) {
                vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
                vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
        } else {
                *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
        }

        return true;
}
#define DBG_BCR_BVR_WCR_WVR(n)                                  \
        /* DBGBVRn */                                           \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32,    \
          NULL, (cp14_DBGBVR0 + (n) * 2) },                     \
        /* DBGBCRn */                                           \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32,    \
          NULL, (cp14_DBGBCR0 + (n) * 2) },                     \
        /* DBGWVRn */                                           \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32,    \
          NULL, (cp14_DBGWVR0 + (n) * 2) },                     \
        /* DBGWCRn */                                           \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32,    \
          NULL, (cp14_DBGWCR0 + (n) * 2) }

#define DBGBXVR(n)                                              \
        { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32,    \
          NULL, cp14_DBGBXVR0 + n * 2 }
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
        /* DBGIDR */
        { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
        /* DBGDTRRXext */
        { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

        DBG_BCR_BVR_WCR_WVR(0),
        /* DBGDSCRint */
        { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(1),
        /* DBGDCCINT */
        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
        /* DBGDSCRext */
        { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
        DBG_BCR_BVR_WCR_WVR(2),
        /* DBGDTR[RT]Xint */
        { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
        /* DBGDTR[RT]Xext */
        { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(3),
        DBG_BCR_BVR_WCR_WVR(4),
        DBG_BCR_BVR_WCR_WVR(5),
        /* DBGWFAR */
        { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
        /* DBGOSECCR */
        { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(6),
        /* DBGVCR */
        { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
        DBG_BCR_BVR_WCR_WVR(7),
        DBG_BCR_BVR_WCR_WVR(8),
        DBG_BCR_BVR_WCR_WVR(9),
        DBG_BCR_BVR_WCR_WVR(10),
        DBG_BCR_BVR_WCR_WVR(11),
        DBG_BCR_BVR_WCR_WVR(12),
        DBG_BCR_BVR_WCR_WVR(13),
        DBG_BCR_BVR_WCR_WVR(14),
        DBG_BCR_BVR_WCR_WVR(15),

        /* DBGDRAR (32bit) */
        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

        DBGBXVR(0),
        /* DBGOSLAR */
        { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
        DBGBXVR(1),
        /* DBGOSLSR */
        { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
        DBGBXVR(2),
        DBGBXVR(3),
        /* DBGOSDLR */
        { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
        DBGBXVR(4),
        /* DBGPRCR */
        { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
        DBGBXVR(5),
        DBGBXVR(6),
        DBGBXVR(7),
        DBGBXVR(8),
        DBGBXVR(9),
        DBGBXVR(10),
        DBGBXVR(11),
        DBGBXVR(12),
        DBGBXVR(13),
        DBGBXVR(14),
        DBGBXVR(15),

        /* DBGDSAR (32bit) */
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

        /* DBGDEVID2 */
        { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
        /* DBGDEVID1 */
        { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
        /* DBGDEVID */
        { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
        /* DBGCLAIMSET */
        { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
        /* DBGCLAIMCLR */
        { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
        /* DBGAUTHSTATUS */
        { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
        /* DBGDRAR (64bit) */
        { Op1( 0), CRm( 1), .access = trap_raz_wi },

        /* DBGDSAR (64bit) */
        { Op1( 0), CRm( 2), .access = trap_raz_wi },
};
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
        { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
        { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
        { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
        { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
        { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
        { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
        { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

        /*
         * DC{C,I,CI}SW operations:
         */
        { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
        { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
        { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

        /* PMU */
        { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },

        { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
        { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
        { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
        { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
};
static const struct sys_reg_desc cp15_64_regs[] = {
        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
        { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
                                       struct kvm_sys_reg_target_table *table)
{
        target_tables[target] = table;
}
/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
                                                   bool mode_is_64,
                                                   size_t *num)
{
        struct kvm_sys_reg_target_table *table;

        table = target_tables[target];
        if (mode_is_64) {
                *num = table->table64.num;
                return table->table64.table;
        } else {
                *num = table->table32.num;
                return table->table32.table;
        }
}
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
                                           const struct sys_reg_desc table[],
                                           unsigned int num)
{
        unsigned int i;

        for (i = 0; i < num; i++) {
                const struct sys_reg_desc *r = &table[i];

                if (params->Op0 != r->Op0)
                        continue;
                if (params->Op1 != r->Op1)
                        continue;
                if (params->CRn != r->CRn)
                        continue;
                if (params->CRm != r->CRm)
                        continue;
                if (params->Op2 != r->Op2)
                        continue;

                return r;
        }
        return NULL;
}
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}
/*
 * emulate_cp --  tries to match a sys_reg access in a handling table, and
 *                call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
                      const struct sys_reg_params *params,
                      const struct sys_reg_desc *table,
                      size_t num)
{
        const struct sys_reg_desc *r;

        if (!table)
                return -1;      /* Not handled */

        r = find_reg(params, table, num);

        if (r) {
                /*
                 * Not having an accessor means that we have
                 * configured a trap that we don't know how to
                 * handle. This certainly qualifies as a gross bug
                 * that should be fixed right away.
                 */
                BUG_ON(!r->access);

                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                }

                /* Handled */
                return 0;
        }

        /* Not handled */
        return -1;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
                                struct sys_reg_params *params)
{
        u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
        int cp;

        switch (hsr_ec) {
        case ESR_EL2_EC_CP15_32:
        case ESR_EL2_EC_CP15_64:
                cp = 15;
                break;
        case ESR_EL2_EC_CP14_MR:
        case ESR_EL2_EC_CP14_64:
                cp = 14;
                break;
        default:
                WARN_ON(1);
        }

        kvm_err("Unsupported guest CP%d access at: %08lx\n",
                cp, *vcpu_pc(vcpu));
        print_sys_reg_instr(params);
        kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
                            const struct sys_reg_desc *global,
                            size_t nr_global,
                            const struct sys_reg_desc *target_specific,
                            size_t nr_specific)
{
        struct sys_reg_params params;
        u32 hsr = kvm_vcpu_get_hsr(vcpu);
        int Rt2 = (hsr >> 10) & 0xf;

        params.is_aarch32 = true;
        params.is_32bit = false;
        params.CRm = (hsr >> 1) & 0xf;
        params.Rt = (hsr >> 5) & 0xf;
        params.is_write = ((hsr & 1) == 0);

        params.Op0 = 0;
        params.Op1 = (hsr >> 16) & 0xf;
        params.Op2 = 0;
        params.CRn = 0;

        /*
         * Massive hack here. Store Rt2 in the top 32bits so we only
         * have one register to deal with. As we use the same trap
         * backends between AArch32 and AArch64, we get away with it.
         */
        if (params.is_write) {
                u64 val = *vcpu_reg(vcpu, params.Rt);
                val &= 0xffffffff;
                val |= *vcpu_reg(vcpu, Rt2) << 32;
                *vcpu_reg(vcpu, params.Rt) = val;
        }

        if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
                goto out;
        if (!emulate_cp(vcpu, &params, global, nr_global))
                goto out;

        unhandled_cp_access(vcpu, &params);

out:
        /* Do the opposite hack for the read side */
        if (!params.is_write) {
                u64 val = *vcpu_reg(vcpu, params.Rt);
                val >>= 32;
                *vcpu_reg(vcpu, Rt2) = val;
        }

        return 1;
}
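/*
 * Worked example (illustrative): for a guest "mcrr p15, 0, r2, r3, c2"
 * (a 64-bit write to TTBR0), Rt = 2 and Rt2 = 3. The write path above
 * packs r3 into bits [63:32] and r2 into bits [31:0] of a single
 * value, which then matches the { Op1( 0), CRm( 2) } entry in
 * cp15_64_regs and is handled by access_vm_reg.
 */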
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
                            const struct sys_reg_desc *global,
                            size_t nr_global,
                            const struct sys_reg_desc *target_specific,
                            size_t nr_specific)
{
        struct sys_reg_params params;
        u32 hsr = kvm_vcpu_get_hsr(vcpu);

        params.is_aarch32 = true;
        params.is_32bit = true;
        params.CRm = (hsr >> 1) & 0xf;
        params.Rt = (hsr >> 5) & 0xf;
        params.is_write = ((hsr & 1) == 0);
        params.CRn = (hsr >> 10) & 0xf;
        params.Op0 = 0;
        params.Op1 = (hsr >> 14) & 0x7;
        params.Op2 = (hsr >> 17) & 0x7;

        if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
                return 1;
        if (!emulate_cp(vcpu, &params, global, nr_global))
                return 1;

        unhandled_cp_access(vcpu, &params);
        return 1;
}
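/*
 * Worked example (illustrative): a guest "mcr p15, 0, r1, c1, c0, 0"
 * (SCTLR write) traps with Op1 = 0, CRn = 1, CRm = 0, Op2 = 0 and
 * Rt = 1 decoded from the HSR; bit 0 of the HSR is clear for a write,
 * so is_write is true, and the access matches the c1_SCTLR entry in
 * cp15_regs.
 */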
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        const struct sys_reg_desc *target_specific;
        size_t num;

        target_specific = get_target_table(vcpu->arch.target, false, &num);
        return kvm_handle_cp_64(vcpu,
                                cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
                                target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        const struct sys_reg_desc *target_specific;
        size_t num;

        target_specific = get_target_table(vcpu->arch.target, false, &num);
        return kvm_handle_cp_32(vcpu,
                                cp15_regs, ARRAY_SIZE(cp15_regs),
                                target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        return kvm_handle_cp_64(vcpu,
                                cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
                                NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        return kvm_handle_cp_32(vcpu,
                                cp14_regs, ARRAY_SIZE(cp14_regs),
                                NULL, 0);
}
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
                           const struct sys_reg_params *params)
{
        size_t num;
        const struct sys_reg_desc *table, *r;

        table = get_target_table(vcpu->arch.target, true, &num);

        /* Search target-specific then generic table. */
        r = find_reg(params, table, num);
        if (!r)
                r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        if (likely(r)) {
                /*
                 * Not having an accessor means that we have
                 * configured a trap that we don't know how to
                 * handle. This certainly qualifies as a gross bug
                 * that should be fixed right away.
                 */
                BUG_ON(!r->access);

                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                        return 1;
                }
                /* If access function fails, it should complain. */
        } else {
                kvm_err("Unsupported guest sys_reg access at: %lx\n",
                        *vcpu_pc(vcpu));
                print_sys_reg_instr(params);
        }
        kvm_inject_undefined(vcpu);
        return 1;
}
static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
                                const struct sys_reg_desc *table, size_t num)
{
        unsigned long i;

        for (i = 0; i < num; i++)
                if (table[i].reset)
                        table[i].reset(vcpu, &table[i]);
}
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct sys_reg_params params;
        unsigned long esr = kvm_vcpu_get_hsr(vcpu);

        params.is_aarch32 = false;
        params.is_32bit = false;
        params.Op0 = (esr >> 20) & 3;
        params.Op1 = (esr >> 14) & 0x7;
        params.CRn = (esr >> 10) & 0xf;
        params.CRm = (esr >> 1) & 0xf;
        params.Op2 = (esr >> 17) & 0x7;
        params.Rt = (esr >> 5) & 0x1f;
        params.is_write = !(esr & 1);

        return emulate_sys_reg(vcpu, &params);
}
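/*
 * Worked example (illustrative): a guest "msr sctlr_el1, x2" trapped
 * via HCR_EL2.TVM arrives with ESR ISS fields decoding to Op0 = 3,
 * Op1 = 0, CRn = 1, CRm = 0, Op2 = 0 and Rt = 2; bit 0 of the ESR is
 * clear for a write, so is_write is true, and the params match the
 * SCTLR_EL1 entry in sys_reg_descs, invoking access_sctlr.
 */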
/******************************************************************************
 * Userspace API
 *****************************************************************************/
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U64:
                /* Any unused index bits means it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                              | KVM_REG_ARM_COPROC_MASK
                              | KVM_REG_ARM64_SYSREG_OP0_MASK
                              | KVM_REG_ARM64_SYSREG_OP1_MASK
                              | KVM_REG_ARM64_SYSREG_CRN_MASK
                              | KVM_REG_ARM64_SYSREG_CRM_MASK
                              | KVM_REG_ARM64_SYSREG_OP2_MASK))
                        return false;
                params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
                params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
                               >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
                params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
                               >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
                params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
                return true;
        default:
                return false;
        }
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
                                                        u64 id)
{
        size_t num;
        const struct sys_reg_desc *table, *r;
        struct sys_reg_params params;

        /* We only do sys_reg for now. */
        if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
                return NULL;

        if (!index_to_params(id, &params))
                return NULL;

        table = get_target_table(vcpu->arch.target, true, &num);
        r = find_reg(&params, table, num);
        if (!r)
                r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        /* Not saved in the sys_reg array? */
        if (r && !r->reg)
                r = NULL;

        return r;
}
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)                                         \
        static void get_##reg(struct kvm_vcpu *v,                      \
                              const struct sys_reg_desc *r)            \
        {                                                               \
                u64 val;                                                \
                                                                        \
                asm volatile("mrs %0, " __stringify(reg) "\n"           \
                             : "=r" (val));                             \
                ((struct sys_reg_desc *)r)->val = val;                  \
        }

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
          NULL, get_midr_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
          NULL, get_revidr_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
          NULL, get_id_pfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
          NULL, get_id_pfr1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
          NULL, get_id_dfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
          NULL, get_id_afr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
          NULL, get_id_mmfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
          NULL, get_id_mmfr1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
          NULL, get_id_mmfr2_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
          NULL, get_id_mmfr3_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
          NULL, get_id_isar0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
          NULL, get_id_isar1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
          NULL, get_id_isar2_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
          NULL, get_id_isar3_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
          NULL, get_id_isar4_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
          NULL, get_id_isar5_el1 },
        { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
          NULL, get_clidr_el1 },
        { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
          NULL, get_aidr_el1 },
        { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
          NULL, get_ctr_el0 },
};
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
        struct sys_reg_params params;
        const struct sys_reg_desc *r;

        if (!index_to_params(id, &params))
                return -ENOENT;

        r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
        if (!r)
                return -ENOENT;

        return reg_to_user(uaddr, &r->val, id);
}
static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
        struct sys_reg_params params;
        const struct sys_reg_desc *r;
        int err;
        u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

        if (!index_to_params(id, &params))
                return -ENOENT;
        r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
        if (!r)
                return -ENOENT;

        err = reg_from_user(&val, uaddr, id);
        if (err)
                return err;

        /* This is what we mean by invariant: you can't change it. */
        if (r->val != val)
                return -EINVAL;

        return 0;
}
static bool is_valid_cache(u32 val)
{
        u32 level, ctype;

        if (val >= CSSELR_MAX)
                return false;

        /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
        level = (val >> 1);
        ctype = (cache_levels >> (level * 3)) & 7;

        switch (ctype) {
        case 0: /* No cache */
                return false;
        case 1: /* Instruction cache only */
                return (val & 1);
        case 2: /* Data cache only */
        case 4: /* Unified cache */
                return !(val & 1);
        case 3: /* Separate instruction and data caches */
                return true;
        default: /* Reserved: we can't know instruction or data. */
                return false;
        }
}
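/*
 * For illustration: if cache_levels reports Ctype1 = 3 (separate
 * instruction and data caches), both CSSELR values 0 (L1 data) and
 * 1 (L1 instruction) are valid; if Ctype2 = 4 (unified), only CSSELR
 * value 2 (L2 data/unified) is valid at that level, not 3.
 */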
static int demux_c15_get(u64 id, void __user *uaddr)
{
        u32 val;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                return put_user(get_ccsidr(val), uval);
        default:
                return -ENOENT;
        }
}
static int demux_c15_set(u64 id, void __user *uaddr)
{
        u32 val, newval;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                if (get_user(newval, uval))
                        return -EFAULT;

                /* This is also invariant: you can't change it. */
                if (newval != get_ccsidr(val))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct sys_reg_desc *r;
        void __user *uaddr = (void __user *)(unsigned long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_get(reg->id, uaddr);

        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
                return -ENOENT;

        r = index_to_sys_reg_desc(vcpu, reg->id);
        if (!r)
                return get_invariant_sys_reg(reg->id, uaddr);

        return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct sys_reg_desc *r;
        void __user *uaddr = (void __user *)(unsigned long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_set(reg->id, uaddr);

        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
                return -ENOENT;

        r = index_to_sys_reg_desc(vcpu, reg->id);
        if (!r)
                return set_invariant_sys_reg(reg->id, uaddr);

        return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
static unsigned int num_demux_regs(void)
{
        unsigned int i, count = 0;

        for (i = 0; i < CSSELR_MAX; i++)
                if (is_valid_cache(i))
                        count++;

        return count;
}
static int write_demux_regids(u64 __user *uindices)
{
        u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
        unsigned int i;

        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
        for (i = 0; i < CSSELR_MAX; i++) {
                if (!is_valid_cache(i))
                        continue;
                if (put_user(val | i, uindices))
                        return -EFAULT;
                uindices++;
        }
        return 0;
}
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
        return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
                KVM_REG_ARM64_SYSREG |
                (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
                (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
                (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
                (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
                (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
        if (!*uind)
                return true;

        if (put_user(sys_reg_to_index(reg), *uind))
                return false;

        (*uind)++;
        return true;
}
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
        const struct sys_reg_desc *i1, *i2, *end1, *end2;
        unsigned int total = 0;
        size_t num;

        /* We check for duplicates here, to allow arch-specific overrides. */
        i1 = get_target_table(vcpu->arch.target, true, &num);
        end1 = i1 + num;
        i2 = sys_reg_descs;
        end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

        BUG_ON(i1 == end1 || i2 == end2);

        /* Walk carefully, as both tables may refer to the same register. */
        while (i1 || i2) {
                int cmp = cmp_sys_reg(i1, i2);
                /* target-specific overrides generic entry. */
                if (cmp <= 0) {
                        /* Ignore registers we trap but don't save. */
                        if (i1->reg) {
                                if (!copy_reg_to_user(i1, &uind))
                                        return -EFAULT;
                                total++;
                        }
                } else {
                        /* Ignore registers we trap but don't save. */
                        if (i2->reg) {
                                if (!copy_reg_to_user(i2, &uind))
                                        return -EFAULT;
                                total++;
                        }
                }

                if (cmp <= 0 && ++i1 == end1)
                        i1 = NULL;
                if (cmp >= 0 && ++i2 == end2)
                        i2 = NULL;
        }
        return total;
}
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(invariant_sys_regs)
                + num_demux_regs()
                + walk_sys_regs(vcpu, (u64 __user *)NULL);
}
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int i;
        int err;

        /* Then give them all the invariant registers' indices. */
        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
                if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
                        return -EFAULT;
                uindices++;
        }

        err = walk_sys_regs(vcpu, uindices);
        if (err < 0)
                return err;
        uindices += err;

        return write_demux_regids(uindices);
}
static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
        unsigned int i;

        for (i = 1; i < n; i++) {
                if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
                        kvm_err("sys_reg table %p out of order (%d)\n",
                                table, i - 1);
                        return 1;
                }
        }

        return 0;
}
void kvm_sys_reg_table_init(void)
{
        unsigned int i;
        struct sys_reg_desc clidr;

        /* Make sure tables are unique and in order. */
        BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
        BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
        BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
        BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
        BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
        BUG_ON(check_sysreg_table(invariant_sys_regs,
                                  ARRAY_SIZE(invariant_sys_regs)));

        /* We abuse the reset function to overwrite the table itself. */
        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
                invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

        /*
         * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
         *
         *   If software reads the Cache Type fields from Ctype1
         *   upwards, once it has seen a value of 0b000, no caches
         *   exist at further-out levels of the hierarchy. So, for
         *   example, if Ctype3 is the first Cache Type field with a
         *   value of 0b000, the values of Ctype4 to Ctype7 must be
         *   ignored.
         */
        get_clidr_el1(NULL, &clidr); /* Ugly... */
        cache_levels = clidr.val;
        for (i = 0; i < 7; i++)
                if (((cache_levels >> (i*3)) & 7) == 0)
                        break;
        /* Clear all higher bits. */
        cache_levels &= (1 << (i*3))-1;
}
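/*
 * Worked example (illustrative): a CLIDR reporting Ctype1 = 3
 * (separate L1 I and D caches), Ctype2 = 4 (unified L2) and
 * Ctype3 = 0 gives cache_levels = 0b100011; the loop above breaks at
 * i = 2, and the mask (1 << 6) - 1 leaves exactly those two levels
 * in cache_levels.
 */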
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
        size_t num;
        const struct sys_reg_desc *table;

        /* Catch someone adding a register without putting in reset entry. */
        memset(&vcpu->arch.ctxt.sys_regs, 0x42,
               sizeof(vcpu->arch.ctxt.sys_regs));

        /* Generic chip reset first (so target could override). */
        reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        table = get_target_table(vcpu->arch.target, true, &num);
        reset_sys_reg_descs(vcpu, table, num);

        for (num = 1; num < NR_SYS_REGS; num++)
                if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
                        panic("Didn't reset vcpu_sys_reg(%zi)", num);
}