hw/intc/arm_gicv3_kvm.c
/*
 * ARM Generic Interrupt Controller using KVM in-kernel support
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Written by Pavel Fedin
 * Based on vGICv2 code by Peter Maydell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/intc/arm_gicv3_common.h"
#include "hw/sysbus.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "kvm_arm.h"
#include "gicv3_internal.h"
#include "vgic_common.h"
#include "migration/blocker.h"
#ifdef DEBUG_GICV3_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "kvm_gicv3: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#define TYPE_KVM_ARM_GICV3 "kvm-arm-gicv3"
#define KVM_ARM_GICV3(obj) \
     OBJECT_CHECK(GICv3State, (obj), TYPE_KVM_ARM_GICV3)
#define KVM_ARM_GICV3_CLASS(klass) \
     OBJECT_CLASS_CHECK(KVMARMGICv3Class, (klass), TYPE_KVM_ARM_GICV3)
#define KVM_ARM_GICV3_GET_CLASS(obj) \
     OBJECT_GET_CLASS(KVMARMGICv3Class, (obj), TYPE_KVM_ARM_GICV3)
#define KVM_DEV_ARM_VGIC_SYSREG(op0, op1, crn, crm, op2)         \
                             (ARM64_SYS_REG_SHIFT_MASK(op0, OP0) | \
                              ARM64_SYS_REG_SHIFT_MASK(op1, OP1) | \
                              ARM64_SYS_REG_SHIFT_MASK(crn, CRN) | \
                              ARM64_SYS_REG_SHIFT_MASK(crm, CRM) | \
                              ARM64_SYS_REG_SHIFT_MASK(op2, OP2))
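
/*
 * System register encodings (op0/op1/CRn/CRm/op2) for the GICv3 CPU
 * interface registers, expressed as KVM device attributes so they can be
 * accessed through KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS.
 */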
#define ICC_PMR_EL1     \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 4, 6, 0)
#define ICC_BPR0_EL1    \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 8, 3)
#define ICC_AP0R_EL1(n) \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 8, 4 | n)
#define ICC_AP1R_EL1(n) \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 9, n)
#define ICC_BPR1_EL1    \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 3)
#define ICC_CTLR_EL1    \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 4)
#define ICC_SRE_EL1     \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 5)
#define ICC_IGRPEN0_EL1 \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 6)
#define ICC_IGRPEN1_EL1 \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 7)
typedef struct KVMARMGICv3Class {
    ARMGICv3CommonClass parent_class;
    DeviceRealize parent_realize;
    void (*parent_reset)(DeviceState *dev);
} KVMARMGICv3Class;
static void kvm_arm_gicv3_set_irq(void *opaque, int irq, int level)
{
    GICv3State *s = (GICv3State *)opaque;

    kvm_arm_gic_set_irq(s->num_irq, irq, level);
}
#define KVM_VGIC_ATTR(reg, typer) \
    ((typer & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) | (reg))
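
/*
 * The attribute for a vGIC register access combines the target vCPU's
 * affinity bits (taken from its GICR_TYPER value) with the register offset
 * or encoding in the low bits; the affinity tells the kernel which
 * redistributor or CPU interface to address. Distributor accesses pass 0
 * for typer.
 */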
static inline void kvm_gicd_access(GICv3State *s, int offset,
                                   uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                      KVM_VGIC_ATTR(offset, 0),
                      val, write, &error_abort);
}

static inline void kvm_gicr_access(GICv3State *s, int offset, int cpu,
                                   uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
                      KVM_VGIC_ATTR(offset, s->cpu[cpu].gicr_typer),
                      val, write, &error_abort);
}

static inline void kvm_gicc_access(GICv3State *s, uint64_t reg, int cpu,
                                   uint64_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
                      KVM_VGIC_ATTR(reg, s->cpu[cpu].gicr_typer),
                      val, write, &error_abort);
}

static inline void kvm_gic_line_level_access(GICv3State *s, int irq, int cpu,
                                             uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
                      KVM_VGIC_ATTR(irq, s->cpu[cpu].gicr_typer) |
                      (VGIC_LEVEL_INFO_LINE_LEVEL <<
                       KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT),
                      val, write, &error_abort);
}
/* Loop through each distributor IRQ related register; since bits
 * corresponding to SPIs and PPIs are RAZ/WI when affinity routing
 * is enabled, we skip those.
 */
#define for_each_dist_irq_reg(_irq, _max, _field_width) \
    for (_irq = GIC_INTERNAL; _irq < _max; _irq += (32 / _field_width))
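
/*
 * _field_width is the number of bits the register holds per interrupt, so
 * each 32-bit register covers (32 / _field_width) interrupts; starting at
 * GIC_INTERNAL skips the SGIs/PPIs, which are handled via the
 * redistributor registers instead.
 */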
static void kvm_dist_get_priority(GICv3State *s, uint32_t offset, uint8_t *bmp)
{
    uint32_t reg, *field;
    int irq;

    /* For the KVM GICv3, affinity routing is always enabled, and the first 8
     * GICD_IPRIORITYR<n> registers are always RAZ/WI. The corresponding
     * functionality is provided by GICR_IPRIORITYR<n> instead, so there is
     * nothing to sync for them; skip the GIC_INTERNAL irqs in both bmp and
     * offset.
     */
    field = (uint32_t *)(bmp + GIC_INTERNAL);
    offset += (GIC_INTERNAL * 8) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 8) {
        kvm_gicd_access(s, offset, &reg, false);
        *field = reg;
        offset += 4;
        field++;
    }
}
static void kvm_dist_put_priority(GICv3State *s, uint32_t offset, uint8_t *bmp)
{
    uint32_t reg, *field;
    int irq;

    /* For the KVM GICv3, affinity routing is always enabled, and the first 8
     * GICD_IPRIORITYR<n> registers are always RAZ/WI. The corresponding
     * functionality is provided by GICR_IPRIORITYR<n> instead, so there is
     * nothing to sync for them; skip the GIC_INTERNAL irqs in both bmp and
     * offset.
     */
    field = (uint32_t *)(bmp + GIC_INTERNAL);
    offset += (GIC_INTERNAL * 8) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 8) {
        reg = *field;
        kvm_gicd_access(s, offset, &reg, true);
        offset += 4;
        field++;
    }
}
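
/*
 * GICD_ICFGR<n> holds two configuration bits per interrupt, with the
 * edge/level bit in the upper bit of each pair, while the GICv3 model keeps
 * a single edge_trigger bit per interrupt; half_shuffle32() and
 * half_unshuffle32() convert between the two layouts in the accessors
 * below.
 */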
static void kvm_dist_get_edge_trigger(GICv3State *s, uint32_t offset,
                                      uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    /* For the KVM GICv3, affinity routing is always enabled, and the first 2
     * GICD_ICFGR<n> registers are always RAZ/WI. The corresponding
     * functionality is provided by GICR_ICFGR<n> instead, so there is nothing
     * to sync for them; increase the offset to skip the GIC_INTERNAL irqs.
     * This matches the for_each_dist_irq_reg() macro, which also skips the
     * first GIC_INTERNAL irqs.
     */
    offset += (GIC_INTERNAL * 2) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 2) {
        kvm_gicd_access(s, offset, &reg, false);
        reg = half_unshuffle32(reg >> 1);
        if (irq % 32 != 0) {
            reg = (reg << 16);
        }
        *gic_bmp_ptr32(bmp, irq) |= reg;
        offset += 4;
    }
}
static void kvm_dist_put_edge_trigger(GICv3State *s, uint32_t offset,
                                      uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    /* For the KVM GICv3, affinity routing is always enabled, and the first 2
     * GICD_ICFGR<n> registers are always RAZ/WI. The corresponding
     * functionality is provided by GICR_ICFGR<n> instead, so there is nothing
     * to sync for them; increase the offset to skip the GIC_INTERNAL irqs.
     * This matches the for_each_dist_irq_reg() macro, which also skips the
     * first GIC_INTERNAL irqs.
     */
    offset += (GIC_INTERNAL * 2) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 2) {
        reg = *gic_bmp_ptr32(bmp, irq);
        if (irq % 32 != 0) {
            reg = (reg & 0xffff0000) >> 16;
        } else {
            reg = reg & 0xffff;
        }
        reg = half_shuffle32(reg) << 1;
        kvm_gicd_access(s, offset, &reg, true);
        offset += 4;
    }
}
static void kvm_gic_get_line_level_bmp(GICv3State *s, uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    for_each_dist_irq_reg(irq, s->num_irq, 1) {
        kvm_gic_line_level_access(s, irq, 0, &reg, false);
        *gic_bmp_ptr32(bmp, irq) = reg;
    }
}

static void kvm_gic_put_line_level_bmp(GICv3State *s, uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    for_each_dist_irq_reg(irq, s->num_irq, 1) {
        reg = *gic_bmp_ptr32(bmp, irq);
        kvm_gic_line_level_access(s, irq, 0, &reg, true);
    }
}
/* Read a bitmap register group from the kernel VGIC. */
static void kvm_dist_getbmp(GICv3State *s, uint32_t offset, uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    /* For the KVM GICv3, affinity routing is always enabled, and the
     * GICD_IGROUPR0/GICD_IGRPMODR0/GICD_ISENABLER0/GICD_ISPENDR0/
     * GICD_ISACTIVER0 registers are always RAZ/WI. The corresponding
     * functionality is provided by the GICR registers instead, so there is
     * nothing to sync for them; increase the offset to skip the GIC_INTERNAL
     * irqs. This matches the for_each_dist_irq_reg() macro, which also skips
     * the first GIC_INTERNAL irqs.
     */
    offset += (GIC_INTERNAL * 1) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 1) {
        kvm_gicd_access(s, offset, &reg, false);
        *gic_bmp_ptr32(bmp, irq) = reg;
        offset += 4;
    }
}
static void kvm_dist_putbmp(GICv3State *s, uint32_t offset,
                            uint32_t clroffset, uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    /* For the KVM GICv3, affinity routing is always enabled, and the
     * GICD_IGROUPR0/GICD_IGRPMODR0/GICD_ISENABLER0/GICD_ISPENDR0/
     * GICD_ISACTIVER0 registers are always RAZ/WI. The corresponding
     * functionality is provided by the GICR registers instead, so there is
     * nothing to sync for them; increase the offset and clroffset to skip the
     * GIC_INTERNAL irqs. This matches the for_each_dist_irq_reg() macro,
     * which also skips the first GIC_INTERNAL irqs.
     */
    offset += (GIC_INTERNAL * 1) / 8;
    if (clroffset != 0) {
        clroffset += (GIC_INTERNAL * 1) / 8;
    }

    for_each_dist_irq_reg(irq, s->num_irq, 1) {
        /* If this bitmap is a set/clear register pair, first write to the
         * clear-reg to clear all bits before using the set-reg to write
         * the 1 bits.
         */
        if (clroffset != 0) {
            reg = 0;
            kvm_gicd_access(s, clroffset, &reg, true);
            clroffset += 4;
        }
        reg = *gic_bmp_ptr32(bmp, irq);
        kvm_gicd_access(s, offset, &reg, true);
        offset += 4;
    }
}
static void kvm_arm_gicv3_check(GICv3State *s)
{
    uint32_t reg;
    uint32_t num_irq;

    /* Sanity checking s->num_irq */
    kvm_gicd_access(s, GICD_TYPER, &reg, false);
    num_irq = ((reg & 0x1f) + 1) * 32;

    if (num_irq < s->num_irq) {
        error_report("Model requests %u IRQs, but kernel supports max %u",
                     s->num_irq, num_irq);
        abort();
    }
}
static void kvm_arm_gicv3_put(GICv3State *s)
{
    uint32_t regl, regh, reg;
    uint64_t reg64, redist_typer;
    int ncpu, i;

    kvm_arm_gicv3_check(s);

    kvm_gicr_access(s, GICR_TYPER, 0, &regl, false);
    kvm_gicr_access(s, GICR_TYPER + 4, 0, &regh, false);
    redist_typer = ((uint64_t)regh << 32) | regl;

    reg = s->gicd_ctlr;
    kvm_gicd_access(s, GICD_CTLR, &reg, true);

    if (redist_typer & GICR_TYPER_PLPIS) {
        /* Set base addresses before LPIs are enabled by GICR_CTLR write */
        for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
            GICv3CPUState *c = &s->cpu[ncpu];

            reg64 = c->gicr_propbaser;
            regl = (uint32_t)reg64;
            kvm_gicr_access(s, GICR_PROPBASER, ncpu, &regl, true);
            regh = (uint32_t)(reg64 >> 32);
            kvm_gicr_access(s, GICR_PROPBASER + 4, ncpu, &regh, true);

            reg64 = c->gicr_pendbaser;
            if (!(c->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
                /* Setting PTZ is advised if LPIs are disabled, to reduce
                 * GIC initialization time.
                 */
                reg64 |= GICR_PENDBASER_PTZ;
            }
            regl = (uint32_t)reg64;
            kvm_gicr_access(s, GICR_PENDBASER, ncpu, &regl, true);
            regh = (uint32_t)(reg64 >> 32);
            kvm_gicr_access(s, GICR_PENDBASER + 4, ncpu, &regh, true);
        }
    }
    /* Redistributor state (one per CPU) */

    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
        GICv3CPUState *c = &s->cpu[ncpu];

        reg = c->gicr_ctlr;
        kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, true);

        reg = c->gicr_statusr[GICV3_NS];
        kvm_gicr_access(s, GICR_STATUSR, ncpu, &reg, true);

        reg = c->gicr_waker;
        kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, true);

        reg = c->gicr_igroupr0;
        kvm_gicr_access(s, GICR_IGROUPR0, ncpu, &reg, true);

        reg = ~0;
        kvm_gicr_access(s, GICR_ICENABLER0, ncpu, &reg, true);
        reg = c->gicr_ienabler0;
        kvm_gicr_access(s, GICR_ISENABLER0, ncpu, &reg, true);

        /* Restore config before pending so we treat level/edge correctly */
        reg = half_shuffle32(c->edge_trigger >> 16) << 1;
        kvm_gicr_access(s, GICR_ICFGR1, ncpu, &reg, true);

        reg = c->level;
        kvm_gic_line_level_access(s, 0, ncpu, &reg, true);

        reg = ~0;
        kvm_gicr_access(s, GICR_ICPENDR0, ncpu, &reg, true);
        reg = c->gicr_ipendr0;
        kvm_gicr_access(s, GICR_ISPENDR0, ncpu, &reg, true);

        reg = ~0;
        kvm_gicr_access(s, GICR_ICACTIVER0, ncpu, &reg, true);
        reg = c->gicr_iactiver0;
        kvm_gicr_access(s, GICR_ISACTIVER0, ncpu, &reg, true);

        for (i = 0; i < GIC_INTERNAL; i += 4) {
            reg = c->gicr_ipriorityr[i] |
                (c->gicr_ipriorityr[i + 1] << 8) |
                (c->gicr_ipriorityr[i + 2] << 16) |
                (c->gicr_ipriorityr[i + 3] << 24);
            kvm_gicr_access(s, GICR_IPRIORITYR + i, ncpu, &reg, true);
        }
    }
    /* Distributor state (shared between all CPUs) */

    reg = s->gicd_statusr[GICV3_NS];
    kvm_gicd_access(s, GICD_STATUSR, &reg, true);

    /* s->enable bitmap -> GICD_ISENABLERn */
    kvm_dist_putbmp(s, GICD_ISENABLER, GICD_ICENABLER, s->enabled);

    /* s->group bitmap -> GICD_IGROUPRn */
    kvm_dist_putbmp(s, GICD_IGROUPR, 0, s->group);

    /* Restore targets before pending to ensure the pending state is set on
     * the appropriate CPU interfaces in the kernel
     */

    /* s->gicd_irouter[irq] -> GICD_IROUTERn
     * We can't use kvm_dist_put() here because the registers are 64-bit
     */
    for (i = GIC_INTERNAL; i < s->num_irq; i++) {
        uint32_t offset;

        offset = GICD_IROUTER + (sizeof(uint32_t) * i);
        reg = (uint32_t)s->gicd_irouter[i];
        kvm_gicd_access(s, offset, &reg, true);

        offset = GICD_IROUTER + (sizeof(uint32_t) * i) + 4;
        reg = (uint32_t)(s->gicd_irouter[i] >> 32);
        kvm_gicd_access(s, offset, &reg, true);
    }

    /* s->trigger bitmap -> GICD_ICFGRn
     * (restore configuration registers before pending IRQs so we treat
     * level/edge correctly)
     */
    kvm_dist_put_edge_trigger(s, GICD_ICFGR, s->edge_trigger);

    /* s->level bitmap -> line_level */
    kvm_gic_put_line_level_bmp(s, s->level);

    /* s->pending bitmap -> GICD_ISPENDRn */
    kvm_dist_putbmp(s, GICD_ISPENDR, GICD_ICPENDR, s->pending);

    /* s->active bitmap -> GICD_ISACTIVERn */
    kvm_dist_putbmp(s, GICD_ISACTIVER, GICD_ICACTIVER, s->active);

    /* s->gicd_ipriority[] -> GICD_IPRIORITYRn */
    kvm_dist_put_priority(s, GICD_IPRIORITYR, s->gicd_ipriority);
    /* CPU Interface state (one per CPU) */

    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
        GICv3CPUState *c = &s->cpu[ncpu];
        int num_pri_bits;

        kvm_gicc_access(s, ICC_SRE_EL1, ncpu, &c->icc_sre_el1, true);
        kvm_gicc_access(s, ICC_CTLR_EL1, ncpu,
                        &c->icc_ctlr_el1[GICV3_NS], true);
        kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu,
                        &c->icc_igrpen[GICV3_G0], true);
        kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu,
                        &c->icc_igrpen[GICV3_G1NS], true);
        kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &c->icc_pmr_el1, true);
        kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &c->icc_bpr[GICV3_G0], true);
        kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &c->icc_bpr[GICV3_G1NS], true);

        num_pri_bits = ((c->icc_ctlr_el1[GICV3_NS] &
                        ICC_CTLR_EL1_PRIBITS_MASK) >>
                        ICC_CTLR_EL1_PRIBITS_SHIFT) + 1;

        switch (num_pri_bits) {
        case 7:
            reg64 = c->icc_apr[GICV3_G0][3];
            kvm_gicc_access(s, ICC_AP0R_EL1(3), ncpu, &reg64, true);
            reg64 = c->icc_apr[GICV3_G0][2];
            kvm_gicc_access(s, ICC_AP0R_EL1(2), ncpu, &reg64, true);
            /* fall through */
        case 6:
            reg64 = c->icc_apr[GICV3_G0][1];
            kvm_gicc_access(s, ICC_AP0R_EL1(1), ncpu, &reg64, true);
            /* fall through */
        default:
            reg64 = c->icc_apr[GICV3_G0][0];
            kvm_gicc_access(s, ICC_AP0R_EL1(0), ncpu, &reg64, true);
        }

        switch (num_pri_bits) {
        case 7:
            reg64 = c->icc_apr[GICV3_G1NS][3];
            kvm_gicc_access(s, ICC_AP1R_EL1(3), ncpu, &reg64, true);
            reg64 = c->icc_apr[GICV3_G1NS][2];
            kvm_gicc_access(s, ICC_AP1R_EL1(2), ncpu, &reg64, true);
            /* fall through */
        case 6:
            reg64 = c->icc_apr[GICV3_G1NS][1];
            kvm_gicc_access(s, ICC_AP1R_EL1(1), ncpu, &reg64, true);
            /* fall through */
        default:
            reg64 = c->icc_apr[GICV3_G1NS][0];
            kvm_gicc_access(s, ICC_AP1R_EL1(0), ncpu, &reg64, true);
        }
    }
}
static void kvm_arm_gicv3_get(GICv3State *s)
{
    uint32_t regl, regh, reg;
    uint64_t reg64, redist_typer;
    int ncpu, i;

    kvm_arm_gicv3_check(s);

    kvm_gicr_access(s, GICR_TYPER, 0, &regl, false);
    kvm_gicr_access(s, GICR_TYPER + 4, 0, &regh, false);
    redist_typer = ((uint64_t)regh << 32) | regl;

    kvm_gicd_access(s, GICD_CTLR, &reg, false);
    s->gicd_ctlr = reg;
    /* Redistributor state (one per CPU) */

    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
        GICv3CPUState *c = &s->cpu[ncpu];

        kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, false);
        c->gicr_ctlr = reg;

        kvm_gicr_access(s, GICR_STATUSR, ncpu, &reg, false);
        c->gicr_statusr[GICV3_NS] = reg;

        kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, false);
        c->gicr_waker = reg;

        kvm_gicr_access(s, GICR_IGROUPR0, ncpu, &reg, false);
        c->gicr_igroupr0 = reg;
        kvm_gicr_access(s, GICR_ISENABLER0, ncpu, &reg, false);
        c->gicr_ienabler0 = reg;
        kvm_gicr_access(s, GICR_ICFGR1, ncpu, &reg, false);
        c->edge_trigger = half_unshuffle32(reg >> 1) << 16;
        kvm_gic_line_level_access(s, 0, ncpu, &reg, false);
        c->level = reg;
        kvm_gicr_access(s, GICR_ISPENDR0, ncpu, &reg, false);
        c->gicr_ipendr0 = reg;
        kvm_gicr_access(s, GICR_ISACTIVER0, ncpu, &reg, false);
        c->gicr_iactiver0 = reg;

        for (i = 0; i < GIC_INTERNAL; i += 4) {
            kvm_gicr_access(s, GICR_IPRIORITYR + i, ncpu, &reg, false);
            c->gicr_ipriorityr[i] = extract32(reg, 0, 8);
            c->gicr_ipriorityr[i + 1] = extract32(reg, 8, 8);
            c->gicr_ipriorityr[i + 2] = extract32(reg, 16, 8);
            c->gicr_ipriorityr[i + 3] = extract32(reg, 24, 8);
        }
    }
    if (redist_typer & GICR_TYPER_PLPIS) {
        for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
            GICv3CPUState *c = &s->cpu[ncpu];

            kvm_gicr_access(s, GICR_PROPBASER, ncpu, &regl, false);
            kvm_gicr_access(s, GICR_PROPBASER + 4, ncpu, &regh, false);
            c->gicr_propbaser = ((uint64_t)regh << 32) | regl;

            kvm_gicr_access(s, GICR_PENDBASER, ncpu, &regl, false);
            kvm_gicr_access(s, GICR_PENDBASER + 4, ncpu, &regh, false);
            c->gicr_pendbaser = ((uint64_t)regh << 32) | regl;
        }
    }
    /* Distributor state (shared between all CPUs) */

    kvm_gicd_access(s, GICD_STATUSR, &reg, false);
    s->gicd_statusr[GICV3_NS] = reg;

    /* GICD_IGROUPRn -> s->group bitmap */
    kvm_dist_getbmp(s, GICD_IGROUPR, s->group);

    /* GICD_ISENABLERn -> s->enabled bitmap */
    kvm_dist_getbmp(s, GICD_ISENABLER, s->enabled);

    /* Line level of irq */
    kvm_gic_get_line_level_bmp(s, s->level);
    /* GICD_ISPENDRn -> s->pending bitmap */
    kvm_dist_getbmp(s, GICD_ISPENDR, s->pending);

    /* GICD_ISACTIVERn -> s->active bitmap */
    kvm_dist_getbmp(s, GICD_ISACTIVER, s->active);

    /* GICD_ICFGRn -> s->trigger bitmap */
    kvm_dist_get_edge_trigger(s, GICD_ICFGR, s->edge_trigger);

    /* GICD_IPRIORITYRn -> s->gicd_ipriority[] */
    kvm_dist_get_priority(s, GICD_IPRIORITYR, s->gicd_ipriority);

    /* GICD_IROUTERn -> s->gicd_irouter[irq] */
    for (i = GIC_INTERNAL; i < s->num_irq; i++) {
        uint32_t offset;

        offset = GICD_IROUTER + (sizeof(uint32_t) * i);
        kvm_gicd_access(s, offset, &regl, false);
        offset = GICD_IROUTER + (sizeof(uint32_t) * i) + 4;
        kvm_gicd_access(s, offset, &regh, false);
        s->gicd_irouter[i] = ((uint64_t)regh << 32) | regl;
    }
    /*****************************************************************
     * CPU Interface(s) State
     */

    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
        GICv3CPUState *c = &s->cpu[ncpu];
        int num_pri_bits;

        kvm_gicc_access(s, ICC_SRE_EL1, ncpu, &c->icc_sre_el1, false);
        kvm_gicc_access(s, ICC_CTLR_EL1, ncpu,
                        &c->icc_ctlr_el1[GICV3_NS], false);
        kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu,
                        &c->icc_igrpen[GICV3_G0], false);
        kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu,
                        &c->icc_igrpen[GICV3_G1NS], false);
        kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &c->icc_pmr_el1, false);
        kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &c->icc_bpr[GICV3_G0], false);
        kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &c->icc_bpr[GICV3_G1NS], false);
        num_pri_bits = ((c->icc_ctlr_el1[GICV3_NS] &
                        ICC_CTLR_EL1_PRIBITS_MASK) >>
                        ICC_CTLR_EL1_PRIBITS_SHIFT) + 1;

        switch (num_pri_bits) {
        case 7:
            kvm_gicc_access(s, ICC_AP0R_EL1(3), ncpu, &reg64, false);
            c->icc_apr[GICV3_G0][3] = reg64;
            kvm_gicc_access(s, ICC_AP0R_EL1(2), ncpu, &reg64, false);
            c->icc_apr[GICV3_G0][2] = reg64;
            /* fall through */
        case 6:
            kvm_gicc_access(s, ICC_AP0R_EL1(1), ncpu, &reg64, false);
            c->icc_apr[GICV3_G0][1] = reg64;
            /* fall through */
        default:
            kvm_gicc_access(s, ICC_AP0R_EL1(0), ncpu, &reg64, false);
            c->icc_apr[GICV3_G0][0] = reg64;
        }

        switch (num_pri_bits) {
        case 7:
            kvm_gicc_access(s, ICC_AP1R_EL1(3), ncpu, &reg64, false);
            c->icc_apr[GICV3_G1NS][3] = reg64;
            kvm_gicc_access(s, ICC_AP1R_EL1(2), ncpu, &reg64, false);
            c->icc_apr[GICV3_G1NS][2] = reg64;
            /* fall through */
        case 6:
            kvm_gicc_access(s, ICC_AP1R_EL1(1), ncpu, &reg64, false);
            c->icc_apr[GICV3_G1NS][1] = reg64;
            /* fall through */
        default:
            kvm_gicc_access(s, ICC_AP1R_EL1(0), ncpu, &reg64, false);
            c->icc_apr[GICV3_G1NS][0] = reg64;
        }
    }
}
static void arm_gicv3_icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu;
    GICv3State *s;
    GICv3CPUState *c;

    c = (GICv3CPUState *)env->gicv3state;
    s = c->gic;
    cpu = ARM_CPU(c->cpu);

    c->icc_pmr_el1 = 0;
    c->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
    c->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
    c->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR;

    c->icc_sre_el1 = 0x7;
    memset(c->icc_apr, 0, sizeof(c->icc_apr));
    memset(c->icc_igrpen, 0, sizeof(c->icc_igrpen));

    if (s->migration_blocker) {
        return;
    }

    /* Initialize to actual HW supported configuration */
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
                      KVM_VGIC_ATTR(ICC_CTLR_EL1, cpu->mp_affinity),
                      &c->icc_ctlr_el1[GICV3_NS], false, &error_abort);

    c->icc_ctlr_el1[GICV3_S] = c->icc_ctlr_el1[GICV3_NS];
}
static void kvm_arm_gicv3_reset(DeviceState *dev)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s);

    DPRINTF("Reset\n");

    kgc->parent_reset(dev);

    if (s->migration_blocker) {
        DPRINTF("Cannot put kernel gic state, no kernel interface\n");
        return;
    }

    kvm_arm_gicv3_put(s);
}
/*
 * The GIC's CPU interface registers need to be reset on CPU reset.
 * To have arm_gicv3_icc_reset() called on CPU reset, we register the
 * ARMCPRegInfo below. Since the whole CPU interface is reset from this
 * single register's reset hook, we define only one CPU interface
 * register here instead of defining all of them.
 */
static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
    { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
      /*
       * If ARM_CP_NOP is used, resetfn is not called,
       * so ARM_CP_NO_RAW is the appropriate type.
       */
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW,
      .readfn = arm_cp_read_zero,
      .writefn = arm_cp_write_ignore,
      /*
       * We hang the whole cpu interface reset routine off here
       * rather than parcelling it out into one little function
       * per register
       */
      .resetfn = arm_gicv3_icc_reset,
    },
    REGINFO_SENTINEL
};
/*
 * vm_change_state_handler - VM change state callback aiming at flushing
 * RDIST pending tables into guest RAM
 *
 * The tables get flushed to guest RAM whenever the VM gets stopped.
 */
static void vm_change_state_handler(void *opaque, int running,
                                    RunState state)
{
    GICv3State *s = (GICv3State *)opaque;
    Error *err = NULL;
    int ret;

    if (running) {
        return;
    }

    ret = kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                            KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES,
                            NULL, true, &err);
    if (err) {
        error_report_err(err);
    }
    if (ret < 0 && ret != -EFAULT) {
        abort();
    }
}
static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
{
    GICv3State *s = KVM_ARM_GICV3(dev);
    KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s);
    bool multiple_redist_region_allowed;
    Error *local_err = NULL;
    int i;

    DPRINTF("kvm_arm_gicv3_realize\n");

    kgc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->security_extn) {
        error_setg(errp, "the in-kernel VGICv3 does not implement the "
                   "security extensions");
        return;
    }

    gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    for (i = 0; i < s->num_cpu; i++) {
        ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));

        define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
    }
    /* Try to create the device via the device control API */
    s->dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V3, false);
    if (s->dev_fd < 0) {
        error_setg_errno(errp, -s->dev_fd, "error creating in-kernel VGIC");
        return;
    }

    multiple_redist_region_allowed =
        kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
                              KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION);

    if (!multiple_redist_region_allowed && s->nb_redist_regions > 1) {
        error_setg(errp, "Multiple VGICv3 redistributor regions are not "
                   "supported by this host kernel");
        error_append_hint(errp, "A maximum of %d VCPUs can be used",
                          s->redist_region_count[0]);
        return;
    }

    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
                      0, &s->num_irq, true, &error_abort);

    /* Tell the kernel to complete VGIC initialization now */
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                      KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true, &error_abort);
    kvm_arm_register_device(&s->iomem_dist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
                            KVM_VGIC_V3_ADDR_TYPE_DIST, s->dev_fd, 0);

    if (!multiple_redist_region_allowed) {
        kvm_arm_register_device(&s->iomem_redist[0], -1,
                                KVM_DEV_ARM_VGIC_GRP_ADDR,
                                KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd, 0);
    } else {
        /* we register regions in reverse order as "devices" are inserted at
         * the head of a QSLIST and the list is then popped from the head
         * onwards by kvm_arm_machine_init_done()
         */
        for (i = s->nb_redist_regions - 1; i >= 0; i--) {
            /* Address mask made of the rdist region index and count */
            uint64_t addr_ormask =
                        i | ((uint64_t)s->redist_region_count[i] << 52);

            kvm_arm_register_device(&s->iomem_redist[i], -1,
                                    KVM_DEV_ARM_VGIC_GRP_ADDR,
                                    KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION,
                                    s->dev_fd, addr_ormask);
        }
    }
    if (kvm_has_gsi_routing()) {
        /* set up irq routing */
        for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
            kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
        }

        kvm_gsi_routing_allowed = true;

        kvm_irqchip_commit_routes(kvm_state);
    }

    if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                               GICD_CTLR)) {
        error_setg(&s->migration_blocker, "This operating system kernel does "
                                          "not support vGICv3 migration");
        migrate_add_blocker(s->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(s->migration_blocker);
            return;
        }
    }

    if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                              KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES)) {
        qemu_add_vm_change_state_handler(vm_change_state_handler, s);
    }
}
static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
    KVMARMGICv3Class *kgc = KVM_ARM_GICV3_CLASS(klass);

    agcc->pre_save = kvm_arm_gicv3_get;
    agcc->post_load = kvm_arm_gicv3_put;
    device_class_set_parent_realize(dc, kvm_arm_gicv3_realize,
                                    &kgc->parent_realize);
    device_class_set_parent_reset(dc, kvm_arm_gicv3_reset, &kgc->parent_reset);
}

static const TypeInfo kvm_arm_gicv3_info = {
    .name = TYPE_KVM_ARM_GICV3,
    .parent = TYPE_ARM_GICV3_COMMON,
    .instance_size = sizeof(GICv3State),
    .class_init = kvm_arm_gicv3_class_init,
    .class_size = sizeof(KVMARMGICv3Class),
};

static void kvm_arm_gicv3_register_types(void)
{
    type_register_static(&kvm_arm_gicv3_info);
}

type_init(kvm_arm_gicv3_register_types)