/*
 * ARM Generic Interrupt Controller using KVM in-kernel support
 *
 * Copyright (c) 2012 Linaro Limited
 * Written by Peter Maydell
 * Save/Restore logic added by Christoffer Dall.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
22 #include "qemu/osdep.h"
23 #include "qapi/error.h"
24 #include "qemu-common.h"
25 #include "cpu.h"
26 #include "hw/sysbus.h"
27 #include "migration/blocker.h"
28 #include "sysemu/kvm.h"
29 #include "kvm_arm.h"
30 #include "gic_internal.h"
31 #include "vgic_common.h"

#define TYPE_KVM_ARM_GIC "kvm-arm-gic"
#define KVM_ARM_GIC(obj) \
     OBJECT_CHECK(GICState, (obj), TYPE_KVM_ARM_GIC)
#define KVM_ARM_GIC_CLASS(klass) \
     OBJECT_CLASS_CHECK(KVMARMGICClass, (klass), TYPE_KVM_ARM_GIC)
#define KVM_ARM_GIC_GET_CLASS(obj) \
     OBJECT_GET_CLASS(KVMARMGICClass, (obj), TYPE_KVM_ARM_GIC)

typedef struct KVMARMGICClass {
    ARMGICCommonClass parent_class;
    DeviceRealize parent_realize;
    void (*parent_reset)(DeviceState *dev);
} KVMARMGICClass;

void kvm_arm_gic_set_irq(uint32_t num_irq, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1] : external interrupts
     *  [N..N+31] : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
     *  ...
     * Convert this to the kernel's desired encoding, which
     * has separate fields in the irq number for type,
     * CPU number and interrupt number.
     */
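    /*
     * Worked example (illustration only, assuming num_irq == 64 and
     * GIC_INTERNAL == 32): QEMU irq 5 is an external interrupt and is
     * encoded as SPI 37 on VCPU 0, while QEMU irq 82 decodes to internal
     * interrupt 18 (a PPI) on VCPU 1.
     */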
    int kvm_irq, irqtype, cpu;

    if (irq < (num_irq - GIC_INTERNAL)) {
        /* External interrupt. The kernel numbers these like the GIC
         * hardware, with external interrupt IDs starting after the
         * internal ones.
         */
        irqtype = KVM_ARM_IRQ_TYPE_SPI;
        cpu = 0;
        irq += GIC_INTERNAL;
    } else {
        /* Internal interrupt: decode into (cpu, interrupt id) */
        irqtype = KVM_ARM_IRQ_TYPE_PPI;
        irq -= (num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
    }
    kvm_irq = (irqtype << KVM_ARM_IRQ_TYPE_SHIFT)
        | (cpu << KVM_ARM_IRQ_VCPU_SHIFT) | irq;

    kvm_set_irq(kvm_state, kvm_irq, !!level);
}

static void kvm_arm_gicv2_set_irq(void *opaque, int irq, int level)
{
    GICState *s = (GICState *)opaque;

    kvm_arm_gic_set_irq(s->num_irq, irq, level);
}

static bool kvm_arm_gic_can_save_restore(GICState *s)
{
    return s->dev_fd >= 0;
}
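
/*
 * KVM_VGIC_ATTR() packs a CPU index and a register offset into the single
 * 64-bit attribute value that the KVM device API expects when accessing
 * VGIC distributor/CPU-interface registers.
 */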
#define KVM_VGIC_ATTR(offset, cpu) \
    ((((uint64_t)(cpu) << KVM_DEV_ARM_VGIC_CPUID_SHIFT) & \
      KVM_DEV_ARM_VGIC_CPUID_MASK) | \
     (((uint64_t)(offset) << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) & \
      KVM_DEV_ARM_VGIC_OFFSET_MASK))

static void kvm_gicd_access(GICState *s, int offset, int cpu,
                            uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                      KVM_VGIC_ATTR(offset, cpu), val, write, &error_abort);
}

static void kvm_gicc_access(GICState *s, int offset, int cpu,
                            uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
                      KVM_VGIC_ATTR(offset, cpu), val, write, &error_abort);
}
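
/*
 * for_each_irq_reg() iterates once per 32-bit distributor register needed
 * to cover _max_irq interrupts at _field_width bits per interrupt: for
 * example, 1-bit fields (enable/pending) give 32 IRQs per register, while
 * 8-bit fields (priority/targets) give 4 IRQs per register.
 */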
#define for_each_irq_reg(_ctr, _max_irq, _field_width) \
    for (_ctr = 0; _ctr < ((_max_irq) / (32 / (_field_width))); _ctr++)

/*
 * Translate from the in-kernel field for an IRQ value to/from the qemu
 * representation.
 */
typedef void (*vgic_translate_fn)(GICState *s, int irq, int cpu,
                                  uint32_t *field, bool to_kernel);

/* synthetic translate function used for clear/set registers to completely
 * clear a setting using a clear-register before setting the remaining bits
 * using a set-register */
static void translate_clear(GICState *s, int irq, int cpu,
                            uint32_t *field, bool to_kernel)
{
    if (to_kernel) {
        *field = ~0;
    } else {
        /* does not make sense: qemu model doesn't use set/clear regs */
        abort();
    }
}

static void translate_group(GICState *s, int irq, int cpu,
                            uint32_t *field, bool to_kernel)
{
    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

    if (to_kernel) {
        *field = GIC_TEST_GROUP(irq, cm);
    } else {
        if (*field & 1) {
            GIC_SET_GROUP(irq, cm);
        }
    }
}

static void translate_enabled(GICState *s, int irq, int cpu,
                              uint32_t *field, bool to_kernel)
{
    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

    if (to_kernel) {
        *field = GIC_TEST_ENABLED(irq, cm);
    } else {
        if (*field & 1) {
            GIC_SET_ENABLED(irq, cm);
        }
    }
}

static void translate_pending(GICState *s, int irq, int cpu,
                              uint32_t *field, bool to_kernel)
{
    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

    if (to_kernel) {
        *field = gic_test_pending(s, irq, cm);
    } else {
        if (*field & 1) {
            GIC_SET_PENDING(irq, cm);
            /* TODO: Capture if the level line is held high in the kernel */
        }
    }
}

static void translate_active(GICState *s, int irq, int cpu,
                             uint32_t *field, bool to_kernel)
{
    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

    if (to_kernel) {
        *field = GIC_TEST_ACTIVE(irq, cm);
    } else {
        if (*field & 1) {
            GIC_SET_ACTIVE(irq, cm);
        }
    }
}

static void translate_trigger(GICState *s, int irq, int cpu,
                              uint32_t *field, bool to_kernel)
{
    if (to_kernel) {
        *field = (GIC_TEST_EDGE_TRIGGER(irq)) ? 0x2 : 0x0;
    } else {
        if (*field & 0x2) {
            GIC_SET_EDGE_TRIGGER(irq);
        }
    }
}

static void translate_priority(GICState *s, int irq, int cpu,
                               uint32_t *field, bool to_kernel)
{
    if (to_kernel) {
        *field = GIC_GET_PRIORITY(irq, cpu) & 0xff;
    } else {
        gic_set_priority(s, cpu, irq, *field & 0xff, MEMTXATTRS_UNSPECIFIED);
    }
}

static void translate_targets(GICState *s, int irq, int cpu,
                              uint32_t *field, bool to_kernel)
{
    if (to_kernel) {
        *field = s->irq_target[irq] & 0xff;
    } else {
        s->irq_target[irq] = *field & 0xff;
    }
}

static void translate_sgisource(GICState *s, int irq, int cpu,
                                uint32_t *field, bool to_kernel)
{
    if (to_kernel) {
        *field = s->sgi_pending[irq][cpu] & 0xff;
    } else {
        s->sgi_pending[irq][cpu] = *field & 0xff;
    }
}

/* Read a register group from the kernel VGIC */
static void kvm_dist_get(GICState *s, uint32_t offset, int width,
                         int maxirq, vgic_translate_fn translate_fn)
{
    uint32_t reg;
    int i;
    int j;
    int irq;
    int cpu;
    int regsz = 32 / width; /* irqs per kernel register */
    uint32_t field;
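
    /*
     * Registers covering the banked per-CPU interrupts (IDs below
     * GIC_INTERNAL) are read once per CPU interface; registers covering
     * shared SPIs are only read through CPU 0's view, hence the loop
     * condition below.
     */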
    for_each_irq_reg(i, maxirq, width) {
        irq = i * regsz;
        cpu = 0;
        while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) {
            kvm_gicd_access(s, offset, cpu, &reg, false);
            for (j = 0; j < regsz; j++) {
                field = extract32(reg, j * width, width);
                translate_fn(s, irq + j, cpu, &field, false);
            }
            cpu++;
        }
        offset += 4;
    }
}

/* Write a register group to the kernel VGIC */
static void kvm_dist_put(GICState *s, uint32_t offset, int width,
                         int maxirq, vgic_translate_fn translate_fn)
{
    uint32_t reg;
    int i;
    int j;
    int irq;
    int cpu;
    int regsz = 32 / width; /* irqs per kernel register */
    uint32_t field;

    for_each_irq_reg(i, maxirq, width) {
        irq = i * regsz;
        cpu = 0;
        while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) {
            reg = 0;
            for (j = 0; j < regsz; j++) {
                translate_fn(s, irq + j, cpu, &field, true);
                reg = deposit32(reg, j * width, width, field);
            }
            kvm_gicd_access(s, offset, cpu, &reg, true);
            cpu++;
        }
        offset += 4;
    }
}

static void kvm_arm_gic_put(GICState *s)
{
    uint32_t reg;
    int i;
    int cpu;
    int num_cpu;
    int num_irq;

    /* Note: We do the restore in a slightly different order than the save
     * (where the order doesn't matter and is simply ordered according to the
     * register offset values). */

    /*****************************************************************
     * Distributor State
     */

    /* s->ctlr -> GICD_CTLR */
    reg = s->ctlr;
    kvm_gicd_access(s, 0x0, 0, &reg, true);

    /* Sanity checking on GICD_TYPER and s->num_irq, s->num_cpu */
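    /* GICD_TYPER: ITLinesNumber in bits [4:0] encodes the number of
     * supported interrupts in units of 32; CPUNumber in bits [7:5] is the
     * number of implemented CPU interfaces minus one. */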
    kvm_gicd_access(s, 0x4, 0, &reg, false);
    num_irq = ((reg & 0x1f) + 1) * 32;
    num_cpu = ((reg & 0xe0) >> 5) + 1;

    if (num_irq < s->num_irq) {
        fprintf(stderr, "Restoring %u IRQs, but kernel supports max %d\n",
                s->num_irq, num_irq);
        abort();
    } else if (num_cpu != s->num_cpu) {
        fprintf(stderr, "Restoring %u CPU interfaces, kernel only has %d\n",
                s->num_cpu, num_cpu);
        /* Did we not create the VCPUs in the kernel yet? */
        abort();
    }

    /* TODO: Consider checking compatibility with the IIDR ? */

    /* irq_state[n].enabled -> GICD_ISENABLERn */
    kvm_dist_put(s, 0x180, 1, s->num_irq, translate_clear);
    kvm_dist_put(s, 0x100, 1, s->num_irq, translate_enabled);

    /* irq_state[n].group -> GICD_IGROUPRn */
    kvm_dist_put(s, 0x80, 1, s->num_irq, translate_group);

    /* s->irq_target[irq] -> GICD_ITARGETSRn
     * (restore targets before pending to ensure the pending state is set on
     * the appropriate CPU interfaces in the kernel) */
    kvm_dist_put(s, 0x800, 8, s->num_irq, translate_targets);

    /* irq_state[n].trigger -> GICD_ICFGRn
     * (restore configuration registers before pending IRQs so we treat
     * level/edge correctly) */
    kvm_dist_put(s, 0xc00, 2, s->num_irq, translate_trigger);

    /* irq_state[n].pending + irq_state[n].level -> GICD_ISPENDRn */
    kvm_dist_put(s, 0x280, 1, s->num_irq, translate_clear);
    kvm_dist_put(s, 0x200, 1, s->num_irq, translate_pending);

    /* irq_state[n].active -> GICD_ISACTIVERn */
    kvm_dist_put(s, 0x380, 1, s->num_irq, translate_clear);
    kvm_dist_put(s, 0x300, 1, s->num_irq, translate_active);

    /* s->priorityX[irq] -> GICD_IPRIORITYRn */
    kvm_dist_put(s, 0x400, 8, s->num_irq, translate_priority);

    /* s->sgi_pending -> GICD_CPENDSGIRn / GICD_SPENDSGIRn */
    kvm_dist_put(s, 0xf10, 8, GIC_NR_SGIS, translate_clear);
    kvm_dist_put(s, 0xf20, 8, GIC_NR_SGIS, translate_sgisource);

    /*****************************************************************
     * CPU Interface(s) State
     */

    for (cpu = 0; cpu < s->num_cpu; cpu++) {
        /* s->cpu_ctlr[cpu] -> GICC_CTLR */
        reg = s->cpu_ctlr[cpu];
        kvm_gicc_access(s, 0x00, cpu, &reg, true);

        /* s->priority_mask[cpu] -> GICC_PMR */
        reg = (s->priority_mask[cpu] & 0xff);
        kvm_gicc_access(s, 0x04, cpu, &reg, true);

        /* s->bpr[cpu] -> GICC_BPR */
        reg = (s->bpr[cpu] & 0x7);
        kvm_gicc_access(s, 0x08, cpu, &reg, true);

        /* s->abpr[cpu] -> GICC_ABPR */
        reg = (s->abpr[cpu] & 0x7);
        kvm_gicc_access(s, 0x1c, cpu, &reg, true);

        /* s->apr[n][cpu] -> GICC_APRn */
        for (i = 0; i < 4; i++) {
            reg = s->apr[i][cpu];
            kvm_gicc_access(s, 0xd0 + i * 4, cpu, &reg, true);
        }
    }
}

static void kvm_arm_gic_get(GICState *s)
{
    uint32_t reg;
    int i;
    int cpu;

    /*****************************************************************
     * Distributor State
     */

    /* GICD_CTLR -> s->ctlr */
    kvm_gicd_access(s, 0x0, 0, &reg, false);
    s->ctlr = reg;

    /* Sanity checking on GICD_TYPER -> s->num_irq, s->num_cpu */
    kvm_gicd_access(s, 0x4, 0, &reg, false);
    s->num_irq = ((reg & 0x1f) + 1) * 32;
    s->num_cpu = ((reg & 0xe0) >> 5) + 1;

    if (s->num_irq > GIC_MAXIRQ) {
        fprintf(stderr, "Too many IRQs reported from the kernel: %d\n",
                s->num_irq);
        abort();
    }

    /* GICD_IIDR -> ? */
    kvm_gicd_access(s, 0x8, 0, &reg, false);

    /* Clear all the IRQ settings */
    for (i = 0; i < s->num_irq; i++) {
        memset(&s->irq_state[i], 0, sizeof(s->irq_state[0]));
    }

    /* GICD_IGROUPRn -> irq_state[n].group */
    kvm_dist_get(s, 0x80, 1, s->num_irq, translate_group);

    /* GICD_ISENABLERn -> irq_state[n].enabled */
    kvm_dist_get(s, 0x100, 1, s->num_irq, translate_enabled);

    /* GICD_ISPENDRn -> irq_state[n].pending + irq_state[n].level */
    kvm_dist_get(s, 0x200, 1, s->num_irq, translate_pending);

    /* GICD_ISACTIVERn -> irq_state[n].active */
    kvm_dist_get(s, 0x300, 1, s->num_irq, translate_active);

    /* GICD_ICFGRn -> irq_state[n].trigger */
    kvm_dist_get(s, 0xc00, 2, s->num_irq, translate_trigger);

    /* GICD_IPRIORITYRn -> s->priorityX[irq] */
    kvm_dist_get(s, 0x400, 8, s->num_irq, translate_priority);

    /* GICD_ITARGETSRn -> s->irq_target[irq] */
    kvm_dist_get(s, 0x800, 8, s->num_irq, translate_targets);

    /* GICD_CPENDSGIRn -> s->sgi_pending */
    kvm_dist_get(s, 0xf10, 8, GIC_NR_SGIS, translate_sgisource);

    /*****************************************************************
     * CPU Interface(s) State
     */

    for (cpu = 0; cpu < s->num_cpu; cpu++) {
        /* GICC_CTLR -> s->cpu_ctlr[cpu] */
        kvm_gicc_access(s, 0x00, cpu, &reg, false);
        s->cpu_ctlr[cpu] = reg;

        /* GICC_PMR -> s->priority_mask[cpu] */
        kvm_gicc_access(s, 0x04, cpu, &reg, false);
        s->priority_mask[cpu] = (reg & 0xff);

        /* GICC_BPR -> s->bpr[cpu] */
        kvm_gicc_access(s, 0x08, cpu, &reg, false);
        s->bpr[cpu] = (reg & 0x7);

        /* GICC_ABPR -> s->abpr[cpu] */
        kvm_gicc_access(s, 0x1c, cpu, &reg, false);
        s->abpr[cpu] = (reg & 0x7);

        /* GICC_APRn -> s->apr[n][cpu] */
        for (i = 0; i < 4; i++) {
            kvm_gicc_access(s, 0xd0 + i * 4, cpu, &reg, false);
            s->apr[i][cpu] = reg;
        }
    }
}

static void kvm_arm_gic_reset(DeviceState *dev)
{
    GICState *s = ARM_GIC_COMMON(dev);
    KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s);

    kgc->parent_reset(dev);

    if (kvm_arm_gic_can_save_restore(s)) {
        kvm_arm_gic_put(s);
    }
}

static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
{
    int i;
    GICState *s = KVM_ARM_GIC(dev);
    KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s);
    Error *local_err = NULL;
    int ret;

    kgc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->security_extn) {
        error_setg(errp, "the in-kernel VGIC does not implement the "
                   "security extensions");
        return;
    }

    if (!kvm_arm_gic_can_save_restore(s)) {
        error_setg(&s->migration_blocker, "This operating system kernel does "
                                          "not support vGICv2 migration");
        migrate_add_blocker(s->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(s->migration_blocker);
            return;
        }
    }

    gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL);

    for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
        qemu_irq irq = qdev_get_gpio_in(dev, i);
        kvm_irqchip_set_qemuirq_gsi(kvm_state, irq, i);
    }

    /* Try to create the device via the device control API */
    s->dev_fd = -1;
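    /*
     * dev_fd stays -1 if the device control API turns out to be unavailable;
     * kvm_arm_gic_can_save_restore() uses this to decide whether in-kernel
     * GIC state can be read back or restored.
     */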
    ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V2, false);
    if (ret >= 0) {
        s->dev_fd = ret;

        /* New-style API is used, we may have attributes */
        if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0)) {
            uint32_t numirqs = s->num_irq;
            kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0,
                              &numirqs, true, &error_abort);
        }
        /* Tell the kernel to complete VGIC initialization now */
        if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                                  KVM_DEV_ARM_VGIC_CTRL_INIT)) {
            kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                              KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true,
                              &error_abort);
        }
    } else if (ret != -ENODEV && ret != -ENOTSUP) {
        error_setg_errno(errp, -ret, "error creating in-kernel VGIC");
        return;
    }

    /* Distributor */
    kvm_arm_register_device(&s->iomem,
                            (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
                            | KVM_VGIC_V2_ADDR_TYPE_DIST,
                            KVM_DEV_ARM_VGIC_GRP_ADDR,
                            KVM_VGIC_V2_ADDR_TYPE_DIST,
                            s->dev_fd);

    /* CPU interface for current core. Unlike arm_gic, we don't
     * provide the "interface for core #N" memory regions, because
     * cores with a VGIC don't have those.
     */
    kvm_arm_register_device(&s->cpuiomem[0],
                            (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
                            | KVM_VGIC_V2_ADDR_TYPE_CPU,
                            KVM_DEV_ARM_VGIC_GRP_ADDR,
                            KVM_VGIC_V2_ADDR_TYPE_CPU,
                            s->dev_fd);

    if (kvm_has_gsi_routing()) {
        /* set up irq routing */
        kvm_init_irq_routing(kvm_state);
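        /*
         * Identity-map each external interrupt line to the GSI with the
         * same number (GSI i -> irqchip pin i), matching the GSI numbers
         * assigned to the qdev GPIO inputs above.
         */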
576 for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
577 kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
580 kvm_gsi_routing_allowed = true;
582 kvm_irqchip_commit_routes(kvm_state);

static void kvm_arm_gic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICCommonClass *agcc = ARM_GIC_COMMON_CLASS(klass);
    KVMARMGICClass *kgc = KVM_ARM_GIC_CLASS(klass);

    agcc->pre_save = kvm_arm_gic_get;
    agcc->post_load = kvm_arm_gic_put;
    kgc->parent_realize = dc->realize;
    kgc->parent_reset = dc->reset;
    dc->realize = kvm_arm_gic_realize;
    dc->reset = kvm_arm_gic_reset;
}

static const TypeInfo kvm_arm_gic_info = {
    .name = TYPE_KVM_ARM_GIC,
    .parent = TYPE_ARM_GIC_COMMON,
    .instance_size = sizeof(GICState),
    .class_init = kvm_arm_gic_class_init,
    .class_size = sizeof(KVMARMGICClass),
};

static void kvm_arm_gic_register_types(void)
{
    type_register_static(&kvm_arm_gic_info);
}

type_init(kvm_arm_gic_register_types)