/*
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */
9 #include "config-host.h"
15 #include "qemu-common.h"
25 #define EXPECTED_KVM_API_VERSION 12
27 #if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
28 #error libkvm: userspace and kernel version mismatch
/* Controls KVM_REINJECT_CONTROL: non-zero (the default) asks the
 * in-kernel PIT to re-inject missed timer ticks. */
int kvm_pit_reinject = 1;
/* Round x up to the next multiple of y; y must be a power of two. */
#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
37 static inline void set_gsi(KVMState
*s
, unsigned int gsi
)
39 uint32_t *bitmap
= s
->used_gsi_bitmap
;
41 if (gsi
< s
->max_gsi
) {
42 bitmap
[gsi
/ 32] |= 1U << (gsi
% 32);
44 DPRINTF("Invalid GSI %u\n", gsi
);
48 static inline void clear_gsi(KVMState
*s
, unsigned int gsi
)
50 uint32_t *bitmap
= s
->used_gsi_bitmap
;
52 if (gsi
< s
->max_gsi
) {
53 bitmap
[gsi
/ 32] &= ~(1U << (gsi
% 32));
55 DPRINTF("Invalid GSI %u\n", gsi
);
59 static int kvm_init_irq_routing(KVMState
*s
)
61 #ifdef KVM_CAP_IRQ_ROUTING
64 gsi_count
= kvm_check_extension(s
, KVM_CAP_IRQ_ROUTING
);
68 /* Round up so we can search ints using ffs */
69 gsi_bits
= ALIGN(gsi_count
, 32);
70 s
->used_gsi_bitmap
= g_malloc0(gsi_bits
/ 8);
71 s
->max_gsi
= gsi_bits
;
73 /* Mark any over-allocated bits as already in use */
74 for (i
= gsi_count
; i
< gsi_bits
; i
++) {
79 s
->irq_routes
= g_malloc0(sizeof(*s
->irq_routes
));
80 s
->nr_allocated_irq_routes
= 0;
82 r
= kvm_arch_init_irq_routing();
91 int kvm_create_irqchip(KVMState
*s
)
93 #ifdef KVM_CAP_IRQCHIP
96 if (!kvm_irqchip
|| !kvm_check_extension(s
, KVM_CAP_IRQCHIP
)) {
100 r
= kvm_vm_ioctl(s
, KVM_CREATE_IRQCHIP
);
102 fprintf(stderr
, "Create kernel PIC irqchip failed\n");
106 s
->irqchip_inject_ioctl
= KVM_IRQ_LINE
;
107 #if defined(KVM_CAP_IRQ_INJECT_STATUS) && defined(KVM_IRQ_LINE_STATUS)
108 if (kvm_check_extension(s
, KVM_CAP_IRQ_INJECT_STATUS
)) {
109 s
->irqchip_inject_ioctl
= KVM_IRQ_LINE_STATUS
;
112 s
->irqchip_in_kernel
= 1;
114 r
= kvm_init_irq_routing(s
);
123 #ifdef KVM_CAP_IRQCHIP
125 int kvm_set_irq(int irq
, int level
, int *status
)
127 struct kvm_irq_level event
;
130 if (!kvm_state
->irqchip_in_kernel
) {
135 r
= kvm_vm_ioctl(kvm_state
, kvm_state
->irqchip_inject_ioctl
,
138 perror("kvm_set_irq");
142 #ifdef KVM_CAP_IRQ_INJECT_STATUS
143 *status
= (kvm_state
->irqchip_inject_ioctl
== KVM_IRQ_LINE
) ?
153 int kvm_get_irqchip(KVMState
*s
, struct kvm_irqchip
*chip
)
157 if (!s
->irqchip_in_kernel
) {
160 r
= kvm_vm_ioctl(s
, KVM_GET_IRQCHIP
, chip
);
162 perror("kvm_get_irqchip\n");
167 int kvm_set_irqchip(KVMState
*s
, struct kvm_irqchip
*chip
)
171 if (!s
->irqchip_in_kernel
) {
174 r
= kvm_vm_ioctl(s
, KVM_SET_IRQCHIP
, chip
);
176 perror("kvm_set_irqchip\n");
183 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
184 int kvm_assign_pci_device(KVMState
*s
,
185 struct kvm_assigned_pci_dev
*assigned_dev
)
187 return kvm_vm_ioctl(s
, KVM_ASSIGN_PCI_DEVICE
, assigned_dev
);
190 static int kvm_old_assign_irq(KVMState
*s
,
191 struct kvm_assigned_irq
*assigned_irq
)
193 return kvm_vm_ioctl(s
, KVM_ASSIGN_IRQ
, assigned_irq
);
196 #ifdef KVM_CAP_ASSIGN_DEV_IRQ
197 int kvm_assign_irq(KVMState
*s
, struct kvm_assigned_irq
*assigned_irq
)
201 ret
= kvm_ioctl(s
, KVM_CHECK_EXTENSION
, KVM_CAP_ASSIGN_DEV_IRQ
);
203 return kvm_vm_ioctl(s
, KVM_ASSIGN_DEV_IRQ
, assigned_irq
);
206 return kvm_old_assign_irq(s
, assigned_irq
);
209 int kvm_deassign_irq(KVMState
*s
, struct kvm_assigned_irq
*assigned_irq
)
211 return kvm_vm_ioctl(s
, KVM_DEASSIGN_DEV_IRQ
, assigned_irq
);
214 int kvm_assign_irq(KVMState
*s
, struct kvm_assigned_irq
*assigned_irq
)
216 return kvm_old_assign_irq(s
, assigned_irq
);
221 #ifdef KVM_CAP_DEVICE_DEASSIGNMENT
222 int kvm_deassign_pci_device(KVMState
*s
,
223 struct kvm_assigned_pci_dev
*assigned_dev
)
225 return kvm_vm_ioctl(s
, KVM_DEASSIGN_PCI_DEVICE
, assigned_dev
);
229 int kvm_reinject_control(KVMState
*s
, int pit_reinject
)
231 #ifdef KVM_CAP_REINJECT_CONTROL
233 struct kvm_reinject_control control
;
235 control
.pit_reinject
= pit_reinject
;
237 r
= kvm_ioctl(s
, KVM_CHECK_EXTENSION
, KVM_CAP_REINJECT_CONTROL
);
239 return kvm_vm_ioctl(s
, KVM_REINJECT_CONTROL
, &control
);
/* Query whether the kernel supports GSI routing; 0 when built without
 * KVM_CAP_IRQ_ROUTING. */
int kvm_has_gsi_routing(void)
{
    int r = 0;

#ifdef KVM_CAP_IRQ_ROUTING
    r = kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#endif
    return r;
}
/* Drop every entry from the cached routing table (the kernel is only
 * updated on the next kvm_commit_irq_routes()). */
int kvm_clear_gsi_routes(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    kvm_state->irq_routes->nr = 0;
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Append a copy of @entry to the cached routing table, growing the
 * table (doubling) when full, and mark the entry's GSI as used.
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOSYS without
 * routing support.  The kernel table is only updated by
 * kvm_commit_irq_routes().
 * NOTE(review): the minimum-capacity clamp, realloc failure check and
 * the u-payload copy were reconstructed -- confirm against history. */
int kvm_add_routing_entry(struct kvm_irq_routing_entry *entry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;
    struct kvm_irq_routing *z;
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        /* realloc result goes into z first so s->irq_routes stays
         * valid if allocation fails */
        z = realloc(s->irq_routes, size);
        if (!z) {
            return -ENOMEM;
        }
        s->nr_allocated_irq_routes = n;
        s->irq_routes = z;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(s, entry->gsi);

    return 0;
#else
    return -ENOSYS;
#endif
}
/* Convenience wrapper: add an irqchip-pin -> GSI route. */
int kvm_add_irq_route(int gsi, int irqchip, int pin)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry e;

    e.gsi = gsi;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    return kvm_add_routing_entry(&e);
#else
    return -ENOSYS;
#endif
}
/* Remove the cached routing entry matching @entry (same gsi, type and
 * type-specific payload).  The hole is filled by moving the last table
 * entry into it; if no other entry still uses the GSI, it is returned
 * to the free bitmap.  Returns 0 on success, -ESRCH if not found.
 * NOTE(review): the `gsi = entry->gsi` initialization, switch
 * scaffolding, found-bookkeeping and return values were reconstructed
 * around the visible comparisons -- confirm against history. */
int kvm_del_routing_entry(struct kvm_irq_routing_entry *entry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;
    struct kvm_irq_routing_entry *e, *p;
    int i, gsi, found = 0;

    gsi = entry->gsi;

    for (i = 0; i < s->irq_routes->nr; ++i) {
        e = &s->irq_routes->entries[i];
        if (e->type == entry->type && e->gsi == gsi) {
            switch (e->type) {
            case KVM_IRQ_ROUTING_IRQCHIP:{
                    if (e->u.irqchip.irqchip ==
                        entry->u.irqchip.irqchip
                        && e->u.irqchip.pin == entry->u.irqchip.pin) {
                        /* compact: overwrite with the last entry */
                        p = &s->irq_routes->entries[--s->irq_routes->nr];
                        *e = *p;
                        found = 1;
                    }
                    break;
                }
            case KVM_IRQ_ROUTING_MSI:{
                    if (e->u.msi.address_lo ==
                        entry->u.msi.address_lo
                        && e->u.msi.address_hi ==
                        entry->u.msi.address_hi
                        && e->u.msi.data == entry->u.msi.data) {
                        p = &s->irq_routes->entries[--s->irq_routes->nr];
                        *e = *p;
                        found = 1;
                    }
                    break;
                }
            default:
                break;
            }
            if (found) {
                /* If there are no other users of this GSI
                 * mark it available in the bitmap */
                for (i = 0; i < s->irq_routes->nr; i++) {
                    e = &s->irq_routes->entries[i];
                    if (e->gsi == gsi) {
                        break;
                    }
                }
                if (i == s->irq_routes->nr) {
                    clear_gsi(s, gsi);
                }

                return 0;
            }
        }
    }
    return -ESRCH;
#else
    return -ENOSYS;
#endif
}
/* Replace the type-specific payload of the cached entry matching
 * @entry with @newentry's payload.  GSI and type must match between
 * the two arguments (-EINVAL otherwise); -ESRCH if no cached entry
 * matches.
 * NOTE(review): loop scaffolding, continue/return statements and the
 * final error returns were reconstructed around the visible
 * comparisons -- confirm against history. */
int kvm_update_routing_entry(struct kvm_irq_routing_entry *entry,
                             struct kvm_irq_routing_entry *newentry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;
    struct kvm_irq_routing_entry *e;
    int i;

    if (entry->gsi != newentry->gsi || entry->type != newentry->type) {
        return -EINVAL;
    }

    for (i = 0; i < s->irq_routes->nr; ++i) {
        e = &s->irq_routes->entries[i];
        if (e->type != entry->type || e->gsi != entry->gsi) {
            continue;
        }
        switch (e->type) {
        case KVM_IRQ_ROUTING_IRQCHIP:
            if (e->u.irqchip.irqchip == entry->u.irqchip.irqchip &&
                e->u.irqchip.pin == entry->u.irqchip.pin) {
                memcpy(&e->u.irqchip, &newentry->u.irqchip,
                       sizeof e->u.irqchip);
                return 0;
            }
            break;
        case KVM_IRQ_ROUTING_MSI:
            if (e->u.msi.address_lo == entry->u.msi.address_lo &&
                e->u.msi.address_hi == entry->u.msi.address_hi &&
                e->u.msi.data == entry->u.msi.data) {
                memcpy(&e->u.msi, &newentry->u.msi, sizeof e->u.msi);
                return 0;
            }
            break;
        default:
            break;
        }
    }
    return -ESRCH;
#else
    return -ENOSYS;
#endif
}
/* Convenience wrapper: delete an irqchip-pin -> GSI route. */
int kvm_del_irq_route(int gsi, int irqchip, int pin)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry e;

    e.gsi = gsi;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    return kvm_del_routing_entry(&e);
#else
    return -ENOSYS;
#endif
}
/* Push the cached routing table to the kernel in one
 * KVM_SET_GSI_ROUTING call. */
int kvm_commit_irq_routes(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;

    s->irq_routes->flags = 0;
    return kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
#else
    return -ENOSYS;
#endif
}
451 int kvm_get_irq_route_gsi(void)
453 KVMState
*s
= kvm_state
;
455 uint32_t *buf
= s
->used_gsi_bitmap
;
457 /* Return the lowest unused GSI in the bitmap */
458 for (i
= 0; i
< s
->max_gsi
/ 32; i
++) {
464 return bit
- 1 + i
* 32;
470 static void kvm_msi_routing_entry(struct kvm_irq_routing_entry
*e
,
475 e
->type
= KVM_IRQ_ROUTING_MSI
;
477 e
->u
.msi
.address_lo
= msg
->addr_lo
;
478 e
->u
.msi
.address_hi
= msg
->addr_hi
;
479 e
->u
.msi
.data
= msg
->data
;
482 int kvm_msi_message_add(KVMMsiMessage
*msg
)
484 struct kvm_irq_routing_entry e
;
487 ret
= kvm_get_irq_route_gsi();
493 kvm_msi_routing_entry(&e
, msg
);
494 return kvm_add_routing_entry(&e
);
497 int kvm_msi_message_del(KVMMsiMessage
*msg
)
499 struct kvm_irq_routing_entry e
;
501 kvm_msi_routing_entry(&e
, msg
);
502 return kvm_del_routing_entry(&e
);
505 int kvm_msi_message_update(KVMMsiMessage
*old
, KVMMsiMessage
*new)
507 struct kvm_irq_routing_entry e1
, e2
;
511 if (memcmp(old
, new, sizeof(KVMMsiMessage
)) == 0) {
515 kvm_msi_routing_entry(&e1
, old
);
516 kvm_msi_routing_entry(&e2
, new);
518 ret
= kvm_update_routing_entry(&e1
, &e2
);
527 #ifdef KVM_CAP_DEVICE_MSIX
528 int kvm_assign_set_msix_nr(KVMState
*s
, struct kvm_assigned_msix_nr
*msix_nr
)
530 return kvm_vm_ioctl(s
, KVM_ASSIGN_SET_MSIX_NR
, msix_nr
);
533 int kvm_assign_set_msix_entry(KVMState
*s
,
534 struct kvm_assigned_msix_entry
*entry
)
536 return kvm_vm_ioctl(s
, KVM_ASSIGN_SET_MSIX_ENTRY
, entry
);
541 void kvm_hpet_disable_kpit(void)
543 struct kvm_pit_state2 ps2
;
545 kvm_get_pit2(kvm_state
, &ps2
);
546 ps2
.flags
|= KVM_PIT_FLAGS_HPET_LEGACY
;
547 kvm_set_pit2(kvm_state
, &ps2
);
550 void kvm_hpet_enable_kpit(void)
552 struct kvm_pit_state2 ps2
;
554 kvm_get_pit2(kvm_state
, &ps2
);
555 ps2
.flags
&= ~KVM_PIT_FLAGS_HPET_LEGACY
;
556 kvm_set_pit2(kvm_state
, &ps2
);
#if !defined(TARGET_I386)
/* Non-x86 targets install no default routes.
 * NOTE(review): body and closing #endif reconstructed from the visible
 * stub header -- confirm against history. */
int kvm_arch_init_irq_routing(void)
{
    return 0;
}
#endif
567 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
568 typedef struct KVMIOPortRegion
{
572 QLIST_ENTRY(KVMIOPortRegion
) entry
;
575 static QLIST_HEAD(, KVMIOPortRegion
) ioport_regions
;
577 static void do_set_ioport_access(void *data
)
579 KVMIOPortRegion
*region
= data
;
580 bool enable
= region
->status
> 0;
583 r
= kvm_arch_set_ioport_access(region
->start
, region
->size
, enable
);
591 int kvm_add_ioport_region(unsigned long start
, unsigned long size
,
594 KVMIOPortRegion
*region
= g_malloc0(sizeof(KVMIOPortRegion
));
598 region
->start
= start
;
601 QLIST_INSERT_HEAD(&ioport_regions
, region
, entry
);
604 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
605 run_on_cpu(env
, do_set_ioport_access
, region
);
606 if (region
->status
< 0) {
608 kvm_remove_ioport_region(start
, size
, is_hot_plug
);
616 int kvm_remove_ioport_region(unsigned long start
, unsigned long size
,
619 KVMIOPortRegion
*region
, *tmp
;
623 QLIST_FOREACH_SAFE(region
, &ioport_regions
, entry
, tmp
) {
624 if (region
->start
== start
&& region
->size
== size
) {
628 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
629 run_on_cpu(env
, do_set_ioport_access
, region
);
632 QLIST_REMOVE(region
, entry
);
638 #endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
640 int kvm_update_ioport_access(CPUState
*env
)
642 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
643 KVMIOPortRegion
*region
;
646 assert(qemu_cpu_is_self(env
));
648 QLIST_FOREACH(region
, &ioport_regions
, entry
) {
649 bool enable
= region
->status
> 0;
651 r
= kvm_arch_set_ioport_access(region
->start
, region
->size
, enable
);
656 #endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */