/*
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */
9 #include "config-host.h"
15 #include "qemu-common.h"
25 #define EXPECTED_KVM_API_VERSION 12
27 #if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
28 #error libkvm: userspace and kernel version mismatch
31 #define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
33 static inline void clear_gsi(KVMState
*s
, unsigned int gsi
)
35 uint32_t *bitmap
= s
->used_gsi_bitmap
;
37 if (gsi
< s
->max_gsi
) {
38 bitmap
[gsi
/ 32] &= ~(1U << (gsi
% 32));
40 DPRINTF("Invalid GSI %u\n", gsi
);
44 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
45 int kvm_assign_pci_device(KVMState
*s
,
46 struct kvm_assigned_pci_dev
*assigned_dev
)
48 return kvm_vm_ioctl(s
, KVM_ASSIGN_PCI_DEVICE
, assigned_dev
);
51 static int kvm_old_assign_irq(KVMState
*s
,
52 struct kvm_assigned_irq
*assigned_irq
)
54 return kvm_vm_ioctl(s
, KVM_ASSIGN_IRQ
, assigned_irq
);
57 int kvm_device_intx_set_mask(KVMState
*s
, uint32_t dev_id
, bool masked
)
59 struct kvm_assigned_pci_dev assigned_dev
;
61 assigned_dev
.assigned_dev_id
= dev_id
;
62 assigned_dev
.flags
= masked
? KVM_DEV_ASSIGN_MASK_INTX
: 0;
63 return kvm_vm_ioctl(s
, KVM_ASSIGN_SET_INTX_MASK
, &assigned_dev
);
66 #ifdef KVM_CAP_ASSIGN_DEV_IRQ
67 int kvm_assign_irq(KVMState
*s
, struct kvm_assigned_irq
*assigned_irq
)
71 ret
= kvm_ioctl(s
, KVM_CHECK_EXTENSION
, KVM_CAP_ASSIGN_DEV_IRQ
);
73 return kvm_vm_ioctl(s
, KVM_ASSIGN_DEV_IRQ
, assigned_irq
);
76 return kvm_old_assign_irq(s
, assigned_irq
);
79 int kvm_deassign_irq(KVMState
*s
, struct kvm_assigned_irq
*assigned_irq
)
81 return kvm_vm_ioctl(s
, KVM_DEASSIGN_DEV_IRQ
, assigned_irq
);
84 int kvm_assign_irq(KVMState
*s
, struct kvm_assigned_irq
*assigned_irq
)
86 return kvm_old_assign_irq(s
, assigned_irq
);
91 #ifdef KVM_CAP_DEVICE_DEASSIGNMENT
92 int kvm_deassign_pci_device(KVMState
*s
,
93 struct kvm_assigned_pci_dev
*assigned_dev
)
95 return kvm_vm_ioctl(s
, KVM_DEASSIGN_PCI_DEVICE
, assigned_dev
);
99 int kvm_reinject_control(KVMState
*s
, int pit_reinject
)
101 #ifdef KVM_CAP_REINJECT_CONTROL
103 struct kvm_reinject_control control
;
105 control
.pit_reinject
= pit_reinject
;
107 r
= kvm_ioctl(s
, KVM_CHECK_EXTENSION
, KVM_CAP_REINJECT_CONTROL
);
109 return kvm_vm_ioctl(s
, KVM_REINJECT_CONTROL
, &control
);
/*
 * Drop every entry from the cached GSI routing table.  The kernel is
 * not updated here; a later commit of the table is required.
 * Returns 0, or -EINVAL when routing support is compiled out.
 */
int kvm_clear_gsi_routes(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    kvm_state->irq_routes->nr = 0;
    return 0;
#else
    return -EINVAL;
#endif
}
/*
 * Remove the routing entry matching @entry (same gsi, type, and
 * type-specific payload) from the cached routing table.  When the
 * deleted entry was the last user of its GSI, the GSI is released
 * back to the allocation bitmap.
 *
 * Returns 0 on success, -ESRCH if no matching entry exists, or
 * -ENOSYS when routing support is compiled out.
 *
 * NOTE(review): reconstructed from fragments; the original read @gsi
 * before any visible assignment, so the `gsi = entry->gsi` init was
 * restored here — confirm against upstream qemu-kvm.c.
 */
int kvm_del_routing_entry(struct kvm_irq_routing_entry *entry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;
    struct kvm_irq_routing_entry *e, *p;
    int i, gsi, found = 0;

    gsi = entry->gsi;

    for (i = 0; i < s->irq_routes->nr; ++i) {
        e = &s->irq_routes->entries[i];
        if (e->type == entry->type && e->gsi == gsi) {
            switch (e->type) {
            case KVM_IRQ_ROUTING_IRQCHIP: {
                if (e->u.irqchip.irqchip ==
                    entry->u.irqchip.irqchip
                    && e->u.irqchip.pin == entry->u.irqchip.pin) {
                    /* Swap-delete: move the last entry into the hole
                     * and shrink the table; order is irrelevant. */
                    p = &s->irq_routes->entries[--s->irq_routes->nr];
                    *e = *p;
                    found = 1;
                }
                break;
            }
            case KVM_IRQ_ROUTING_MSI: {
                if (e->u.msi.address_lo ==
                    entry->u.msi.address_lo
                    && e->u.msi.address_hi ==
                    entry->u.msi.address_hi
                    && e->u.msi.data == entry->u.msi.data) {
                    p = &s->irq_routes->entries[--s->irq_routes->nr];
                    *e = *p;
                    found = 1;
                }
                break;
            }
            default:
                break;
            }
            if (found) {
                /* If there are no other users of this GSI
                 * mark it available in the bitmap */
                for (i = 0; i < s->irq_routes->nr; i++) {
                    e = &s->irq_routes->entries[i];
                    if (e->gsi == gsi) {
                        break;
                    }
                }
                if (i == s->irq_routes->nr) {
                    clear_gsi(s, gsi);
                }
                return 0;
            }
        }
    }
    return -ESRCH;
#else
    return -ENOSYS;
#endif
}
/*
 * Replace the payload of the cached routing entry matching @entry
 * with the payload from @newentry.  Both must agree on gsi and type.
 *
 * Returns 0 on success, -EINVAL on gsi/type mismatch, -ESRCH if no
 * matching entry exists, or -ENOSYS when routing support is compiled
 * out.
 */
int kvm_update_routing_entry(struct kvm_irq_routing_entry *entry,
                             struct kvm_irq_routing_entry *newentry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;
    struct kvm_irq_routing_entry *e;
    int i;

    /* Only the type-specific payload may change in an update. */
    if (entry->gsi != newentry->gsi || entry->type != newentry->type) {
        return -EINVAL;
    }

    for (i = 0; i < s->irq_routes->nr; ++i) {
        e = &s->irq_routes->entries[i];
        if (e->type != entry->type || e->gsi != entry->gsi) {
            continue;
        }
        switch (e->type) {
        case KVM_IRQ_ROUTING_IRQCHIP:
            if (e->u.irqchip.irqchip == entry->u.irqchip.irqchip &&
                e->u.irqchip.pin == entry->u.irqchip.pin) {
                memcpy(&e->u.irqchip, &newentry->u.irqchip,
                       sizeof e->u.irqchip);
                return 0;
            }
            break;
        case KVM_IRQ_ROUTING_MSI:
            if (e->u.msi.address_lo == entry->u.msi.address_lo &&
                e->u.msi.address_hi == entry->u.msi.address_hi &&
                e->u.msi.data == entry->u.msi.data) {
                memcpy(&e->u.msi, &newentry->u.msi, sizeof e->u.msi);
                return 0;
            }
            break;
        default:
            break;
        }
    }
    return -ESRCH;
#else
    return -ENOSYS;
#endif
}
/*
 * Convenience wrapper: delete the irqchip route (gsi, irqchip, pin)
 * from the cached routing table.  Returns kvm_del_routing_entry()'s
 * result, or -ENOSYS when routing support is compiled out.
 */
int kvm_del_irq_route(int gsi, int irqchip, int pin)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry e;

    e.gsi = gsi;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    return kvm_del_routing_entry(&e);
#else
    return -ENOSYS;
#endif
}
245 int kvm_get_irq_route_gsi(void)
247 KVMState
*s
= kvm_state
;
249 uint32_t *buf
= s
->used_gsi_bitmap
;
251 /* Return the lowest unused GSI in the bitmap */
252 for (i
= 0; i
< s
->max_gsi
/ 32; i
++) {
258 return bit
- 1 + i
* 32;
264 static void kvm_msi_routing_entry(struct kvm_irq_routing_entry
*e
,
269 e
->type
= KVM_IRQ_ROUTING_MSI
;
271 e
->u
.msi
.address_lo
= msg
->addr_lo
;
272 e
->u
.msi
.address_hi
= msg
->addr_hi
;
273 e
->u
.msi
.data
= msg
->data
;
276 int kvm_msi_message_add(KVMMsiMessage
*msg
)
278 struct kvm_irq_routing_entry e
;
281 ret
= kvm_get_irq_route_gsi();
287 kvm_msi_routing_entry(&e
, msg
);
288 kvm_add_routing_entry(kvm_state
, &e
);
292 int kvm_msi_message_del(KVMMsiMessage
*msg
)
294 struct kvm_irq_routing_entry e
;
296 kvm_msi_routing_entry(&e
, msg
);
297 return kvm_del_routing_entry(&e
);
300 int kvm_msi_message_update(KVMMsiMessage
*old
, KVMMsiMessage
*new)
302 struct kvm_irq_routing_entry e1
, e2
;
306 if (memcmp(old
, new, sizeof(KVMMsiMessage
)) == 0) {
310 kvm_msi_routing_entry(&e1
, old
);
311 kvm_msi_routing_entry(&e2
, new);
313 ret
= kvm_update_routing_entry(&e1
, &e2
);
322 #ifdef KVM_CAP_DEVICE_MSIX
323 int kvm_assign_set_msix_nr(KVMState
*s
, struct kvm_assigned_msix_nr
*msix_nr
)
325 return kvm_vm_ioctl(s
, KVM_ASSIGN_SET_MSIX_NR
, msix_nr
);
328 int kvm_assign_set_msix_entry(KVMState
*s
,
329 struct kvm_assigned_msix_entry
*entry
)
331 return kvm_vm_ioctl(s
, KVM_ASSIGN_SET_MSIX_ENTRY
, entry
);
336 void kvm_hpet_disable_kpit(void)
338 struct kvm_pit_state2 ps2
;
340 kvm_get_pit2(kvm_state
, &ps2
);
341 ps2
.flags
|= KVM_PIT_FLAGS_HPET_LEGACY
;
342 kvm_set_pit2(kvm_state
, &ps2
);
345 void kvm_hpet_enable_kpit(void)
347 struct kvm_pit_state2 ps2
;
349 kvm_get_pit2(kvm_state
, &ps2
);
350 ps2
.flags
&= ~KVM_PIT_FLAGS_HPET_LEGACY
;
351 kvm_set_pit2(kvm_state
, &ps2
);
#if !defined(TARGET_I386)
/*
 * Non-x86 targets have no IRQ routing setup to do.
 * NOTE(review): body was missing from the mangled source; the
 * trivial success stub is reconstructed — confirm upstream.
 */
int kvm_arch_init_irq_routing(void)
{
    return 0;
}
#endif
362 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
363 typedef struct KVMIOPortRegion
{
367 QLIST_ENTRY(KVMIOPortRegion
) entry
;
370 static QLIST_HEAD(, KVMIOPortRegion
) ioport_regions
;
372 static void do_set_ioport_access(void *data
)
374 KVMIOPortRegion
*region
= data
;
375 bool enable
= region
->status
> 0;
378 r
= kvm_arch_set_ioport_access(region
->start
, region
->size
, enable
);
386 int kvm_add_ioport_region(unsigned long start
, unsigned long size
,
389 KVMIOPortRegion
*region
= g_malloc0(sizeof(KVMIOPortRegion
));
393 region
->start
= start
;
396 QLIST_INSERT_HEAD(&ioport_regions
, region
, entry
);
399 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
400 run_on_cpu(env
, do_set_ioport_access
, region
);
401 if (region
->status
< 0) {
403 kvm_remove_ioport_region(start
, size
, is_hot_plug
);
411 int kvm_remove_ioport_region(unsigned long start
, unsigned long size
,
414 KVMIOPortRegion
*region
, *tmp
;
418 QLIST_FOREACH_SAFE(region
, &ioport_regions
, entry
, tmp
) {
419 if (region
->start
== start
&& region
->size
== size
) {
423 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
424 run_on_cpu(env
, do_set_ioport_access
, region
);
427 QLIST_REMOVE(region
, entry
);
433 #endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
435 int kvm_update_ioport_access(CPUState
*env
)
437 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
438 KVMIOPortRegion
*region
;
441 assert(qemu_cpu_is_self(env
));
443 QLIST_FOREACH(region
, &ioport_regions
, entry
) {
444 bool enable
= region
->status
> 0;
446 r
= kvm_arch_set_ioport_access(region
->start
, region
->size
, enable
);
451 #endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */