2 * Copyright (C) 2001 MandrakeSoft S.A.
7 * http://www.linux-mandrake.com/
8 * http://www.mandrakesoft.com/
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 * Yunhong Jiang <yunhong.jiang@intel.com>
25 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
26 * Based on Xen 3.1 code.
29 #include <linux/kvm_host.h>
30 #include <linux/kvm.h>
32 #include <linux/highmem.h>
33 #include <linux/smp.h>
34 #include <linux/hrtimer.h>
36 #include <asm/processor.h>
38 #include <asm/current.h>
/*
 * Debug tracing for the emulated I/O APIC.  The noisy printk variant is
 * compiled out by default; flip the #if to enable it.
 */
#if 0
#define ioapic_debug(fmt, arg...) printk(KERN_WARNING fmt, ##arg)
#else
#define ioapic_debug(fmt, arg...)
#endif
/* Forward declaration: deliver the interrupt programmed on pin @irq. */
static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq);
51 static unsigned long ioapic_read_indirect(struct kvm_ioapic
*ioapic
,
55 unsigned long result
= 0;
57 switch (ioapic
->ioregsel
) {
58 case IOAPIC_REG_VERSION
:
59 result
= ((((IOAPIC_NUM_PINS
- 1) & 0xff) << 16)
60 | (IOAPIC_VERSION_ID
& 0xff));
63 case IOAPIC_REG_APIC_ID
:
64 case IOAPIC_REG_ARB_ID
:
65 result
= ((ioapic
->id
& 0xf) << 24);
70 u32 redir_index
= (ioapic
->ioregsel
- 0x10) >> 1;
73 ASSERT(redir_index
< IOAPIC_NUM_PINS
);
75 redir_content
= ioapic
->redirtbl
[redir_index
].bits
;
76 result
= (ioapic
->ioregsel
& 0x1) ?
77 (redir_content
>> 32) & 0xffffffff :
78 redir_content
& 0xffffffff;
86 static int ioapic_service(struct kvm_ioapic
*ioapic
, unsigned int idx
)
88 union ioapic_redir_entry
*pent
;
91 pent
= &ioapic
->redirtbl
[idx
];
93 if (!pent
->fields
.mask
) {
94 injected
= ioapic_deliver(ioapic
, idx
);
95 if (injected
&& pent
->fields
.trig_mode
== IOAPIC_LEVEL_TRIG
)
96 pent
->fields
.remote_irr
= 1;
98 if (!pent
->fields
.trig_mode
)
99 ioapic
->irr
&= ~(1 << idx
);
104 static void ioapic_write_indirect(struct kvm_ioapic
*ioapic
, u32 val
)
107 bool mask_before
, mask_after
;
109 switch (ioapic
->ioregsel
) {
110 case IOAPIC_REG_VERSION
:
111 /* Writes are ignored. */
114 case IOAPIC_REG_APIC_ID
:
115 ioapic
->id
= (val
>> 24) & 0xf;
118 case IOAPIC_REG_ARB_ID
:
122 index
= (ioapic
->ioregsel
- 0x10) >> 1;
124 ioapic_debug("change redir index %x val %x\n", index
, val
);
125 if (index
>= IOAPIC_NUM_PINS
)
127 mask_before
= ioapic
->redirtbl
[index
].fields
.mask
;
128 if (ioapic
->ioregsel
& 1) {
129 ioapic
->redirtbl
[index
].bits
&= 0xffffffff;
130 ioapic
->redirtbl
[index
].bits
|= (u64
) val
<< 32;
132 ioapic
->redirtbl
[index
].bits
&= ~0xffffffffULL
;
133 ioapic
->redirtbl
[index
].bits
|= (u32
) val
;
134 ioapic
->redirtbl
[index
].fields
.remote_irr
= 0;
136 mask_after
= ioapic
->redirtbl
[index
].fields
.mask
;
137 if (mask_before
!= mask_after
)
138 kvm_fire_mask_notifiers(ioapic
->kvm
, index
, mask_after
);
139 if (ioapic
->irr
& (1 << index
))
140 ioapic_service(ioapic
, index
);
145 static int ioapic_inj_irq(struct kvm_ioapic
*ioapic
,
146 struct kvm_vcpu
*vcpu
,
147 u8 vector
, u8 trig_mode
, u8 delivery_mode
)
149 ioapic_debug("irq %d trig %d deliv %d\n", vector
, trig_mode
,
152 ASSERT((delivery_mode
== IOAPIC_FIXED
) ||
153 (delivery_mode
== IOAPIC_LOWEST_PRIORITY
));
155 return kvm_apic_set_irq(vcpu
, vector
, trig_mode
);
/* Deliver an NMI to @vcpu. */
static void ioapic_inj_nmi(struct kvm_vcpu *vcpu)
{
	kvm_inject_nmi(vcpu);
}
164 u32
kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic
*ioapic
, u8 dest
,
169 struct kvm
*kvm
= ioapic
->kvm
;
170 struct kvm_vcpu
*vcpu
;
172 ioapic_debug("dest %d dest_mode %d\n", dest
, dest_mode
);
174 if (dest_mode
== 0) { /* Physical mode. */
175 if (dest
== 0xFF) { /* Broadcast. */
176 for (i
= 0; i
< KVM_MAX_VCPUS
; ++i
)
177 if (kvm
->vcpus
[i
] && kvm
->vcpus
[i
]->arch
.apic
)
181 for (i
= 0; i
< KVM_MAX_VCPUS
; ++i
) {
182 vcpu
= kvm
->vcpus
[i
];
185 if (kvm_apic_match_physical_addr(vcpu
->arch
.apic
, dest
)) {
191 } else if (dest
!= 0) /* Logical mode, MDA non-zero. */
192 for (i
= 0; i
< KVM_MAX_VCPUS
; ++i
) {
193 vcpu
= kvm
->vcpus
[i
];
196 if (vcpu
->arch
.apic
&&
197 kvm_apic_match_logical_addr(vcpu
->arch
.apic
, dest
))
198 mask
|= 1 << vcpu
->vcpu_id
;
200 ioapic_debug("mask %x\n", mask
);
204 static int ioapic_deliver(struct kvm_ioapic
*ioapic
, int irq
)
206 u8 dest
= ioapic
->redirtbl
[irq
].fields
.dest_id
;
207 u8 dest_mode
= ioapic
->redirtbl
[irq
].fields
.dest_mode
;
208 u8 delivery_mode
= ioapic
->redirtbl
[irq
].fields
.delivery_mode
;
209 u8 vector
= ioapic
->redirtbl
[irq
].fields
.vector
;
210 u8 trig_mode
= ioapic
->redirtbl
[irq
].fields
.trig_mode
;
212 struct kvm_vcpu
*vcpu
;
215 ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
216 "vector=%x trig_mode=%x\n",
217 dest
, dest_mode
, delivery_mode
, vector
, trig_mode
);
219 deliver_bitmask
= kvm_ioapic_get_delivery_bitmask(ioapic
, dest
,
221 if (!deliver_bitmask
) {
222 ioapic_debug("no target on destination\n");
226 switch (delivery_mode
) {
227 case IOAPIC_LOWEST_PRIORITY
:
228 vcpu
= kvm_get_lowest_prio_vcpu(ioapic
->kvm
, vector
,
232 vcpu
= ioapic
->kvm
->vcpus
[0];
235 r
= ioapic_inj_irq(ioapic
, vcpu
, vector
,
236 trig_mode
, delivery_mode
);
238 ioapic_debug("null lowest prio vcpu: "
239 "mask=%x vector=%x delivery_mode=%x\n",
240 deliver_bitmask
, vector
, IOAPIC_LOWEST_PRIORITY
);
247 for (vcpu_id
= 0; deliver_bitmask
!= 0; vcpu_id
++) {
248 if (!(deliver_bitmask
& (1 << vcpu_id
)))
250 deliver_bitmask
&= ~(1 << vcpu_id
);
251 vcpu
= ioapic
->kvm
->vcpus
[vcpu_id
];
255 r
+= ioapic_inj_irq(ioapic
, vcpu
, vector
,
256 trig_mode
, delivery_mode
);
261 for (vcpu_id
= 0; deliver_bitmask
!= 0; vcpu_id
++) {
262 if (!(deliver_bitmask
& (1 << vcpu_id
)))
264 deliver_bitmask
&= ~(1 << vcpu_id
);
265 vcpu
= ioapic
->kvm
->vcpus
[vcpu_id
];
267 ioapic_inj_nmi(vcpu
);
271 ioapic_debug("NMI to vcpu %d failed\n",
276 printk(KERN_WARNING
"Unsupported delivery mode %d\n",
283 int kvm_ioapic_set_irq(struct kvm_ioapic
*ioapic
, int irq
, int level
)
285 u32 old_irr
= ioapic
->irr
;
287 union ioapic_redir_entry entry
;
290 if (irq
>= 0 && irq
< IOAPIC_NUM_PINS
) {
291 entry
= ioapic
->redirtbl
[irq
];
292 level
^= entry
.fields
.polarity
;
294 ioapic
->irr
&= ~mask
;
297 if ((!entry
.fields
.trig_mode
&& old_irr
!= ioapic
->irr
)
298 || !entry
.fields
.remote_irr
)
299 ret
= ioapic_service(ioapic
, irq
);
305 static void __kvm_ioapic_update_eoi(struct kvm_ioapic
*ioapic
, int pin
,
308 union ioapic_redir_entry
*ent
;
310 ent
= &ioapic
->redirtbl
[pin
];
312 kvm_notify_acked_irq(ioapic
->kvm
, KVM_IRQCHIP_IOAPIC
, pin
);
314 if (trigger_mode
== IOAPIC_LEVEL_TRIG
) {
315 ASSERT(ent
->fields
.trig_mode
== IOAPIC_LEVEL_TRIG
);
316 ent
->fields
.remote_irr
= 0;
317 if (!ent
->fields
.mask
&& (ioapic
->irr
& (1 << pin
)))
318 ioapic_service(ioapic
, pin
);
322 void kvm_ioapic_update_eoi(struct kvm
*kvm
, int vector
, int trigger_mode
)
324 struct kvm_ioapic
*ioapic
= kvm
->arch
.vioapic
;
327 for (i
= 0; i
< IOAPIC_NUM_PINS
; i
++)
328 if (ioapic
->redirtbl
[i
].fields
.vector
== vector
)
329 __kvm_ioapic_update_eoi(ioapic
, i
, trigger_mode
);
332 static int ioapic_in_range(struct kvm_io_device
*this, gpa_t addr
,
333 int len
, int is_write
)
335 struct kvm_ioapic
*ioapic
= (struct kvm_ioapic
*)this->private;
337 return ((addr
>= ioapic
->base_address
&&
338 (addr
< ioapic
->base_address
+ IOAPIC_MEM_LENGTH
)));
341 static void ioapic_mmio_read(struct kvm_io_device
*this, gpa_t addr
, int len
,
344 struct kvm_ioapic
*ioapic
= (struct kvm_ioapic
*)this->private;
347 ioapic_debug("addr %lx\n", (unsigned long)addr
);
348 ASSERT(!(addr
& 0xf)); /* check alignment */
352 case IOAPIC_REG_SELECT
:
353 result
= ioapic
->ioregsel
;
356 case IOAPIC_REG_WINDOW
:
357 result
= ioapic_read_indirect(ioapic
, addr
, len
);
366 *(u64
*) val
= result
;
371 memcpy(val
, (char *)&result
, len
);
374 printk(KERN_WARNING
"ioapic: wrong length %d\n", len
);
378 static void ioapic_mmio_write(struct kvm_io_device
*this, gpa_t addr
, int len
,
381 struct kvm_ioapic
*ioapic
= (struct kvm_ioapic
*)this->private;
384 ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
385 (void*)addr
, len
, val
);
386 ASSERT(!(addr
& 0xf)); /* check alignment */
387 if (len
== 4 || len
== 8)
390 printk(KERN_WARNING
"ioapic: Unsupported size %d\n", len
);
396 case IOAPIC_REG_SELECT
:
397 ioapic
->ioregsel
= data
;
400 case IOAPIC_REG_WINDOW
:
401 ioapic_write_indirect(ioapic
, data
);
405 kvm_ioapic_update_eoi(ioapic
->kvm
, data
, IOAPIC_LEVEL_TRIG
);
414 void kvm_ioapic_reset(struct kvm_ioapic
*ioapic
)
418 for (i
= 0; i
< IOAPIC_NUM_PINS
; i
++)
419 ioapic
->redirtbl
[i
].fields
.mask
= 1;
420 ioapic
->base_address
= IOAPIC_DEFAULT_BASE_ADDRESS
;
421 ioapic
->ioregsel
= 0;
426 int kvm_ioapic_init(struct kvm
*kvm
)
428 struct kvm_ioapic
*ioapic
;
430 ioapic
= kzalloc(sizeof(struct kvm_ioapic
), GFP_KERNEL
);
433 kvm
->arch
.vioapic
= ioapic
;
434 kvm_ioapic_reset(ioapic
);
435 ioapic
->dev
.read
= ioapic_mmio_read
;
436 ioapic
->dev
.write
= ioapic_mmio_write
;
437 ioapic
->dev
.in_range
= ioapic_in_range
;
438 ioapic
->dev
.private = ioapic
;
440 kvm_io_bus_register_dev(&kvm
->mmio_bus
, &ioapic
->dev
);