/*
 *  ioapic.c IOAPIC emulation logic
 *
 *  Copyright (c) 2004-2005 Fabrice Bellard
 *
 *  Split the ioapic logic from apic.c
 *  Xiantao Zhang <xiantao.zhang@intel.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
23 #include "qemu/osdep.h"
24 #include "qapi/error.h"
25 #include "monitor/monitor.h"
27 #include "hw/i386/pc.h"
28 #include "hw/i386/apic.h"
29 #include "hw/i386/ioapic.h"
30 #include "hw/i386/ioapic_internal.h"
31 #include "hw/pci/msi.h"
32 #include "sysemu/kvm.h"
33 #include "hw/i386/apic-msidef.h"
34 #include "hw/i386/x86-iommu.h"
/* Bit positions of the delivery mode, polarity and trigger mode fields
 * of an APIC interrupt entry.
 * NOTE(review): these are not referenced by the code visible in this
 * chunk (the parse code below uses the IOAPIC_LVT_* shifts instead);
 * presumably used elsewhere -- confirm before removing. */
#define APIC_DELIVERY_MODE_SHIFT 8
#define APIC_POLARITY_SHIFT 14
#define APIC_TRIG_MODE_SHIFT 15
41 static IOAPICCommonState
*ioapics
[MAX_IOAPICS
];
43 /* global variable from ioapic_common.c */
/* Decoded form of one 64-bit I/O redirection table entry, plus the MSI
 * message it maps to.  Filled in by ioapic_entry_parse().
 * NOTE(review): field list reconstructed from the accesses in
 * ioapic_entry_parse(); widths chosen to match the extracted bit
 * ranges (1-bit flags, 16-bit dest_idx, 32-bit MSI address/data). */
struct ioapic_entry_info {
    /* fields parsed from IOAPIC entries */
    uint8_t masked;
    uint8_t trig_mode;
    uint16_t dest_idx;
    uint8_t dest_mode;
    uint8_t delivery_mode;
    uint8_t vector;

    /* MSI message generated from above parsed fields */
    uint32_t addr;
    uint32_t data;
};
60 static void ioapic_entry_parse(uint64_t entry
, struct ioapic_entry_info
*info
)
62 memset(info
, 0, sizeof(*info
));
63 info
->masked
= (entry
>> IOAPIC_LVT_MASKED_SHIFT
) & 1;
64 info
->trig_mode
= (entry
>> IOAPIC_LVT_TRIGGER_MODE_SHIFT
) & 1;
66 * By default, this would be dest_id[8] + reserved[8]. When IR
67 * is enabled, this would be interrupt_index[15] +
68 * interrupt_format[1]. This field never means anything, but
69 * only used to generate corresponding MSI.
71 info
->dest_idx
= (entry
>> IOAPIC_LVT_DEST_IDX_SHIFT
) & 0xffff;
72 info
->dest_mode
= (entry
>> IOAPIC_LVT_DEST_MODE_SHIFT
) & 1;
73 info
->delivery_mode
= (entry
>> IOAPIC_LVT_DELIV_MODE_SHIFT
) \
75 if (info
->delivery_mode
== IOAPIC_DM_EXTINT
) {
76 info
->vector
= pic_read_irq(isa_pic
);
78 info
->vector
= entry
& IOAPIC_VECTOR_MASK
;
81 info
->addr
= APIC_DEFAULT_ADDRESS
| \
82 (info
->dest_idx
<< MSI_ADDR_DEST_IDX_SHIFT
) | \
83 (info
->dest_mode
<< MSI_ADDR_DEST_MODE_SHIFT
);
84 info
->data
= (info
->vector
<< MSI_DATA_VECTOR_SHIFT
) | \
85 (info
->trig_mode
<< MSI_DATA_TRIGGER_SHIFT
) | \
86 (info
->delivery_mode
<< MSI_DATA_DELIVERY_MODE_SHIFT
);
89 static void ioapic_service(IOAPICCommonState
*s
)
91 AddressSpace
*ioapic_as
= PC_MACHINE(qdev_get_machine())->ioapic_as
;
92 struct ioapic_entry_info info
;
97 for (i
= 0; i
< IOAPIC_NUM_PINS
; i
++) {
102 entry
= s
->ioredtbl
[i
];
103 ioapic_entry_parse(entry
, &info
);
105 if (info
.trig_mode
== IOAPIC_TRIGGER_EDGE
) {
108 coalesce
= s
->ioredtbl
[i
] & IOAPIC_LVT_REMOTE_IRR
;
109 trace_ioapic_set_remote_irr(i
);
110 s
->ioredtbl
[i
] |= IOAPIC_LVT_REMOTE_IRR
;
114 /* We are level triggered interrupts, and the
115 * guest should be still working on previous one,
121 if (kvm_irqchip_is_split()) {
122 if (info
.trig_mode
== IOAPIC_TRIGGER_EDGE
) {
123 kvm_set_irq(kvm_state
, i
, 1);
124 kvm_set_irq(kvm_state
, i
, 0);
126 kvm_set_irq(kvm_state
, i
, 1);
132 /* No matter whether IR is enabled, we translate
133 * the IOAPIC message into a MSI one, and its
134 * address space will decide whether we need a
136 stl_le_phys(ioapic_as
, info
.addr
, info
.data
);
142 #define SUCCESSIVE_IRQ_MAX_COUNT 10000
144 static void delayed_ioapic_service_cb(void *opaque
)
146 IOAPICCommonState
*s
= opaque
;
151 static void ioapic_set_irq(void *opaque
, int vector
, int level
)
153 IOAPICCommonState
*s
= opaque
;
155 /* ISA IRQs map to GSI 1-1 except for IRQ0 which maps
156 * to GSI 2. GSI maps to ioapic 1-1. This is not
157 * the cleanest way of doing it but it should work. */
159 trace_ioapic_set_irq(vector
, level
);
160 ioapic_stat_update_irq(s
, vector
, level
);
164 if (vector
< IOAPIC_NUM_PINS
) {
165 uint32_t mask
= 1 << vector
;
166 uint64_t entry
= s
->ioredtbl
[vector
];
168 if (((entry
>> IOAPIC_LVT_TRIGGER_MODE_SHIFT
) & 1) ==
169 IOAPIC_TRIGGER_LEVEL
) {
170 /* level triggered */
173 if (!(entry
& IOAPIC_LVT_REMOTE_IRR
)) {
180 /* According to the 82093AA manual, we must ignore edge requests
181 * if the input pin is masked. */
182 if (level
&& !(entry
& IOAPIC_LVT_MASKED
)) {
190 static void ioapic_update_kvm_routes(IOAPICCommonState
*s
)
195 if (kvm_irqchip_is_split()) {
196 for (i
= 0; i
< IOAPIC_NUM_PINS
; i
++) {
198 struct ioapic_entry_info info
;
199 ioapic_entry_parse(s
->ioredtbl
[i
], &info
);
200 msg
.address
= info
.addr
;
201 msg
.data
= info
.data
;
202 kvm_irqchip_update_msi_route(kvm_state
, i
, msg
, NULL
);
204 kvm_irqchip_commit_routes(kvm_state
);
210 static void ioapic_iec_notifier(void *private, bool global
,
211 uint32_t index
, uint32_t mask
)
213 IOAPICCommonState
*s
= (IOAPICCommonState
*)private;
214 /* For simplicity, we just update all the routes */
215 ioapic_update_kvm_routes(s
);
219 void ioapic_eoi_broadcast(int vector
)
221 IOAPICCommonState
*s
;
225 trace_ioapic_eoi_broadcast(vector
);
227 for (i
= 0; i
< MAX_IOAPICS
; i
++) {
232 for (n
= 0; n
< IOAPIC_NUM_PINS
; n
++) {
233 entry
= s
->ioredtbl
[n
];
235 if ((entry
& IOAPIC_VECTOR_MASK
) != vector
||
236 ((entry
>> IOAPIC_LVT_TRIGGER_MODE_SHIFT
) & 1) != IOAPIC_TRIGGER_LEVEL
) {
240 if (!(entry
& IOAPIC_LVT_REMOTE_IRR
)) {
244 trace_ioapic_clear_remote_irr(n
, vector
);
245 s
->ioredtbl
[n
] = entry
& ~IOAPIC_LVT_REMOTE_IRR
;
247 if (!(entry
& IOAPIC_LVT_MASKED
) && (s
->irr
& (1 << n
))) {
248 ++s
->irq_eoi
[vector
];
249 if (s
->irq_eoi
[vector
] >= SUCCESSIVE_IRQ_MAX_COUNT
) {
251 * Real hardware does not deliver the interrupt immediately
252 * during eoi broadcast, and this lets a buggy guest make
253 * slow progress even if it does not correctly handle a
254 * level-triggered interrupt. Emulate this behavior if we
255 * detect an interrupt storm.
257 s
->irq_eoi
[vector
] = 0;
258 timer_mod_anticipate(s
->delayed_ioapic_service_timer
,
259 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) +
260 NANOSECONDS_PER_SECOND
/ 100);
261 trace_ioapic_eoi_delayed_reassert(vector
);
266 s
->irq_eoi
[vector
] = 0;
273 ioapic_mem_read(void *opaque
, hwaddr addr
, unsigned int size
)
275 IOAPICCommonState
*s
= opaque
;
282 case IOAPIC_IOREGSEL
:
289 switch (s
->ioregsel
) {
292 val
= s
->id
<< IOAPIC_ID_SHIFT
;
296 ((IOAPIC_NUM_PINS
- 1) << IOAPIC_VER_ENTRIES_SHIFT
);
299 index
= (s
->ioregsel
- IOAPIC_REG_REDTBL_BASE
) >> 1;
300 if (index
>= 0 && index
< IOAPIC_NUM_PINS
) {
301 if (s
->ioregsel
& 1) {
302 val
= s
->ioredtbl
[index
] >> 32;
304 val
= s
->ioredtbl
[index
] & 0xffffffff;
311 trace_ioapic_mem_read(addr
, s
->ioregsel
, size
, val
);
317 * This is to satisfy the hack in Linux kernel. One hack of it is to
318 * simulate clearing the Remote IRR bit of IOAPIC entry using the
321 * "For IO-APIC's with EOI register, we use that to do an explicit EOI.
322 * Otherwise, we simulate the EOI message manually by changing the trigger
323 * mode to edge and then back to level, with RTE being masked during
326 * (See linux kernel __eoi_ioapic_pin() comment in commit c0205701)
328 * This is based on the assumption that, Remote IRR bit will be
329 * cleared by IOAPIC hardware when configured as edge-triggered
332 * Without this, level-triggered interrupts in IR mode might fail to
336 ioapic_fix_edge_remote_irr(uint64_t *entry
)
338 if (!(*entry
& IOAPIC_LVT_TRIGGER_MODE
)) {
339 /* Edge-triggered interrupts, make sure remote IRR is zero */
340 *entry
&= ~((uint64_t)IOAPIC_LVT_REMOTE_IRR
);
345 ioapic_mem_write(void *opaque
, hwaddr addr
, uint64_t val
,
348 IOAPICCommonState
*s
= opaque
;
352 trace_ioapic_mem_write(addr
, s
->ioregsel
, size
, val
);
355 case IOAPIC_IOREGSEL
:
362 switch (s
->ioregsel
) {
364 s
->id
= (val
>> IOAPIC_ID_SHIFT
) & IOAPIC_ID_MASK
;
370 index
= (s
->ioregsel
- IOAPIC_REG_REDTBL_BASE
) >> 1;
371 if (index
>= 0 && index
< IOAPIC_NUM_PINS
) {
372 uint64_t ro_bits
= s
->ioredtbl
[index
] & IOAPIC_RO_BITS
;
373 if (s
->ioregsel
& 1) {
374 s
->ioredtbl
[index
] &= 0xffffffff;
375 s
->ioredtbl
[index
] |= (uint64_t)val
<< 32;
377 s
->ioredtbl
[index
] &= ~0xffffffffULL
;
378 s
->ioredtbl
[index
] |= val
;
380 /* restore RO bits */
381 s
->ioredtbl
[index
] &= IOAPIC_RW_BITS
;
382 s
->ioredtbl
[index
] |= ro_bits
;
383 ioapic_fix_edge_remote_irr(&s
->ioredtbl
[index
]);
389 /* Explicit EOI is only supported for IOAPIC version 0x20 */
390 if (size
!= 4 || s
->version
!= 0x20) {
393 ioapic_eoi_broadcast(val
);
397 ioapic_update_kvm_routes(s
);
400 static const MemoryRegionOps ioapic_io_ops
= {
401 .read
= ioapic_mem_read
,
402 .write
= ioapic_mem_write
,
403 .endianness
= DEVICE_NATIVE_ENDIAN
,
406 static void ioapic_machine_done_notify(Notifier
*notifier
, void *data
)
409 IOAPICCommonState
*s
= container_of(notifier
, IOAPICCommonState
,
412 if (kvm_irqchip_is_split()) {
413 X86IOMMUState
*iommu
= x86_iommu_get_default();
415 /* Register this IOAPIC with IOMMU IEC notifier, so that
416 * when there are IR invalidates, we can be notified to
417 * update kernel IR cache. */
418 x86_iommu_iec_register_notifier(iommu
, ioapic_iec_notifier
, s
);
424 #define IOAPIC_VER_DEF 0x20
426 static void ioapic_realize(DeviceState
*dev
, Error
**errp
)
428 IOAPICCommonState
*s
= IOAPIC_COMMON(dev
);
430 if (s
->version
!= 0x11 && s
->version
!= 0x20) {
431 error_setg(errp
, "IOAPIC only supports version 0x11 or 0x20 "
432 "(default: 0x%x).", IOAPIC_VER_DEF
);
436 memory_region_init_io(&s
->io_memory
, OBJECT(s
), &ioapic_io_ops
, s
,
439 s
->delayed_ioapic_service_timer
=
440 timer_new_ns(QEMU_CLOCK_VIRTUAL
, delayed_ioapic_service_cb
, s
);
442 qdev_init_gpio_in(dev
, ioapic_set_irq
, IOAPIC_NUM_PINS
);
444 ioapics
[ioapic_no
] = s
;
445 s
->machine_done
.notify
= ioapic_machine_done_notify
;
446 qemu_add_machine_init_done_notifier(&s
->machine_done
);
449 static void ioapic_unrealize(DeviceState
*dev
, Error
**errp
)
451 IOAPICCommonState
*s
= IOAPIC_COMMON(dev
);
453 timer_del(s
->delayed_ioapic_service_timer
);
454 timer_free(s
->delayed_ioapic_service_timer
);
457 static Property ioapic_properties
[] = {
458 DEFINE_PROP_UINT8("version", IOAPICCommonState
, version
, IOAPIC_VER_DEF
),
459 DEFINE_PROP_END_OF_LIST(),
462 static void ioapic_class_init(ObjectClass
*klass
, void *data
)
464 IOAPICCommonClass
*k
= IOAPIC_COMMON_CLASS(klass
);
465 DeviceClass
*dc
= DEVICE_CLASS(klass
);
467 k
->realize
= ioapic_realize
;
468 k
->unrealize
= ioapic_unrealize
;
470 * If APIC is in kernel, we need to update the kernel cache after
471 * migration, otherwise first 24 gsi routes will be invalid.
473 k
->post_load
= ioapic_update_kvm_routes
;
474 dc
->reset
= ioapic_reset_common
;
475 dc
->props
= ioapic_properties
;
478 static const TypeInfo ioapic_info
= {
480 .parent
= TYPE_IOAPIC_COMMON
,
481 .instance_size
= sizeof(IOAPICCommonState
),
482 .class_init
= ioapic_class_init
,
485 static void ioapic_register_types(void)
487 type_register_static(&ioapic_info
);
490 type_init(ioapic_register_types
)