hw/intc/apic: fix memory leak
[qemu/ar7.git] / hw / intc / apic.c
blob4186c57b34c217ac557c47e28e95503bdc550349
1 /*
2 * APIC support
4 * Copyright (c) 2004-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>
19 #include "qemu/osdep.h"
20 #include "qemu/thread.h"
21 #include "qemu/error-report.h"
22 #include "hw/i386/apic_internal.h"
23 #include "hw/i386/apic.h"
24 #include "hw/intc/ioapic.h"
25 #include "hw/intc/i8259.h"
26 #include "hw/intc/kvm_irqcount.h"
27 #include "hw/pci/msi.h"
28 #include "qemu/host-utils.h"
29 #include "sysemu/kvm.h"
30 #include "trace.h"
31 #include "hw/i386/apic-msidef.h"
32 #include "qapi/error.h"
33 #include "qom/object.h"
/* Flags selecting what apic_sync_vapic() transfers and in which direction */
#define SYNC_FROM_VAPIC 0x1         /* pull TPR from the guest vapic page */
#define SYNC_TO_VAPIC 0x2           /* push full VAPICState to the guest */
#define SYNC_ISR_IRR_TO_VAPIC 0x4   /* push only the ISR/IRR summary */
/* Registry of all instantiated local APICs, indexed by initial APIC ID. */
static APICCommonState **local_apics;
/* Capacity of local_apics[], rounded up to a multiple of 32. */
static uint32_t max_apics;
/* Number of 32-bit words in a delivery bitmask (max_apics / 32). */
static uint32_t max_apic_words;
#define TYPE_APIC "apic"
/* This is reusing the APICCommonState typedef from APIC_COMMON */
DECLARE_INSTANCE_CHECKER(APICCommonState, APIC,
                         TYPE_APIC)
/* Forward declarations for helpers that are used before their definition. */
static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICCommonState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint32_t dest, uint8_t dest_mode);
53 void apic_set_max_apic_id(uint32_t max_apic_id)
55 int word_size = 32;
57 /* round up the max apic id to next multiple of words */
58 max_apics = (max_apic_id + word_size - 1) & ~(word_size - 1);
60 local_apics = g_malloc0(sizeof(*local_apics) * max_apics);
61 max_apic_words = max_apics >> 5;
65 /* Find first bit starting from msb */
66 static int apic_fls_bit(uint32_t value)
68 return 31 - clz32(value);
71 /* Find first bit starting from lsb */
72 static int apic_ffs_bit(uint32_t value)
74 return ctz32(value);
/* Clear bit 'index' in a bitmap stored as an array of 32-bit words. */
static inline void apic_reset_bit(uint32_t *tab, int index)
{
    tab[index >> 5] &= ~(1U << (index & 0x1f));
}
/*
 * Highest vector number set in a 256-bit register (8 x 32-bit words),
 * or -1 when the register is empty.
 */
static int get_highest_priority_int(uint32_t *tab)
{
    int word;

    for (word = 7; word >= 0; word--) {
        if (tab[word]) {
            return word * 32 + apic_fls_bit(tab[word]);
        }
    }
    return -1;
}
/*
 * Synchronize state with the in-guest VAPIC page at s->vapic_paddr.
 * sync_type is a mask of SYNC_FROM_VAPIC / SYNC_TO_VAPIC /
 * SYNC_ISR_IRR_TO_VAPIC; no-op when no vapic page is configured.
 */
static void apic_sync_vapic(APICCommonState *s, int sync_type)
{
    VAPICState vapic_state;
    size_t length;
    off_t start;
    int vector;

    if (!s->vapic_paddr) {
        return;
    }
    if (sync_type & SYNC_FROM_VAPIC) {
        /* Guest may have lowered TPR through the vapic page; pull it in. */
        cpu_physical_memory_read(s->vapic_paddr, &vapic_state,
                                 sizeof(vapic_state));
        s->tpr = vapic_state.tpr;
    }
    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
        /* Default: write only the isr..enabled summary window. */
        start = offsetof(VAPICState, isr);
        length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);

        if (sync_type & SYNC_TO_VAPIC) {
            /* Full push is only safe from the owning vCPU thread. */
            assert(qemu_cpu_is_self(CPU(s->cpu)));

            vapic_state.tpr = s->tpr;
            vapic_state.enabled = 1;
            start = 0;
            length = sizeof(VAPICState);
        }

        /* Summarize ISR as the priority class of the highest in-service. */
        vector = get_highest_priority_int(s->isr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.isr = vector & 0xf0;

        vapic_state.zero = 0;

        /* Summarize IRR as the highest pending vector. */
        vector = get_highest_priority_int(s->irr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.irr = vector & 0xff;

        /* write_rom: the vapic page may live in ROM-mapped memory. */
        address_space_write_rom(&address_space_memory,
                                s->vapic_paddr + start,
                                MEMTXATTRS_UNSPECIFIED,
                                ((void *)&vapic_state) + start, length);
    }
}
/* Callback invoked when the vapic base address changes: push full state. */
static void apic_vapic_base_update(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_TO_VAPIC);
}
/*
 * Deliver a local interrupt source through its LVT entry
 * (vector is an APIC_LVT_* index, not an interrupt vector).
 */
static void apic_local_deliver(APICCommonState *s, int vector)
{
    uint32_t lvt = s->lvt[vector];
    int trigger_mode;

    trace_apic_local_deliver(vector, (lvt >> 8) & 7);

    if (lvt & APIC_LVT_MASKED)
        return;

    switch ((lvt >> 8) & 7) {
    case APIC_DM_SMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SMI);
        break;

    case APIC_DM_NMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_NMI);
        break;

    case APIC_DM_EXTINT:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD);
        break;

    case APIC_DM_FIXED:
        /* Only LINT0/LINT1 honor the level-trigger bit. */
        trigger_mode = APIC_TRIGGER_EDGE;
        if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) &&
            (lvt & APIC_LVT_LEVEL_TRIGGER))
            trigger_mode = APIC_TRIGGER_LEVEL;
        apic_set_irq(s, lvt & 0xff, trigger_mode);
    }
}
/*
 * Forward the 8259 PIC output line (wired to LINT0) into the APIC.
 * On deassert, retract a level-triggered fixed interrupt if one was raised.
 */
void apic_deliver_pic_intr(DeviceState *dev, int level)
{
    APICCommonState *s = APIC(dev);

    if (level) {
        apic_local_deliver(s, APIC_LVT_LINT0);
    } else {
        uint32_t lvt = s->lvt[APIC_LVT_LINT0];

        switch ((lvt >> 8) & 7) {
        case APIC_DM_FIXED:
            if (!(lvt & APIC_LVT_LEVEL_TRIGGER))
                break;
            apic_reset_bit(s->irr, lvt & 0xff);
            /* fall through */
        case APIC_DM_EXTINT:
            apic_update_irq(s);
            break;
        }
    }
}
/* External NMI pin is wired to LINT1; route it through that LVT entry. */
static void apic_external_nmi(APICCommonState *s)
{
    apic_local_deliver(s, APIC_LVT_LINT1);
}
/*
 * Run 'code' with 'apic' bound to every registered local APIC whose bit is
 * set in 'deliver_bitmask'. Skips empty words and unregistered slots.
 */
#define foreach_apic(apic, deliver_bitmask, code) \
{\
    int __i, __j;\
    for (__i = 0; __i < max_apic_words; __i++) {\
        uint32_t __mask = deliver_bitmask[__i];\
        if (__mask) {\
            for (__j = 0; __j < 32; __j++) {\
                if (__mask & (1U << __j)) {\
                    apic = local_apics[__i * 32 + __j];\
                    if (apic) {\
                        code;\
                    }\
                }\
            }\
        }\
    }\
}
/*
 * Deliver an interrupt over the (emulated) APIC bus to every APIC whose
 * bit is set in deliver_bitmask, honoring the delivery mode.
 */
static void apic_bus_deliver(const uint32_t *deliver_bitmask,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    APICCommonState *apic_iter;

    switch (delivery_mode) {
    case APIC_DM_LOWPRI:
        /* XXX: search for focus processor, arbitration */
        {
            /* Pick the lowest-numbered candidate instead of arbitrating. */
            int i, d;
            d = -1;
            for (i = 0; i < max_apic_words; i++) {
                if (deliver_bitmask[i]) {
                    d = i * 32 + apic_ffs_bit(deliver_bitmask[i]);
                    break;
                }
            }
            if (d >= 0) {
                apic_iter = local_apics[d];
                if (apic_iter) {
                    apic_set_irq(apic_iter, vector_num, trigger_mode);
                }
            }
        }
        return;

    case APIC_DM_FIXED:
        break;

    case APIC_DM_SMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_SMI)
        );
        return;

    case APIC_DM_NMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_NMI)
        );
        return;

    case APIC_DM_INIT:
        /* normal INIT IPI sent to processors */
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(CPU(apic_iter->cpu),
                                   CPU_INTERRUPT_INIT)
        );
        return;

    case APIC_DM_EXTINT:
        /* handled in I/O APIC code */
        break;

    default:
        return;
    }

    /* APIC_DM_FIXED (and EXTINT fall-through): raise the vector. */
    foreach_apic(apic_iter, deliver_bitmask,
                 apic_set_irq(apic_iter, vector_num, trigger_mode) );
}
/*
 * Resolve the destination (dest/dest_mode) into a bitmask and deliver the
 * interrupt over the APIC bus. Used for MSI and I/O APIC originated IRQs.
 */
static void apic_deliver_irq(uint32_t dest, uint8_t dest_mode,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    g_autofree uint32_t *deliver_bitmask = g_new(uint32_t, max_apic_words);

    trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
                           trigger_mode);

    apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}
303 bool is_x2apic_mode(DeviceState *dev)
305 APICCommonState *s = APIC(dev);
307 return s->apicbase & MSR_IA32_APICBASE_EXTD;
/*
 * Validate a proposed IA32_APIC_BASE write against the architecturally
 * allowed mode transitions. Returns 0 if legal, -1 otherwise (caller
 * raises #GP).
 */
static int apic_set_base_check(APICCommonState *s, uint64_t val)
{
    /* Enable x2apic when x2apic is not supported by CPU */
    if (!cpu_has_x2apic_feature(&s->cpu->env) &&
        val & MSR_IA32_APICBASE_EXTD) {
        return -1;
    }

    /*
     * Transition into invalid state
     * (s->apicbase & MSR_IA32_APICBASE_ENABLE == 0) &&
     * (s->apicbase & MSR_IA32_APICBASE_EXTD) == 1
     */
    if (!(val & MSR_IA32_APICBASE_ENABLE) &&
        (val & MSR_IA32_APICBASE_EXTD)) {
        return -1;
    }

    /* Invalid transition from disabled mode to x2APIC */
    if (!(s->apicbase & MSR_IA32_APICBASE_ENABLE) &&
        !(s->apicbase & MSR_IA32_APICBASE_EXTD) &&
        (val & MSR_IA32_APICBASE_ENABLE) &&
        (val & MSR_IA32_APICBASE_EXTD)) {
        return -1;
    }

    /* Invalid transition from x2APIC to xAPIC */
    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) &&
        (s->apicbase & MSR_IA32_APICBASE_EXTD) &&
        (val & MSR_IA32_APICBASE_ENABLE) &&
        !(val & MSR_IA32_APICBASE_EXTD)) {
        return -1;
    }

    return 0;
}
347 static int apic_set_base(APICCommonState *s, uint64_t val)
349 if (apic_set_base_check(s, val) < 0) {
350 return -1;
353 s->apicbase = (val & 0xfffff000) |
354 (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
355 /* if disabled, cannot be enabled again */
356 if (!(val & MSR_IA32_APICBASE_ENABLE)) {
357 s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
358 cpu_clear_apic_feature(&s->cpu->env);
359 s->spurious_vec &= ~APIC_SV_ENABLE;
362 /* Transition from disabled mode to xAPIC */
363 if (!(s->apicbase & MSR_IA32_APICBASE_ENABLE) &&
364 (val & MSR_IA32_APICBASE_ENABLE)) {
365 s->apicbase |= MSR_IA32_APICBASE_ENABLE;
366 cpu_set_apic_feature(&s->cpu->env);
369 /* Transition from xAPIC to x2APIC */
370 if (cpu_has_x2apic_feature(&s->cpu->env) &&
371 !(s->apicbase & MSR_IA32_APICBASE_EXTD) &&
372 (val & MSR_IA32_APICBASE_EXTD)) {
373 s->apicbase |= MSR_IA32_APICBASE_EXTD;
375 s->log_dest = ((s->initial_apic_id & 0xffff0) << 16) |
376 (1 << (s->initial_apic_id & 0xf));
379 return 0;
/* CR8 write handler: CR8 holds the task-priority class (TPR bits 7:4). */
static void apic_set_tpr(APICCommonState *s, uint8_t val)
{
    /* Updates from cr8 are ignored while the VAPIC is active */
    if (!s->vapic_paddr) {
        s->tpr = val << 4;
        apic_update_irq(s);
    }
}
/*
 * Highest pending vector in IRR, or -1 when dev is NULL or nothing is
 * pending.
 */
int apic_get_highest_priority_irr(DeviceState *dev)
{
    APICCommonState *s;

    if (!dev) {
        /* no interrupts */
        return -1;
    }

    s = APIC_COMMON(dev);
    return get_highest_priority_int(s->irr);
}
/* CR8 read handler: refresh TPR from the vapic page, return class bits. */
static uint8_t apic_get_tpr(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    return s->tpr >> 4;
}
409 int apic_get_ppr(APICCommonState *s)
411 int tpr, isrv, ppr;
413 tpr = (s->tpr >> 4);
414 isrv = get_highest_priority_int(s->isr);
415 if (isrv < 0)
416 isrv = 0;
417 isrv >>= 4;
418 if (tpr >= isrv)
419 ppr = s->tpr;
420 else
421 ppr = isrv << 4;
422 return ppr;
/* Arbitration priority register; arbitration is not modeled, reads as 0. */
static int apic_get_arb_pri(APICCommonState *s)
{
    /* XXX: arbitration */
    return 0;
}
/*
 * Result:
 * <0 - low prio interrupt (pending but masked by PPR),
 *  0 - no interrupt,
 * >0 - interrupt number
 */
static int apic_irq_pending(APICCommonState *s)
{
    int irrv, ppr;

    if (!(s->spurious_vec & APIC_SV_ENABLE)) {
        /* Software-disabled APIC delivers nothing. */
        return 0;
    }

    irrv = get_highest_priority_int(s->irr);
    if (irrv < 0) {
        return 0;
    }
    ppr = apic_get_ppr(s);
    if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
        /* Priority class not above PPR: blocked for now. */
        return -1;
    }

    return irrv;
}
/* signal the CPU if an irq is pending */
static void apic_update_irq(APICCommonState *s)
{
    CPUState *cpu;
    DeviceState *dev = (DeviceState *)s;

    cpu = CPU(s->cpu);
    if (!qemu_cpu_is_self(cpu)) {
        /* Cross-thread: ask the vCPU to poll its APIC state itself. */
        cpu_interrupt(cpu, CPU_INTERRUPT_POLL);
    } else if (apic_irq_pending(s) > 0) {
        cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
    } else if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
        /* Nothing deliverable from APIC or PIC: drop the hard-IRQ line. */
        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
    }
}
/* CPU_INTERRUPT_POLL handler: re-evaluate pending IRQs on the vCPU thread. */
void apic_poll_irq(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    apic_update_irq(s);
}
/*
 * Latch vector_num into IRR (and TMR for level-triggered interrupts) and
 * notify the CPU. Reports coalescing to KVM stats when the bit was already
 * set.
 */
static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode)
{
    kvm_report_irq_delivered(!apic_get_bit(s->irr, vector_num));

    apic_set_bit(s->irr, vector_num);
    if (trigger_mode)
        apic_set_bit(s->tmr, vector_num);
    else
        apic_reset_bit(s->tmr, vector_num);
    if (s->vapic_paddr) {
        apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC);
        /*
         * The vcpu thread needs to see the new IRR before we pull its current
         * TPR value. That way, if we miss a lowering of the TRP, the guest
         * has the chance to notice the new IRR and poll for IRQs on its own.
         */
        smp_wmb();
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
    }
    apic_update_irq(s);
}
/*
 * End-of-interrupt: retire the highest in-service vector, broadcast EOI to
 * the I/O APICs for level-triggered interrupts (unless directed EOI is
 * enabled), and re-evaluate pending IRQs.
 */
static void apic_eoi(APICCommonState *s)
{
    int isrv;
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        return;
    apic_reset_bit(s->isr, isrv);
    if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && apic_get_bit(s->tmr, isrv)) {
        ioapic_eoi_broadcast(isrv);
    }
    apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC);
    apic_update_irq(s);
}
517 static bool apic_match_dest(APICCommonState *apic, uint32_t dest)
519 if (is_x2apic_mode(&apic->parent_obj)) {
520 return apic->initial_apic_id == dest;
521 } else {
522 return apic->id == (uint8_t)dest;
/* Set the bit of every registered APIC that physically matches 'dest'. */
static void apic_find_dest(uint32_t *deliver_bitmask, uint32_t dest)
{
    APICCommonState *apic = NULL;
    int i;

    for (i = 0; i < max_apics; i++) {
        apic = local_apics[i];
        if (apic && apic_match_dest(apic, dest)) {
            apic_set_bit(deliver_bitmask, i);
        }
    }
}
/*
 * Deliver interrupt to x2APIC CPUs if it is x2APIC broadcast.
 * Otherwise, deliver interrupt to xAPIC CPUs if it is xAPIC
 * broadcast.
 */
static void apic_get_broadcast_bitmask(uint32_t *deliver_bitmask,
                                       bool is_x2apic_broadcast)
{
    int i;
    APICCommonState *apic_iter;

    for (i = 0; i < max_apics; i++) {
        apic_iter = local_apics[i];
        if (apic_iter) {
            bool apic_in_x2apic = is_x2apic_mode(&apic_iter->parent_obj);

            if (is_x2apic_broadcast && apic_in_x2apic) {
                apic_set_bit(deliver_bitmask, i);
            } else if (!is_x2apic_broadcast && !apic_in_x2apic) {
                apic_set_bit(deliver_bitmask, i);
            }
        }
    }
}
/*
 * Translate a destination (dest, dest_mode) into a per-APIC delivery
 * bitmask, handling broadcast plus physical and logical (flat/cluster and
 * x2APIC) addressing.
 */
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint32_t dest, uint8_t dest_mode)
{
    APICCommonState *apic;
    int i;

    memset(deliver_bitmask, 0x00, max_apic_words * sizeof(uint32_t));

    /*
     * x2APIC broadcast is delivered to all x2APIC CPUs regardless of
     * destination mode. In case the destination mode is physical, it is
     * broadcasted to all xAPIC CPUs too. Otherwise, if the destination
     * mode is logical, we need to continue checking if xAPIC CPUs accepts
     * the interrupt.
     */
    if (dest == 0xffffffff) {
        if (dest_mode == APIC_DESTMODE_PHYSICAL) {
            memset(deliver_bitmask, 0xff, max_apic_words * sizeof(uint32_t));
            return;
        } else {
            apic_get_broadcast_bitmask(deliver_bitmask, true);
        }
    }

    if (dest_mode == APIC_DESTMODE_PHYSICAL) {
        apic_find_dest(deliver_bitmask, dest);
        /* Any APIC in xAPIC mode will interpret 0xFF as broadcast */
        if (dest == 0xff) {
            apic_get_broadcast_bitmask(deliver_bitmask, false);
        }
    } else {
        /* XXX: logical mode */
        for (i = 0; i < max_apics; i++) {
            apic = local_apics[i];
            if (apic) {
                /* x2APIC logical mode */
                if (apic->apicbase & MSR_IA32_APICBASE_EXTD) {
                    /* Cluster halves must match; member bits must overlap. */
                    if ((dest >> 16) == (apic->extended_log_dest >> 16) &&
                        (dest & apic->extended_log_dest & 0xffff)) {
                        apic_set_bit(deliver_bitmask, i);
                    }
                    continue;
                }

                /* xAPIC logical mode */
                dest = (uint8_t)dest;
                if (apic->dest_mode == APIC_DESTMODE_LOGICAL_FLAT) {
                    if (dest & apic->log_dest) {
                        apic_set_bit(deliver_bitmask, i);
                    }
                } else if (apic->dest_mode == APIC_DESTMODE_LOGICAL_CLUSTER) {
                    /*
                     * In cluster model of xAPIC logical mode IPI, 4 higher
                     * bits are used as cluster address, 4 lower bits are
                     * the bitmask for local APICs in the cluster. The IPI
                     * is delivered to an APIC if the cluster address
                     * matches and the APIC's address bit in the cluster is
                     * set in bitmask of destination ID in IPI.
                     *
                     * The cluster address ranges from 0 - 14, the cluster
                     * address 15 (0xf) is the broadcast address to all
                     * clusters.
                     */
                    if ((dest & 0xf0) == 0xf0 ||
                        (dest & 0xf0) == (apic->log_dest & 0xf0)) {
                        if (dest & apic->log_dest & 0x0f) {
                            apic_set_bit(deliver_bitmask, i);
                        }
                    }
                }
            }
        }
    }
}
/* Record the SIPI vector and kick the CPU; it acts on it in apic_sipi(). */
static void apic_startup(APICCommonState *s, int vector_num)
{
    s->sipi_vector = vector_num;
    cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);
}
/*
 * Complete a Startup IPI on the target CPU: load the real-mode entry
 * derived from sipi_vector, but only if the CPU is in wait-for-SIPI state.
 */
void apic_sipi(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);

    cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);

    if (!s->wait_for_sipi)
        return;
    cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector);
    s->wait_for_sipi = 0;
}
/*
 * Deliver an IPI issued through the ICR of the APIC 'dev'.
 * dest_shorthand: 0 = use dest/dest_mode, 1 = self, 2 = all including
 * self, 3 = all excluding self. INIT level de-assert and SIPI are handled
 * locally; everything else goes through apic_bus_deliver().
 */
static void apic_deliver(DeviceState *dev, uint32_t dest, uint8_t dest_mode,
                         uint8_t delivery_mode, uint8_t vector_num,
                         uint8_t trigger_mode, uint8_t dest_shorthand)
{
    APICCommonState *s = APIC(dev);
    APICCommonState *apic_iter;
    uint32_t deliver_bitmask_size = max_apic_words * sizeof(uint32_t);
    g_autofree uint32_t *deliver_bitmask = g_new(uint32_t, max_apic_words);
    uint32_t current_apic_id;

    if (is_x2apic_mode(dev)) {
        current_apic_id = s->initial_apic_id;
    } else {
        current_apic_id = s->id;
    }

    switch (dest_shorthand) {
    case 0:
        apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
        break;
    case 1:
        memset(deliver_bitmask, 0x00, deliver_bitmask_size);
        apic_set_bit(deliver_bitmask, current_apic_id);
        break;
    case 2:
        memset(deliver_bitmask, 0xff, deliver_bitmask_size);
        break;
    case 3:
        memset(deliver_bitmask, 0xff, deliver_bitmask_size);
        apic_reset_bit(deliver_bitmask, current_apic_id);
        break;
    }

    switch (delivery_mode) {
    case APIC_DM_INIT:
        {
            int trig_mode = (s->icr[0] >> 15) & 1;
            int level = (s->icr[0] >> 14) & 1;
            if (level == 0 && trig_mode == 1) {
                /* INIT level de-assert: only synchronizes arbitration IDs. */
                foreach_apic(apic_iter, deliver_bitmask,
                             apic_iter->arb_id = apic_iter->id );
                return;
            }
        }
        break;

    case APIC_DM_SIPI:
        foreach_apic(apic_iter, deliver_bitmask,
                     apic_startup(apic_iter, vector_num) );
        return;
    }

    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}
/*
 * If the 8259 output is asserted and this APIC accepts PIC interrupts,
 * re-inject it through LINT0. Returns true when an interrupt was passed on.
 */
static bool apic_check_pic(APICCommonState *s)
{
    DeviceState *dev = (DeviceState *)s;

    if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
        return false;
    }
    apic_deliver_pic_intr(dev, 1);
    return true;
}
/*
 * Interrupt-acknowledge cycle: pick the highest deliverable vector, move
 * it from IRR to ISR, and return it. Returns -1 when nothing is
 * deliverable (or an 8259 interrupt should be taken instead), or the
 * spurious vector when pending interrupts are masked by PPR.
 */
int apic_get_interrupt(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);
    int intno;

    /* if the APIC is installed or enabled, we let the 8259 handle the
       IRQs */
    if (!s)
        return -1;
    if (!(s->spurious_vec & APIC_SV_ENABLE))
        return -1;

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    intno = apic_irq_pending(s);

    /* if there is an interrupt from the 8259, let the caller handle
     * that first since ExtINT interrupts ignore the priority.
     */
    if (intno == 0 || apic_check_pic(s)) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return -1;
    } else if (intno < 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        /* Pending but blocked by PPR: deliver the spurious vector. */
        return s->spurious_vec & 0xff;
    }
    apic_reset_bit(s->irr, intno);
    apic_set_bit(s->isr, intno);
    apic_sync_vapic(s, SYNC_TO_VAPIC);

    apic_update_irq(s);

    return intno;
}
/*
 * Whether 8259 interrupts may reach this CPU: true when the APIC is
 * globally disabled or LINT0 is unmasked, and an ISA PIC exists.
 */
int apic_accept_pic_intr(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);
    uint32_t lvt0;

    if (!s)
        return -1;

    lvt0 = s->lvt[APIC_LVT_LINT0];

    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 ||
        (lvt0 & APIC_LVT_MASKED) == 0)
        return isa_pic != NULL;

    return 0;
}
/* (Re)arm or cancel the LVT timer depending on apic_next_timer(). */
static void apic_timer_update(APICCommonState *s, int64_t current_time)
{
    if (apic_next_timer(s, current_time)) {
        timer_mod(s->timer, s->next_time);
    } else {
        timer_del(s->timer);
    }
}
/* QEMU timer callback: fire the LVT timer interrupt and rearm (periodic). */
static void apic_timer(void *opaque)
{
    APICCommonState *s = opaque;

    apic_local_deliver(s, APIC_LVT_TIMER);
    apic_timer_update(s, s->next_time);
}
/*
 * Read one APIC register of the currently executing CPU's APIC.
 * index = MMIO offset >> 4 (equivalently, x2APIC MSR - 0x800).
 * Returns 0 on success, -1 on error (no current APIC, reserved index, or
 * register not readable in the current mode); *value receives the result.
 */
static int apic_register_read(int index, uint64_t *value)
{
    DeviceState *dev;
    APICCommonState *s;
    uint32_t val;
    int ret = 0;

    dev = cpu_get_current_apic();
    if (!dev) {
        return -1;
    }
    s = APIC(dev);

    switch(index) {
    case 0x02: /* id */
        if (is_x2apic_mode(dev)) {
            val = s->initial_apic_id;
        } else {
            val = s->id << 24;
        }
        break;
    case 0x03: /* version */
        val = s->version | ((APIC_LVT_NB - 1) << 16);
        break;
    case 0x08: /* task priority */
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ);
        }
        val = s->tpr;
        break;
    case 0x09: /* arbitration priority */
        val = apic_get_arb_pri(s);
        break;
    case 0x0a:
        /* ppr */
        val = apic_get_ppr(s);
        break;
    case 0x0b: /* EOI is write-only, reads as zero */
        val = 0;
        break;
    case 0x0d: /* logical destination */
        if (is_x2apic_mode(dev)) {
            val = s->extended_log_dest;
        } else {
            val = s->log_dest << 24;
        }
        break;
    case 0x0e: /* destination format; reserved in x2APIC mode */
        if (is_x2apic_mode(dev)) {
            val = 0;
            ret = -1;
        } else {
            val = (s->dest_mode << 28) | 0xfffffff;
        }
        break;
    case 0x0f: /* spurious interrupt vector */
        val = s->spurious_vec;
        break;
    case 0x10 ... 0x17: /* ISR */
        val = s->isr[index & 7];
        break;
    case 0x18 ... 0x1f: /* TMR */
        val = s->tmr[index & 7];
        break;
    case 0x20 ... 0x27: /* IRR */
        val = s->irr[index & 7];
        break;
    case 0x28: /* error status */
        val = s->esr;
        break;
    case 0x30: /* ICR low */
    case 0x31: /* ICR high */
        val = s->icr[index & 1];
        break;
    case 0x32 ... 0x37: /* LVT entries */
        val = s->lvt[index - 0x32];
        break;
    case 0x38: /* timer initial count */
        val = s->initial_count;
        break;
    case 0x39: /* timer current count */
        val = apic_get_current_count(s);
        break;
    case 0x3e: /* timer divide configuration */
        val = s->divide_conf;
        break;
    default:
        s->esr |= APIC_ESR_ILLEGAL_ADDRESS;
        val = 0;
        ret = -1;
        break;
    }

    trace_apic_register_read(index, val);
    *value = val;
    return ret;
}
890 static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size)
892 uint64_t val;
893 int index;
895 if (size < 4) {
896 return 0;
899 index = (addr >> 4) & 0xff;
900 apic_register_read(index, &val);
902 return val;
/*
 * x2APIC MSR read entry point: only valid for a CPU with a current APIC
 * that is in x2APIC mode. Returns 0 on success, -1 on error (#GP).
 */
int apic_msr_read(int index, uint64_t *val)
{
    DeviceState *dev;

    dev = cpu_get_current_apic();
    if (!dev) {
        return -1;
    }

    if (!is_x2apic_mode(dev)) {
        return -1;
    }

    return apic_register_read(index, val);
}
/* Decode an MSI message (address + data) and deliver the interrupt. */
static void apic_send_msi(MSIMessage *msi)
{
    uint64_t addr = msi->address;
    uint32_t data = msi->data;
    uint32_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    /*
     * The higher 3 bytes of destination id is stored in higher word of
     * msi address. See x86_iommu_irq_to_msi_message()
     */
    dest = dest | (addr >> 32);
    uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
    uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    /* XXX: Ignore redirection hint. */
    apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
}
/*
 * Write one APIC register of the currently executing CPU's APIC.
 * index = MMIO offset >> 4 (equivalently, x2APIC MSR - 0x800); in x2APIC
 * mode 'val' may carry 64 bits (ICR). Returns 0 on success, -1 on error
 * (no current APIC, or register not writable in the current mode).
 */
static int apic_register_write(int index, uint64_t val)
{
    DeviceState *dev;
    APICCommonState *s;

    dev = cpu_get_current_apic();
    if (!dev) {
        return -1;
    }
    s = APIC(dev);

    trace_apic_register_write(index, val);

    switch(index) {
    case 0x02: /* APIC ID: read-only in x2APIC mode */
        if (is_x2apic_mode(dev)) {
            return -1;
        }

        s->id = (val >> 24);
        break;
    case 0x03: /* version register is read-only; writes are ignored */
        break;
    case 0x08: /* task priority */
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE);
        }
        s->tpr = val;
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        apic_update_irq(s);
        break;
    case 0x09: /* APR and PPR are read-only */
    case 0x0a:
        break;
    case 0x0b: /* EOI */
        apic_eoi(s);
        break;
    case 0x0d: /* logical destination: read-only in x2APIC mode */
        if (is_x2apic_mode(dev)) {
            return -1;
        }

        s->log_dest = val >> 24;
        break;
    case 0x0e: /* destination format: reserved in x2APIC mode */
        if (is_x2apic_mode(dev)) {
            return -1;
        }

        s->dest_mode = val >> 28;
        break;
    case 0x0f: /* spurious interrupt vector */
        s->spurious_vec = val & 0x1ff;
        apic_update_irq(s);
        break;
    case 0x10 ... 0x17: /* ISR, TMR, IRR and ESR are read-only */
    case 0x18 ... 0x1f:
    case 0x20 ... 0x27:
    case 0x28:
        break;
    case 0x30: { /* ICR low: writing it triggers the IPI */
        uint32_t dest;

        s->icr[0] = val;
        if (is_x2apic_mode(dev)) {
            /* In x2APIC mode the MSR write carries ICR high in bits 63:32. */
            s->icr[1] = val >> 32;
            dest = s->icr[1];
        } else {
            dest = (s->icr[1] >> 24) & 0xff;
        }

        apic_deliver(dev, dest, (s->icr[0] >> 11) & 1,
                     (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
                     (s->icr[0] >> 15) & 1, (s->icr[0] >> 18) & 3);
        break;
    }
    case 0x31: /* ICR high: not a separate register in x2APIC mode */
        if (is_x2apic_mode(dev)) {
            return -1;
        }

        s->icr[1] = val;
        break;
    case 0x32 ... 0x37: /* LVT entries */
        {
            int n = index - 0x32;
            s->lvt[n] = val;
            if (n == APIC_LVT_TIMER) {
                apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
            } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) {
                apic_update_irq(s);
            }
        }
        break;
    case 0x38: /* timer initial count: restarts the timer */
        s->initial_count = val;
        s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        apic_timer_update(s, s->initial_count_load_time);
        break;
    case 0x39: /* current count is read-only */
        break;
    case 0x3e: /* timer divide configuration */
        {
            int v;
            s->divide_conf = val & 0xb;
            v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
            s->count_shift = (v + 1) & 7;
        }
        break;
    case 0x3f: { /* SELF IPI: x2APIC only */
        int vector = val & 0xff;

        if (!is_x2apic_mode(dev)) {
            return -1;
        }

        /*
         * Self IPI is identical to IPI with
         * - Destination shorthand: 1 (Self)
         * - Trigger mode: 0 (Edge)
         * - Delivery mode: 0 (Fixed)
         */
        apic_deliver(dev, 0, 0, APIC_DM_FIXED, vector, 0, 1);

        break;
    }
    default:
        s->esr |= APIC_ESR_ILLEGAL_ADDRESS;
        return -1;
    }

    return 0;
}
/*
 * MMIO write handler for the xAPIC register page; also the landing point
 * for MSI writes, which share this address range.
 */
static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
                           unsigned size)
{
    int index = (addr >> 4) & 0xff;

    if (size < 4) {
        return;
    }

    if (addr > 0xfff || !index) {
        /*
         * MSI and MMIO APIC are at the same memory location,
         * but actually not on the global bus: MSI is on PCI bus
         * APIC is connected directly to the CPU.
         * Mapping them on the global bus happens to work because
         * MSI registers are reserved in APIC MMIO and vice versa.
         */
        MSIMessage msi = { .address = addr, .data = val };
        apic_send_msi(&msi);
        return;
    }

    apic_register_write(index, val);
}
/*
 * x2APIC MSR write entry point: only valid for a CPU with a current APIC
 * that is in x2APIC mode. Returns 0 on success, -1 on error (#GP).
 */
int apic_msr_write(int index, uint64_t val)
{
    DeviceState *dev;

    dev = cpu_get_current_apic();
    if (!dev) {
        return -1;
    }

    if (!is_x2apic_mode(dev)) {
        return -1;
    }

    return apic_register_write(index, val);
}
/* Migration pre-save hook: fold vapic page state back into the device. */
static void apic_pre_save(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
}
/* Migration post-load hook: restore or cancel the pending timer. */
static void apic_post_load(APICCommonState *s)
{
    if (s->timer_expiry != -1) {
        timer_mod(s->timer, s->timer_expiry);
    } else {
        timer_del(s->timer);
    }
}
/* MMIO ops for the shared xAPIC/MSI register page (dword accesses only
 * do real work; smaller accesses are accepted but ignored by the handlers). */
static const MemoryRegionOps apic_io_ops = {
    .read = apic_mem_read,
    .write = apic_mem_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 4,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/*
 * Device realize: set up the MMIO region and LVT timer, and register this
 * APIC in the global local_apics[] registry by its initial APIC ID.
 */
static void apic_realize(DeviceState *dev, Error **errp)
{
    APICCommonState *s = APIC(dev);

    if (kvm_enabled()) {
        warn_report("Userspace local APIC is deprecated for KVM.");
        warn_report("Do not use kernel-irqchip except for the -M isapc machine type.");
    }

    memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi",
                          APIC_SPACE_SIZE);

    /*
     * apic-msi's apic_mem_write can call into ioapic_eoi_broadcast, which can
     * write back to apic-msi. As such mark the apic-msi region re-entrancy
     * safe.
     */
    s->io_memory.disable_reentrancy_guard = true;

    s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);

    /*
     * The --machine none does not call apic_set_max_apic_id before creating
     * apic, so we need to call it here and set it to 1 which is the max cpus
     * in machine none.
     */
    if (!local_apics) {
        apic_set_max_apic_id(1);
    }
    local_apics[s->initial_apic_id] = s;

    msi_nonbroken = true;
}
/* Device unrealize: free the timer and drop the registry entry. */
static void apic_unrealize(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);

    timer_free(s->timer);
    local_apics[s->initial_apic_id] = NULL;
}
/* Hook the userspace-APIC implementations into the APIC_COMMON vtable. */
static void apic_class_init(ObjectClass *klass, void *data)
{
    APICCommonClass *k = APIC_COMMON_CLASS(klass);

    k->realize = apic_realize;
    k->unrealize = apic_unrealize;
    k->set_base = apic_set_base;
    k->set_tpr = apic_set_tpr;
    k->get_tpr = apic_get_tpr;
    k->vapic_base_update = apic_vapic_base_update;
    k->external_nmi = apic_external_nmi;
    k->pre_save = apic_pre_save;
    k->post_load = apic_post_load;
    k->send_msi = apic_send_msi;
}
/* QOM type registration data for the userspace "apic" device. */
static const TypeInfo apic_info = {
    .name = TYPE_APIC,
    .instance_size = sizeof(APICCommonState),
    .parent = TYPE_APIC_COMMON,
    .class_init = apic_class_init,
};
/* Register the APIC type with QOM at module-init time. */
static void apic_register_types(void)
{
    type_register_static(&apic_info);
}

type_init(apic_register_types)