/*
 *  APIC support
 *
 *  Copyright (c) 2004-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */

#include "qemu/thread.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic.h"
#include "hw/i386/ioapic.h"
#include "hw/pci/msi.h"
#include "qemu/host-utils.h"
#include "trace.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"

#define MAX_APIC_WORDS 8

#define SYNC_FROM_VAPIC 0x1
#define SYNC_TO_VAPIC 0x2
#define SYNC_ISR_IRR_TO_VAPIC 0x4

static APICCommonState *local_apics[MAX_APICS + 1];

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICCommonState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode);

/* Find first bit starting from msb */
static int apic_fls_bit(uint32_t value)
{
    return 31 - clz32(value);
}

/* Find first bit starting from lsb */
static int apic_ffs_bit(uint32_t value)
{
    return ctz32(value);
}

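/*
 * The IRR, ISR and TMR registers are stored as arrays of MAX_APIC_WORDS
 * 32-bit words, one bit per interrupt vector.  The helpers below set,
 * clear and test a single vector's bit in such an array.
 */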
static inline void apic_set_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] |= mask;
}

static inline void apic_reset_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] &= ~mask;
}

static inline int apic_get_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    return !!(tab[i] & mask);
}

/* return -1 if no bit is set */
static int get_highest_priority_int(uint32_t *tab)
{
    int i;
    for (i = 7; i >= 0; i--) {
        if (tab[i] != 0) {
            return i * 32 + apic_fls_bit(tab[i]);
        }
    }
    return -1;
}

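/*
 * Synchronize state with the guest-visible vAPIC area used by the TPR
 * optimization: SYNC_FROM_VAPIC pulls in the TPR the guest may have
 * written, SYNC_TO_VAPIC and SYNC_ISR_IRR_TO_VAPIC publish ISR, IRR and
 * TPR back to guest memory.
 */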
static void apic_sync_vapic(APICCommonState *s, int sync_type)
{
    VAPICState vapic_state;
    size_t length;
    off_t start;
    int vector;

    if (!s->vapic_paddr) {
        return;
    }
    if (sync_type & SYNC_FROM_VAPIC) {
        cpu_physical_memory_read(s->vapic_paddr, &vapic_state,
                                 sizeof(vapic_state));
        s->tpr = vapic_state.tpr;
    }
    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
        start = offsetof(VAPICState, isr);
        length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);

        if (sync_type & SYNC_TO_VAPIC) {
            assert(qemu_cpu_is_self(CPU(s->cpu)));

            vapic_state.tpr = s->tpr;
            vapic_state.enabled = 1;
            start = 0;
            length = sizeof(VAPICState);
        }

        vector = get_highest_priority_int(s->isr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.isr = vector & 0xf0;

        vapic_state.zero = 0;

        vector = get_highest_priority_int(s->irr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.irr = vector & 0xff;

        cpu_physical_memory_write_rom(&address_space_memory,
                                      s->vapic_paddr + start,
                                      ((void *)&vapic_state) + start, length);
    }
}

static void apic_vapic_base_update(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_TO_VAPIC);
}

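/*
 * Deliver an interrupt raised by one of the local sources described by an
 * LVT entry (timer, LINT0/1, ...), honouring the entry's mask bit,
 * delivery mode and trigger mode.
 */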
static void apic_local_deliver(APICCommonState *s, int vector)
{
    uint32_t lvt = s->lvt[vector];
    int trigger_mode;

    trace_apic_local_deliver(vector, (lvt >> 8) & 7);

    if (lvt & APIC_LVT_MASKED)
        return;

    switch ((lvt >> 8) & 7) {
    case APIC_DM_SMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SMI);
        break;

    case APIC_DM_NMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_NMI);
        break;

    case APIC_DM_EXTINT:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD);
        break;

    case APIC_DM_FIXED:
        trigger_mode = APIC_TRIGGER_EDGE;
        if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) &&
            (lvt & APIC_LVT_LEVEL_TRIGGER))
            trigger_mode = APIC_TRIGGER_LEVEL;
        apic_set_irq(s, lvt & 0xff, trigger_mode);
    }
}

void apic_deliver_pic_intr(DeviceState *dev, int level)
{
    APICCommonState *s = APIC_COMMON(dev);

    if (level) {
        apic_local_deliver(s, APIC_LVT_LINT0);
    } else {
        uint32_t lvt = s->lvt[APIC_LVT_LINT0];

        switch ((lvt >> 8) & 7) {
        case APIC_DM_FIXED:
            if (!(lvt & APIC_LVT_LEVEL_TRIGGER))
                break;
            apic_reset_bit(s->irr, lvt & 0xff);
            /* fall through */
        case APIC_DM_EXTINT:
            apic_update_irq(s);
            break;
        }
    }
}

static void apic_external_nmi(APICCommonState *s)
{
    apic_local_deliver(s, APIC_LVT_LINT1);
}

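/*
 * Run 'code' once for every local APIC whose bit is set in
 * deliver_bitmask, with 'apic' bound to that APIC.
 */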
#define foreach_apic(apic, deliver_bitmask, code) \
{\
    int __i, __j;\
    for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\
        uint32_t __mask = deliver_bitmask[__i];\
        if (__mask) {\
            for(__j = 0; __j < 32; __j++) {\
                if (__mask & (1U << __j)) {\
                    apic = local_apics[__i * 32 + __j];\
                    if (apic) {\
                        code;\
                    }\
                }\
            }\
        }\
    }\
}

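/*
 * Deliver an interrupt message to every APIC selected in deliver_bitmask.
 * Lowest-priority delivery simply picks the first candidate (no real
 * arbitration, see the XXX below); SMI/NMI/INIT raise the corresponding
 * CPU interrupt, and fixed delivery sets the vector in each target's IRR.
 */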
static void apic_bus_deliver(const uint32_t *deliver_bitmask,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    APICCommonState *apic_iter;

    switch (delivery_mode) {
        case APIC_DM_LOWPRI:
            /* XXX: search for focus processor, arbitration */
            {
                int i, d;
                d = -1;
                for(i = 0; i < MAX_APIC_WORDS; i++) {
                    if (deliver_bitmask[i]) {
                        d = i * 32 + apic_ffs_bit(deliver_bitmask[i]);
                        break;
                    }
                }
                if (d >= 0) {
                    apic_iter = local_apics[d];
                    if (apic_iter) {
                        apic_set_irq(apic_iter, vector_num, trigger_mode);
                    }
                }
            }
            return;

        case APIC_DM_FIXED:
            break;

        case APIC_DM_SMI:
            foreach_apic(apic_iter, deliver_bitmask,
                cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_SMI)
            );
            return;

        case APIC_DM_NMI:
            foreach_apic(apic_iter, deliver_bitmask,
                cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_NMI)
            );
            return;

        case APIC_DM_INIT:
            /* normal INIT IPI sent to processors */
            foreach_apic(apic_iter, deliver_bitmask,
                         cpu_interrupt(CPU(apic_iter->cpu),
                                       CPU_INTERRUPT_INIT)
            );
            return;

        case APIC_DM_EXTINT:
            /* handled in I/O APIC code */
            break;

        default:
            return;
    }

    foreach_apic(apic_iter, deliver_bitmask,
                 apic_set_irq(apic_iter, vector_num, trigger_mode) );
}

void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
                      uint8_t vector_num, uint8_t trigger_mode)
{
    uint32_t deliver_bitmask[MAX_APIC_WORDS];

    trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
                           trigger_mode);

    apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

static void apic_set_base(APICCommonState *s, uint64_t val)
{
    s->apicbase = (val & 0xfffff000) |
        (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
    /* if disabled, cannot be enabled again */
    if (!(val & MSR_IA32_APICBASE_ENABLE)) {
        s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
        cpu_clear_apic_feature(&s->cpu->env);
        s->spurious_vec &= ~APIC_SV_ENABLE;
    }
}

static void apic_set_tpr(APICCommonState *s, uint8_t val)
{
    /* Updates from cr8 are ignored while the VAPIC is active */
    if (!s->vapic_paddr) {
        s->tpr = val << 4;
        apic_update_irq(s);
    }
}

static uint8_t apic_get_tpr(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    return s->tpr >> 4;
}

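/*
 * The processor priority is the higher of the task priority class and the
 * priority class of the highest vector currently in service.
 */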
static int apic_get_ppr(APICCommonState *s)
{
    int tpr, isrv, ppr;

    tpr = (s->tpr >> 4);
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        isrv = 0;
    isrv >>= 4;
    if (tpr >= isrv)
        ppr = s->tpr;
    else
        ppr = isrv << 4;
    return ppr;
}

static int apic_get_arb_pri(APICCommonState *s)
{
    /* XXX: arbitration */
    return 0;
}

/*
 * <0 - low prio interrupt,
 *  0 - no interrupt,
 * >0 - interrupt number
 */
static int apic_irq_pending(APICCommonState *s)
{
    int irrv, ppr;

    if (!(s->spurious_vec & APIC_SV_ENABLE)) {
        return 0;
    }

    irrv = get_highest_priority_int(s->irr);
    if (irrv < 0) {
        return 0;
    }
    ppr = apic_get_ppr(s);
    if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
        return -1;
    }

    return irrv;
}

/* signal the CPU if an irq is pending */
static void apic_update_irq(APICCommonState *s)
{
    CPUState *cpu;
    DeviceState *dev = (DeviceState *)s;

    cpu = CPU(s->cpu);
    if (!qemu_cpu_is_self(cpu)) {
        cpu_interrupt(cpu, CPU_INTERRUPT_POLL);
    } else if (apic_irq_pending(s) > 0) {
        cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
    } else if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
    }
}

void apic_poll_irq(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    apic_update_irq(s);
}

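/*
 * Latch a vector into the IRR, record its trigger mode in the TMR and
 * re-evaluate whether the CPU needs to be interrupted.
 */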
static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode)
{
    apic_report_irq_delivered(!apic_get_bit(s->irr, vector_num));

    apic_set_bit(s->irr, vector_num);
    if (trigger_mode)
        apic_set_bit(s->tmr, vector_num);
    else
        apic_reset_bit(s->tmr, vector_num);
    if (s->vapic_paddr) {
        apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC);
        /*
         * The vcpu thread needs to see the new IRR before we pull its current
         * TPR value. That way, if we miss a lowering of the TPR, the guest
         * has the chance to notice the new IRR and poll for IRQs on its own.
         */
        smp_wmb();
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
    }
    apic_update_irq(s);
}

static void apic_eoi(APICCommonState *s)
{
    int isrv;
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        return;
    apic_reset_bit(s->isr, isrv);
    if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && apic_get_bit(s->tmr, isrv)) {
        ioapic_eoi_broadcast(isrv);
    }
    apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC);
    apic_update_irq(s);
}

static int apic_find_dest(uint8_t dest)
{
    APICCommonState *apic = local_apics[dest];
    int i;

    if (apic && apic->id == dest)
        return dest;  /* shortcut in case apic->id == apic->idx */

    for (i = 0; i < MAX_APICS; i++) {
        apic = local_apics[i];
        if (apic && apic->id == dest)
            return i;
        if (!apic)
            break;
    }

    return -1;
}

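/*
 * Translate an interrupt message's destination into a bitmap of target
 * APICs: physical mode (dest_mode == 0) selects a single APIC ID or
 * broadcasts on 0xff; logical mode matches against each APIC's logical
 * destination register, in flat (DFR 0xf) or cluster (DFR 0x0) format.
 */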
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode)
{
    APICCommonState *apic_iter;
    int i;

    if (dest_mode == 0) {
        if (dest == 0xff) {
            memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t));
        } else {
            int idx = apic_find_dest(dest);
            memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
            if (idx >= 0)
                apic_set_bit(deliver_bitmask, idx);
        }
    } else {
        /* XXX: cluster mode */
        memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
        for(i = 0; i < MAX_APICS; i++) {
            apic_iter = local_apics[i];
            if (apic_iter) {
                if (apic_iter->dest_mode == 0xf) {
                    if (dest & apic_iter->log_dest)
                        apic_set_bit(deliver_bitmask, i);
                } else if (apic_iter->dest_mode == 0x0) {
                    if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) &&
                        (dest & apic_iter->log_dest & 0x0f)) {
                        apic_set_bit(deliver_bitmask, i);
                    }
                }
            } else {
                break;
            }
        }
    }
}

static void apic_startup(APICCommonState *s, int vector_num)
{
    s->sipi_vector = vector_num;
    cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);
}

void apic_sipi(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);

    cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);

    if (!s->wait_for_sipi)
        return;
    cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector);
    s->wait_for_sipi = 0;
}

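/*
 * Deliver an IPI programmed through the ICR.  The destination shorthand
 * bits select an explicit destination, self, all APICs, or all APICs
 * except self; INIT level de-assert and SIPI are handled here, everything
 * else goes through apic_bus_deliver().
 */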
static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode,
                         uint8_t delivery_mode, uint8_t vector_num,
                         uint8_t trigger_mode)
{
    APICCommonState *s = APIC_COMMON(dev);
    uint32_t deliver_bitmask[MAX_APIC_WORDS];
    int dest_shorthand = (s->icr[0] >> 18) & 3;
    APICCommonState *apic_iter;

    switch (dest_shorthand) {
    case 0:
        apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
        break;
    case 1:
        memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask));
        apic_set_bit(deliver_bitmask, s->idx);
        break;
    case 2:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        break;
    case 3:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        apic_reset_bit(deliver_bitmask, s->idx);
        break;
    }

    switch (delivery_mode) {
        case APIC_DM_INIT:
            {
                int trig_mode = (s->icr[0] >> 15) & 1;
                int level = (s->icr[0] >> 14) & 1;
                if (level == 0 && trig_mode == 1) {
                    foreach_apic(apic_iter, deliver_bitmask,
                                 apic_iter->arb_id = apic_iter->id );
                    return;
                }
            }
            break;

        case APIC_DM_SIPI:
            foreach_apic(apic_iter, deliver_bitmask,
                         apic_startup(apic_iter, vector_num) );
            return;
    }

    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

static bool apic_check_pic(APICCommonState *s)
{
    DeviceState *dev = (DeviceState *)s;

    if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
        return false;
    }
    apic_deliver_pic_intr(dev, 1);
    return true;
}

int apic_get_interrupt(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);
    int intno;

    /* if the APIC is not installed or not enabled, we let the 8259 handle
       the IRQs */
    if (!s)
        return -1;
    if (!(s->spurious_vec & APIC_SV_ENABLE))
        return -1;

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    intno = apic_irq_pending(s);

    /* if there is an interrupt from the 8259, let the caller handle
     * that first since ExtINT interrupts ignore the priority.
     */
    if (intno == 0 || apic_check_pic(s)) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return -1;
    } else if (intno < 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return s->spurious_vec & 0xff;
    }
    apic_reset_bit(s->irr, intno);
    apic_set_bit(s->isr, intno);
    apic_sync_vapic(s, SYNC_TO_VAPIC);

    apic_update_irq(s);

    return intno;
}

int apic_accept_pic_intr(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);
    uint32_t lvt0;

    if (!s)
        return -1;

    lvt0 = s->lvt[APIC_LVT_LINT0];

    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 ||
        (lvt0 & APIC_LVT_MASKED) == 0)
        return 1;

    return 0;
}

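/*
 * Derive the timer's current count from the virtual clock: the time
 * elapsed since the initial count was loaded, scaled by the divide
 * configuration (count_shift), wrapping around in periodic mode.
 */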
static uint32_t apic_get_current_count(APICCommonState *s)
{
    int64_t d;
    uint32_t val;
    d = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->initial_count_load_time) >>
        s->count_shift;
    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
        /* periodic */
        val = s->initial_count - (d % ((uint64_t)s->initial_count + 1));
    } else {
        if (d >= s->initial_count)
            val = 0;
        else
            val = s->initial_count - d;
    }
    return val;
}

static void apic_timer_update(APICCommonState *s, int64_t current_time)
{
    if (apic_next_timer(s, current_time)) {
        timer_mod(s->timer, s->next_time);
    } else {
        timer_del(s->timer);
    }
}

static void apic_timer(void *opaque)
{
    APICCommonState *s = opaque;

    apic_local_deliver(s, APIC_LVT_TIMER);
    apic_timer_update(s, s->next_time);
}

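/*
 * APIC registers are only defined for aligned 32-bit accesses, so the
 * byte and word handlers below are stubs.
 */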
static uint32_t apic_mem_readb(void *opaque, hwaddr addr)
{
    return 0;
}

static uint32_t apic_mem_readw(void *opaque, hwaddr addr)
{
    return 0;
}

static void apic_mem_writeb(void *opaque, hwaddr addr, uint32_t val)
{
}

static void apic_mem_writew(void *opaque, hwaddr addr, uint32_t val)
{
}

static uint32_t apic_mem_readl(void *opaque, hwaddr addr)
{
    DeviceState *dev;
    APICCommonState *s;
    uint32_t val;
    int index;

    dev = cpu_get_current_apic();
    if (!dev) {
        return 0;
    }
    s = APIC_COMMON(dev);

    index = (addr >> 4) & 0xff;
    switch(index) {
    case 0x02: /* id */
        val = s->id << 24;
        break;
    case 0x03: /* version */
        val = s->version | ((APIC_LVT_NB - 1) << 16);
        break;
    case 0x08:
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ);
        }
        val = s->tpr;
        break;
    case 0x09:
        val = apic_get_arb_pri(s);
        break;
    case 0x0a:
        /* ppr */
        val = apic_get_ppr(s);
        break;
    case 0x0b:
        val = 0;
        break;
    case 0x0d:
        val = s->log_dest << 24;
        break;
    case 0x0e:
        val = (s->dest_mode << 28) | 0xfffffff;
        break;
    case 0x0f:
        val = s->spurious_vec;
        break;
    case 0x10 ... 0x17:
        val = s->isr[index & 7];
        break;
    case 0x18 ... 0x1f:
        val = s->tmr[index & 7];
        break;
    case 0x20 ... 0x27:
        val = s->irr[index & 7];
        break;
    case 0x28:
        val = s->esr;
        break;
    case 0x30:
    case 0x31:
        val = s->icr[index & 1];
        break;
    case 0x32 ... 0x37:
        val = s->lvt[index - 0x32];
        break;
    case 0x38:
        val = s->initial_count;
        break;
    case 0x39:
        val = apic_get_current_count(s);
        break;
    case 0x3e:
        val = s->divide_conf;
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        val = 0;
        break;
    }
    trace_apic_mem_readl(addr, val);
    return val;
}

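/*
 * Decode an MSI write using the standard x86 MSI address/data layout and
 * turn it into an APIC bus message.
 */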
static void apic_send_msi(hwaddr addr, uint32_t data)
{
    uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
    uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    /* XXX: Ignore redirection hint. */
    apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
}

static void apic_mem_writel(void *opaque, hwaddr addr, uint32_t val)
{
    DeviceState *dev;
    APICCommonState *s;
    int index = (addr >> 4) & 0xff;
    if (addr > 0xfff || !index) {
        /* MSI and MMIO APIC are at the same memory location,
         * but actually not on the global bus: MSI is on the PCI bus,
         * the APIC is connected directly to the CPU.
         * Mapping them on the global bus happens to work because
         * MSI registers are reserved in APIC MMIO and vice versa. */
        apic_send_msi(addr, val);
        return;
    }

    dev = cpu_get_current_apic();
    if (!dev) {
        return;
    }
    s = APIC_COMMON(dev);

    trace_apic_mem_writel(addr, val);

    switch(index) {
    case 0x02:
        s->id = (val >> 24);
        break;
    case 0x03:
        break;
    case 0x08:
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE);
        }
        s->tpr = val;
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        apic_update_irq(s);
        break;
    case 0x09:
    case 0x0a:
        break;
    case 0x0b: /* EOI */
        apic_eoi(s);
        break;
    case 0x0d:
        s->log_dest = val >> 24;
        break;
    case 0x0e:
        s->dest_mode = val >> 28;
        break;
    case 0x0f:
        s->spurious_vec = val & 0x1ff;
        apic_update_irq(s);
        break;
    case 0x10 ... 0x17:
    case 0x18 ... 0x1f:
    case 0x20 ... 0x27:
    case 0x28:
        break;
    case 0x30:
        s->icr[0] = val;
        apic_deliver(dev, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1,
                     (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
                     (s->icr[0] >> 15) & 1);
        break;
    case 0x31:
        s->icr[1] = val;
        break;
    case 0x32 ... 0x37:
        {
            int n = index - 0x32;
            s->lvt[n] = val;
            if (n == APIC_LVT_TIMER) {
                apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
            } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) {
                apic_update_irq(s);
            }
        }
        break;
    case 0x38:
        s->initial_count = val;
        s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        apic_timer_update(s, s->initial_count_load_time);
        break;
    case 0x39:
        break;
    case 0x3e:
        {
            int v;
            s->divide_conf = val & 0xb;
            v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
            s->count_shift = (v + 1) & 7;
        }
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        break;
    }
}

static void apic_pre_save(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
}

static void apic_post_load(APICCommonState *s)
{
    if (s->timer_expiry != -1) {
        timer_mod(s->timer, s->timer_expiry);
    } else {
        timer_del(s->timer);
    }
}

static const MemoryRegionOps apic_io_ops = {
    .old_mmio = {
        .read = { apic_mem_readb, apic_mem_readw, apic_mem_readl, },
        .write = { apic_mem_writeb, apic_mem_writew, apic_mem_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

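/*
 * Realize: register the MMIO region (which also accepts MSI writes, see
 * apic_mem_writel), create the timer and enter this APIC into the global
 * local_apics table.
 */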
static void apic_realize(DeviceState *dev, Error **errp)
{
    APICCommonState *s = APIC_COMMON(dev);

    memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi",
                          APIC_SPACE_SIZE);

    s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);
    local_apics[s->idx] = s;

    msi_supported = true;
}

static void apic_class_init(ObjectClass *klass, void *data)
{
    APICCommonClass *k = APIC_COMMON_CLASS(klass);

    k->realize = apic_realize;
    k->set_base = apic_set_base;
    k->set_tpr = apic_set_tpr;
    k->get_tpr = apic_get_tpr;
    k->vapic_base_update = apic_vapic_base_update;
    k->external_nmi = apic_external_nmi;
    k->pre_save = apic_pre_save;
    k->post_load = apic_post_load;
}

static const TypeInfo apic_info = {
    .name = "apic",
    .instance_size = sizeof(APICCommonState),
    .parent = TYPE_APIC_COMMON,
    .class_init = apic_class_init,
};

static void apic_register_types(void)
{
    type_register_static(&apic_info);
}

type_init(apic_register_types)