/*
 * Copyright (c) 2004-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */

#include "apic_internal.h"
#include "apic.h"
#include "ioapic.h"
#include "msi.h"
#include "host-utils.h"
#include "trace.h"
#include "pc.h"

#define MAX_APIC_WORDS 8

/* Intel APIC constants: from include/asm/msidef.h */
#define MSI_DATA_VECTOR_SHIFT           0
#define MSI_DATA_VECTOR_MASK            0x000000ff
#define MSI_DATA_DELIVERY_MODE_SHIFT    8
#define MSI_DATA_TRIGGER_SHIFT          15
#define MSI_DATA_LEVEL_SHIFT            14
#define MSI_ADDR_DEST_MODE_SHIFT        2
#define MSI_ADDR_DEST_ID_SHIFT          12
#define MSI_ADDR_DEST_ID_MASK           0x00ffff0

#define SYNC_FROM_VAPIC                 0x1
#define SYNC_TO_VAPIC                   0x2
#define SYNC_ISR_IRR_TO_VAPIC           0x4

static APICCommonState *local_apics[MAX_APICS + 1];

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICCommonState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode);

/* Find first bit starting from msb */
static int fls_bit(uint32_t value)
{
    return 31 - clz32(value);
}

/* Find first bit starting from lsb */
static int ffs_bit(uint32_t value)
{
    return ctz32(value);
}

static inline void set_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] |= mask;
}

static inline void reset_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] &= ~mask;
}

static inline int get_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    return !!(tab[i] & mask);
}

/* return -1 if no bit is set */
static int get_highest_priority_int(uint32_t *tab)
{
    int i;
    for (i = 7; i >= 0; i--) {
        if (tab[i] != 0) {
            return i * 32 + fls_bit(tab[i]);
        }
    }
    return -1;
}
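
/*
 * Note on the layout assumed throughout this file: IRR, ISR and TMR are
 * 256-bit vectors kept as MAX_APIC_WORDS (8) 32-bit words, one bit per
 * interrupt vector.  get_highest_priority_int() therefore returns the
 * highest pending vector number; its upper nibble (vector & 0xf0) is the
 * APIC priority class that is compared against the TPR/PPR below.
 */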

static void apic_sync_vapic(APICCommonState *s, int sync_type)
{
    VAPICState vapic_state;
    size_t length;
    off_t start;
    int vector;

    if (!s->vapic_paddr) {
        return;
    }
    if (sync_type & SYNC_FROM_VAPIC) {
        cpu_physical_memory_rw(s->vapic_paddr, (void *)&vapic_state,
                               sizeof(vapic_state), 0);
        s->tpr = vapic_state.tpr;
    }
    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
        start = offsetof(VAPICState, isr);
        length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);

        if (sync_type & SYNC_TO_VAPIC) {
            assert(qemu_cpu_is_self(s->cpu_env));

            vapic_state.tpr = s->tpr;
            vapic_state.enabled = 1;
            start = 0;
            length = sizeof(VAPICState);
        }

        vector = get_highest_priority_int(s->isr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.isr = vector & 0xf0;

        vapic_state.zero = 0;

        vector = get_highest_priority_int(s->irr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.irr = vector & 0xff;

        cpu_physical_memory_write_rom(s->vapic_paddr + start,
                                      ((void *)&vapic_state) + start, length);
    }
}

static void apic_vapic_base_update(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_TO_VAPIC);
}

static void apic_local_deliver(APICCommonState *s, int vector)
{
    uint32_t lvt = s->lvt[vector];
    int trigger_mode;

    trace_apic_local_deliver(vector, (lvt >> 8) & 7);

    if (lvt & APIC_LVT_MASKED)
        return;

    switch ((lvt >> 8) & 7) {
    case APIC_DM_SMI:
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_SMI);
        break;

    case APIC_DM_NMI:
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_NMI);
        break;

    case APIC_DM_EXTINT:
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
        break;

    case APIC_DM_FIXED:
        trigger_mode = APIC_TRIGGER_EDGE;
        if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) &&
            (lvt & APIC_LVT_LEVEL_TRIGGER))
            trigger_mode = APIC_TRIGGER_LEVEL;
        apic_set_irq(s, lvt & 0xff, trigger_mode);
    }
}

void apic_deliver_pic_intr(DeviceState *d, int level)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    if (level) {
        apic_local_deliver(s, APIC_LVT_LINT0);
    } else {
        uint32_t lvt = s->lvt[APIC_LVT_LINT0];

        switch ((lvt >> 8) & 7) {
        case APIC_DM_FIXED:
            if (!(lvt & APIC_LVT_LEVEL_TRIGGER))
                break;
            reset_bit(s->irr, lvt & 0xff);
            /* fall through */
        case APIC_DM_EXTINT:
            cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
            break;
        }
    }
}

static void apic_external_nmi(APICCommonState *s)
{
    apic_local_deliver(s, APIC_LVT_LINT1);
}

#define foreach_apic(apic, deliver_bitmask, code) \
{\
    int __i, __j, __mask;\
    for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\
        __mask = deliver_bitmask[__i];\
        if (__mask) {\
            for(__j = 0; __j < 32; __j++) {\
                if (__mask & (1 << __j)) {\
                    apic = local_apics[__i * 32 + __j];\
                    if (apic) {\
                        code;\
                    }\
                }\
            }\
        }\
    }\
}
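
/*
 * foreach_apic() walks the 256-bit delivery bitmask word by word and runs
 * 'code' once for every local APIC whose bit is set and that actually
 * exists in local_apics[].  It is used below for the delivery modes that
 * can target several CPUs at once (SMI, NMI, INIT, SIPI and fixed
 * delivery to multiple destinations).
 */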

static void apic_bus_deliver(const uint32_t *deliver_bitmask,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    APICCommonState *apic_iter;

    switch (delivery_mode) {
    case APIC_DM_LOWPRI:
        /* XXX: search for focus processor, arbitration */
        {
            int i, d;
            d = -1;
            for(i = 0; i < MAX_APIC_WORDS; i++) {
                if (deliver_bitmask[i]) {
                    d = i * 32 + ffs_bit(deliver_bitmask[i]);
                    break;
                }
            }
            if (d >= 0) {
                apic_iter = local_apics[d];
                if (apic_iter) {
                    apic_set_irq(apic_iter, vector_num, trigger_mode);
                }
            }
        }
        return;

    case APIC_DM_FIXED:
        break;

    case APIC_DM_SMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_SMI) );
        return;

    case APIC_DM_NMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_NMI) );
        return;

    case APIC_DM_INIT:
        /* normal INIT IPI sent to processors */
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_INIT) );
        return;

    case APIC_DM_EXTINT:
        /* handled in I/O APIC code */
        break;

    default:
        return;
    }

    foreach_apic(apic_iter, deliver_bitmask,
                 apic_set_irq(apic_iter, vector_num, trigger_mode) );
}

void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
                      uint8_t vector_num, uint8_t trigger_mode)
{
    uint32_t deliver_bitmask[MAX_APIC_WORDS];

    trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
                           trigger_mode);

    apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

static void apic_set_base(APICCommonState *s, uint64_t val)
{
    s->apicbase = (val & 0xfffff000) |
        (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
    /* if disabled, cannot be enabled again */
    if (!(val & MSR_IA32_APICBASE_ENABLE)) {
        s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
        cpu_clear_apic_feature(s->cpu_env);
        s->spurious_vec &= ~APIC_SV_ENABLE;
    }
}

static void apic_set_tpr(APICCommonState *s, uint8_t val)
{
    /* Updates from cr8 are ignored while the VAPIC is active */
    if (!s->vapic_paddr) {
        s->tpr = val << 4;
        apic_update_irq(s);
    }
}

static uint8_t apic_get_tpr(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    return s->tpr >> 4;
}

static int apic_get_ppr(APICCommonState *s)
{
    int tpr, isrv, ppr;

    tpr = (s->tpr >> 4);
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        isrv = 0;
    isrv >>= 4;
    if (tpr >= isrv)
        ppr = s->tpr;
    else
        ppr = isrv << 4;
    return ppr;
}

static int apic_get_arb_pri(APICCommonState *s)
{
    /* XXX: arbitration */
    return 0;
}

/*
 * <0 - low prio interrupt,
 *  0 - no interrupt,
 * >0 - interrupt number
 */
static int apic_irq_pending(APICCommonState *s)
{
    int irrv, ppr;
    irrv = get_highest_priority_int(s->irr);
    if (irrv < 0) {
        return 0;
    }
    ppr = apic_get_ppr(s);
    if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
        return -1;
    }

    return irrv;
}

/* signal the CPU if an irq is pending */
static void apic_update_irq(APICCommonState *s)
{
    if (!(s->spurious_vec & APIC_SV_ENABLE)) {
        return;
    }
    if (apic_irq_pending(s) > 0) {
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
    } else if (apic_accept_pic_intr(&s->busdev.qdev) &&
               pic_get_output(isa_pic)) {
        apic_deliver_pic_intr(&s->busdev.qdev, 1);
    }
}

void apic_poll_irq(DeviceState *d)
{
    APICCommonState *s = APIC_COMMON(d);

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    apic_update_irq(s);
}

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode)
{
    apic_report_irq_delivered(!get_bit(s->irr, vector_num));

    set_bit(s->irr, vector_num);
    if (trigger_mode)
        set_bit(s->tmr, vector_num);
    else
        reset_bit(s->tmr, vector_num);
    if (s->vapic_paddr) {
        apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC);
        /*
         * The vcpu thread needs to see the new IRR before we pull its current
         * TPR value. That way, if we miss a lowering of the TPR, the guest
         * has the chance to notice the new IRR and poll for IRQs on its own.
         */
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
    }

    apic_update_irq(s);
}

static void apic_eoi(APICCommonState *s)
{
    int isrv;
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        return;
    reset_bit(s->isr, isrv);
    if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && get_bit(s->tmr, isrv)) {
        ioapic_eoi_broadcast(isrv);
    }
    apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC);
    apic_update_irq(s);
}

static int apic_find_dest(uint8_t dest)
{
    APICCommonState *apic = local_apics[dest];
    int i;

    if (apic && apic->id == dest)
        return dest;  /* shortcut in case apic->id == apic->idx */

    for (i = 0; i < MAX_APICS; i++) {
        apic = local_apics[i];
        if (apic && apic->id == dest)
            return i;
    }

    return -1;
}
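
/*
 * Destination decoding used by apic_get_delivery_bitmask() below: in
 * physical mode (dest_mode == 0) the destination is either the broadcast
 * id 0xff or a single APIC id looked up via apic_find_dest().  In logical
 * mode the 8-bit destination is matched against each APIC's logical
 * destination register: with the flat model (dest_mode field 0xf) it is a
 * plain bitmap, with the cluster model (0x0) the upper nibble selects the
 * cluster and the lower nibble is a bitmap within that cluster.
 */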

static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode)
{
    APICCommonState *apic_iter;
    int i;

    if (dest_mode == 0) {
        if (dest == 0xff) {
            memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t));
        } else {
            int idx = apic_find_dest(dest);
            memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
            if (idx >= 0)
                set_bit(deliver_bitmask, idx);
        }
    } else {
        /* XXX: cluster mode */
        memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
        for(i = 0; i < MAX_APICS; i++) {
            apic_iter = local_apics[i];
            if (apic_iter) {
                if (apic_iter->dest_mode == 0xf) {
                    if (dest & apic_iter->log_dest)
                        set_bit(deliver_bitmask, i);
                } else if (apic_iter->dest_mode == 0x0) {
                    if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) &&
                        (dest & apic_iter->log_dest & 0x0f)) {
                        set_bit(deliver_bitmask, i);
                    }
                }
            }
        }
    }
}

static void apic_startup(APICCommonState *s, int vector_num)
{
    s->sipi_vector = vector_num;
    cpu_interrupt(s->cpu_env, CPU_INTERRUPT_SIPI);
}

void apic_sipi(DeviceState *d)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_SIPI);

    if (!s->wait_for_sipi)
        return;
    cpu_x86_load_seg_cache_sipi(s->cpu_env, s->sipi_vector);
    s->wait_for_sipi = 0;
}

static void apic_deliver(DeviceState *d, uint8_t dest, uint8_t dest_mode,
                         uint8_t delivery_mode, uint8_t vector_num,
                         uint8_t trigger_mode)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
    uint32_t deliver_bitmask[MAX_APIC_WORDS];
    int dest_shorthand = (s->icr[0] >> 18) & 3;
    APICCommonState *apic_iter;

    switch (dest_shorthand) {
    case 0:
        apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
        break;
    case 1:
        memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask));
        set_bit(deliver_bitmask, s->idx);
        break;
    case 2:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        break;
    case 3:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        reset_bit(deliver_bitmask, s->idx);
        break;
    }

    switch (delivery_mode) {
    case APIC_DM_INIT:
        {
            int trig_mode = (s->icr[0] >> 15) & 1;
            int level = (s->icr[0] >> 14) & 1;
            if (level == 0 && trig_mode == 1) {
                foreach_apic(apic_iter, deliver_bitmask,
                             apic_iter->arb_id = apic_iter->id );
                return;
            }
        }
        break;

    case APIC_DM_SIPI:
        foreach_apic(apic_iter, deliver_bitmask,
                     apic_startup(apic_iter, vector_num) );
        return;
    }

    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

int apic_get_interrupt(DeviceState *d)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
    int intno;

    /* if the APIC is not installed or enabled, we let the 8259 handle the
       interrupts */
    if (!s)
        return -1;
    if (!(s->spurious_vec & APIC_SV_ENABLE))
        return -1;

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    intno = apic_irq_pending(s);

    if (intno == 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return -1;
    } else if (intno < 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return s->spurious_vec & 0xff;
    }
    reset_bit(s->irr, intno);
    set_bit(s->isr, intno);
    apic_sync_vapic(s, SYNC_TO_VAPIC);

    apic_update_irq(s);

    return intno;
}

int apic_accept_pic_intr(DeviceState *d)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
    uint32_t lvt0;

    if (!s)
        return -1;

    lvt0 = s->lvt[APIC_LVT_LINT0];

    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 ||
        (lvt0 & APIC_LVT_MASKED) == 0)
        return 1;

    return 0;
}

static uint32_t apic_get_current_count(APICCommonState *s)
{
    int64_t d;
    uint32_t val;
    d = (qemu_get_clock_ns(vm_clock) - s->initial_count_load_time) >>
        s->count_shift;
    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
        /* periodic */
        val = s->initial_count - (d % ((uint64_t)s->initial_count + 1));
    } else {
        if (d >= s->initial_count)
            val = 0;
        else
            val = s->initial_count - d;
    }
    return val;
}
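
/*
 * Worked example for the timer divide logic (see the divide configuration
 * register write handling in apic_mem_writel() below): if the guest writes
 * divide_conf = 0x8 (binary 1000, architecturally "divide by 32"), then
 * v = (0x8 & 3) | ((0x8 >> 1) & 4) = 4 and count_shift = (4 + 1) & 7 = 5,
 * so apic_get_current_count() above shifts the elapsed vm_clock time right
 * by 5, i.e. divides it by 32.  Likewise divide_conf = 0xb ("divide by 1")
 * gives v = 7 and count_shift = 0.
 */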

static void apic_timer_update(APICCommonState *s, int64_t current_time)
{
    if (apic_next_timer(s, current_time)) {
        qemu_mod_timer(s->timer, s->next_time);
    } else {
        qemu_del_timer(s->timer);
    }
}

static void apic_timer(void *opaque)
{
    APICCommonState *s = opaque;

    apic_local_deliver(s, APIC_LVT_TIMER);
    apic_timer_update(s, s->next_time);
}

static uint32_t apic_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static uint32_t apic_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void apic_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static void apic_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static uint32_t apic_mem_readl(void *opaque, target_phys_addr_t addr)
{
    DeviceState *d;
    APICCommonState *s;
    uint32_t val;
    int index;

    d = cpu_get_current_apic();
    if (!d) {
        return 0;
    }
    s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    index = (addr >> 4) & 0xff;
    switch (index) {
    case 0x02: /* id */
        val = s->id << 24;
        break;
    case 0x03: /* version */
        val = 0x11 | ((APIC_LVT_NB - 1) << 16); /* version 0x11 */
        break;
    case 0x08:
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(s->cpu_env, TPR_ACCESS_READ);
        }
        val = s->tpr;
        break;
    case 0x09:
        val = apic_get_arb_pri(s);
        break;
    case 0x0a:
        /* ppr */
        val = apic_get_ppr(s);
        break;
    case 0x0b:
        val = 0;
        break;
    case 0x0d:
        val = s->log_dest << 24;
        break;
    case 0x0e:
        val = s->dest_mode << 28;
        break;
    case 0x0f:
        val = s->spurious_vec;
        break;
    case 0x10 ... 0x17:
        val = s->isr[index & 7];
        break;
    case 0x18 ... 0x1f:
        val = s->tmr[index & 7];
        break;
    case 0x20 ... 0x27:
        val = s->irr[index & 7];
        break;
    case 0x28:
        val = s->esr;
        break;
    case 0x30:
    case 0x31:
        val = s->icr[index & 1];
        break;
    case 0x32 ... 0x37:
        val = s->lvt[index - 0x32];
        break;
    case 0x38:
        val = s->initial_count;
        break;
    case 0x39:
        val = apic_get_current_count(s);
        break;
    case 0x3e:
        val = s->divide_conf;
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        val = 0;
        break;
    }
    trace_apic_mem_readl(addr, val);
    return val;
}
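
/*
 * A quick worked example of the MSI decoding done by apic_send_msi()
 * below (the values are hypothetical, chosen only for illustration): a
 * write of data 0x00004031 to address 0xfee0100c yields dest = 0x01
 * (address bits 19:12), dest_mode = 1 (logical), vector = 0x31,
 * delivery = 0 (fixed) and trigger_mode = 0 (edge).
 */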

static void apic_send_msi(target_phys_addr_t addr, uint32_t data)
{
    uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
    uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    /* XXX: Ignore redirection hint. */
    apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
}

static void apic_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    DeviceState *d;
    APICCommonState *s;
    int index = (addr >> 4) & 0xff;
    if (addr > 0xfff || !index) {
        /* MSI and MMIO APIC are at the same memory location,
         * but actually not on the global bus: MSI is on the PCI bus,
         * the APIC is connected directly to the CPU.
         * Mapping them on the global bus happens to work because
         * MSI registers are reserved in APIC MMIO and vice versa. */
        apic_send_msi(addr, val);
        return;
    }

    d = cpu_get_current_apic();
    if (!d) {
        return;
    }
    s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    trace_apic_mem_writel(addr, val);

    switch (index) {
    case 0x02:
        s->id = (val >> 24);
        break;
    case 0x03:
        break;
    case 0x08:
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(s->cpu_env, TPR_ACCESS_WRITE);
        }
        s->tpr = val;
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        apic_update_irq(s);
        break;
    case 0x09:
    case 0x0a:
        break;
    case 0x0b: /* EOI */
        apic_eoi(s);
        break;
    case 0x0d:
        s->log_dest = val >> 24;
        break;
    case 0x0e:
        s->dest_mode = val >> 28;
        break;
    case 0x0f:
        s->spurious_vec = val & 0x1ff;
        apic_update_irq(s);
        break;
    case 0x10 ... 0x17:
    case 0x18 ... 0x1f:
    case 0x20 ... 0x27:
    case 0x28:
        break;
    case 0x30:
        s->icr[0] = val;
        apic_deliver(d, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1,
                     (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
                     (s->icr[0] >> 15) & 1);
        break;
    case 0x31:
        s->icr[1] = val;
        break;
    case 0x32 ... 0x37:
        {
            int n = index - 0x32;
            s->lvt[n] = val;
            if (n == APIC_LVT_TIMER)
                apic_timer_update(s, qemu_get_clock_ns(vm_clock));
        }
        break;
    case 0x38:
        s->initial_count = val;
        s->initial_count_load_time = qemu_get_clock_ns(vm_clock);
        apic_timer_update(s, s->initial_count_load_time);
        break;
    case 0x39:
        break;
    case 0x3e:
        {
            int v;
            s->divide_conf = val & 0xb;
            v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
            s->count_shift = (v + 1) & 7;
        }
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        break;
    }
}

static void apic_pre_save(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
}

static void apic_post_load(APICCommonState *s)
{
    if (s->timer_expiry != -1) {
        qemu_mod_timer(s->timer, s->timer_expiry);
    } else {
        qemu_del_timer(s->timer);
    }
}

static const MemoryRegionOps apic_io_ops = {
    .old_mmio = {
        .read = { apic_mem_readb, apic_mem_readw, apic_mem_readl, },
        .write = { apic_mem_writeb, apic_mem_writew, apic_mem_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void apic_init(APICCommonState *s)
{
    memory_region_init_io(&s->io_memory, &apic_io_ops, s, "apic-msi",
                          MSI_SPACE_SIZE);

    s->timer = qemu_new_timer_ns(vm_clock, apic_timer, s);
    local_apics[s->idx] = s;

    msi_supported = true;
}

static void apic_class_init(ObjectClass *klass, void *data)
{
    APICCommonClass *k = APIC_COMMON_CLASS(klass);

    k->init = apic_init;
    k->set_base = apic_set_base;
    k->set_tpr = apic_set_tpr;
    k->get_tpr = apic_get_tpr;
    k->vapic_base_update = apic_vapic_base_update;
    k->external_nmi = apic_external_nmi;
    k->pre_save = apic_pre_save;
    k->post_load = apic_post_load;
}

static TypeInfo apic_info = {
    .name          = "apic",
    .instance_size = sizeof(APICCommonState),
    .parent        = TYPE_APIC_COMMON,
    .class_init    = apic_class_init,
};

static void apic_register_types(void)
{
    type_register_static(&apic_info);
}

type_init(apic_register_types)