/*
 * Copyright (c) 2004-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */
#include "qemu-timer.h"
#include "host-utils.h"

/* APIC Local Vector Table */
#define APIC_LVT_TIMER      0
#define APIC_LVT_THERMAL    1
#define APIC_LVT_PERFORM    2
#define APIC_LVT_LINT0      3
#define APIC_LVT_LINT1      4
#define APIC_LVT_ERROR      5
#define APIC_LVT_NB         6

/* APIC delivery modes */
#define APIC_DM_FIXED   0
#define APIC_DM_LOWPRI  1
#define APIC_DM_SMI     2
#define APIC_DM_NMI     4
#define APIC_DM_INIT    5
#define APIC_DM_SIPI    6
#define APIC_DM_EXTINT  7

/* APIC destination mode */
#define APIC_DESTMODE_FLAT      0xf
#define APIC_DESTMODE_CLUSTER   1

#define APIC_TRIGGER_EDGE   0
#define APIC_TRIGGER_LEVEL  1

#define APIC_LVT_TIMER_PERIODIC (1<<17)
#define APIC_LVT_MASKED         (1<<16)
#define APIC_LVT_LEVEL_TRIGGER  (1<<15)
#define APIC_LVT_REMOTE_IRR     (1<<14)
#define APIC_INPUT_POLARITY     (1<<13)
#define APIC_SEND_PENDING       (1<<12)

#define ESR_ILLEGAL_ADDRESS (1 << 7)

#define APIC_SV_DIRECTED_IO (1<<12)
#define APIC_SV_ENABLE      (1<<8)

#define MAX_APIC_WORDS 8

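/* 8 words * 32 bits = 256 bits, i.e. one delivery-bitmask bit for every
 * possible APIC ID. */
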
/* Intel APIC constants: from include/asm/msidef.h */
#define MSI_DATA_VECTOR_SHIFT           0
#define MSI_DATA_VECTOR_MASK            0x000000ff
#define MSI_DATA_DELIVERY_MODE_SHIFT    8
#define MSI_DATA_TRIGGER_SHIFT          15
#define MSI_DATA_LEVEL_SHIFT            14
#define MSI_ADDR_DEST_MODE_SHIFT        2
#define MSI_ADDR_DEST_ID_SHIFT          12
#define MSI_ADDR_DEST_ID_MASK           0x00ffff0

#define MSI_ADDR_SIZE                   0x100000

typedef struct APICState APICState;

struct APICState {
    SysBusDevice busdev;
    MemoryRegion io_memory;
    CPUState *cpu_env;
    uint32_t apicbase;
    uint8_t id;
    uint8_t arb_id;
    uint8_t tpr;
    uint32_t spurious_vec;
    uint8_t log_dest;
    uint8_t dest_mode;
    uint32_t isr[8]; /* in service register */
    uint32_t tmr[8]; /* trigger mode register */
    uint32_t irr[8]; /* interrupt request register */
    uint32_t lvt[APIC_LVT_NB];
    uint32_t esr; /* error register */
    uint32_t icr[2];

    uint32_t divide_conf;
    int count_shift;
    uint32_t initial_count;
    int64_t initial_count_load_time, next_time;
    int idx;
    QEMUTimer *timer;
    int sipi_vector;
    int wait_for_sipi;
};

static APICState *local_apics[MAX_APICS + 1];
static int apic_irq_delivered;

static void apic_set_irq(APICState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode);

/* Find first bit starting from msb */
static int fls_bit(uint32_t value)
{
    return 31 - clz32(value);
}

/* Find first bit starting from lsb */
static int ffs_bit(uint32_t value)
{
    return ctz32(value);
}

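/* For a single set bit the two helpers agree, e.g. fls_bit(0x10) and
 * ffs_bit(0x10) both return 4; they only differ when several bits are set
 * (msb vs. lsb). */
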
static inline void set_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] |= mask;
}

static inline void reset_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] &= ~mask;
}

static inline int get_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    return !!(tab[i] & mask);
}

static void apic_local_deliver(APICState *s, int vector)
{
    uint32_t lvt = s->lvt[vector];
    int trigger_mode;

    trace_apic_local_deliver(vector, (lvt >> 8) & 7);

    if (lvt & APIC_LVT_MASKED)
        return;

    switch ((lvt >> 8) & 7) {
    case APIC_DM_SMI:
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_SMI);
        break;

    case APIC_DM_NMI:
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_NMI);
        break;

    case APIC_DM_EXTINT:
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
        break;

    case APIC_DM_FIXED:
        trigger_mode = APIC_TRIGGER_EDGE;
        if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) &&
            (lvt & APIC_LVT_LEVEL_TRIGGER))
            trigger_mode = APIC_TRIGGER_LEVEL;
        apic_set_irq(s, lvt & 0xff, trigger_mode);
    }
}

void apic_deliver_pic_intr(DeviceState *d, int level)
{
    APICState *s = DO_UPCAST(APICState, busdev.qdev, d);

    if (level) {
        apic_local_deliver(s, APIC_LVT_LINT0);
    } else {
        uint32_t lvt = s->lvt[APIC_LVT_LINT0];

        switch ((lvt >> 8) & 7) {
        case APIC_DM_FIXED:
            if (!(lvt & APIC_LVT_LEVEL_TRIGGER))
                break;
            reset_bit(s->irr, lvt & 0xff);
            /* fall through */
        case APIC_DM_EXTINT:
            cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
            break;
        }
    }
}

#define foreach_apic(apic, deliver_bitmask, code) \
{\
    int __i, __j, __mask;\
    for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\
        __mask = deliver_bitmask[__i];\
        for(__j = 0; __j < 32; __j++) {\
            if (__mask & (1 << __j)) {\
                apic = local_apics[__i * 32 + __j];\
                if (apic) { code; }\
            }\
        }\
    }\
}

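/* The macro above expands "code" once per local APIC whose bit is set in
 * deliver_bitmask, with "apic" bound to the corresponding local_apics[]
 * entry; empty (NULL) slots are skipped. */
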
static void apic_bus_deliver(const uint32_t *deliver_bitmask,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    APICState *apic_iter;

    switch (delivery_mode) {
    case APIC_DM_LOWPRI:
        /* XXX: search for focus processor, arbitration */
        {
            int i, d;
            d = -1;
            for(i = 0; i < MAX_APIC_WORDS; i++) {
                if (deliver_bitmask[i]) {
                    d = i * 32 + ffs_bit(deliver_bitmask[i]);
                    break;
                }
            }
            if (d >= 0) {
                apic_iter = local_apics[d];
                if (apic_iter) {
                    apic_set_irq(apic_iter, vector_num, trigger_mode);
                }
            }
        }
        return;

    case APIC_DM_FIXED:
        break;

    case APIC_DM_SMI:
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_SMI) );
        return;

    case APIC_DM_NMI:
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_NMI) );
        return;

    case APIC_DM_INIT:
        /* normal INIT IPI sent to processors */
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_INIT) );
        return;

    case APIC_DM_EXTINT:
        /* handled in I/O APIC code */
        break;

    default:
        return;
    }

    foreach_apic(apic_iter, deliver_bitmask,
                 apic_set_irq(apic_iter, vector_num, trigger_mode) );
}

void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
                      uint8_t vector_num, uint8_t trigger_mode)
{
    uint32_t deliver_bitmask[MAX_APIC_WORDS];

    trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
                           trigger_mode);

    apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

void cpu_set_apic_base(DeviceState *d, uint64_t val)
{
    APICState *s = DO_UPCAST(APICState, busdev.qdev, d);

    trace_cpu_set_apic_base(val);

    if (!s)
        return;
    s->apicbase = (val & 0xfffff000) |
        (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
    /* if disabled, cannot be enabled again */
    if (!(val & MSR_IA32_APICBASE_ENABLE)) {
        s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
        cpu_clear_apic_feature(s->cpu_env);
        s->spurious_vec &= ~APIC_SV_ENABLE;
    }
}

uint64_t cpu_get_apic_base(DeviceState *d)
{
    APICState *s = DO_UPCAST(APICState, busdev.qdev, d);

    trace_cpu_get_apic_base(s ? (uint64_t)s->apicbase : 0);

    return s ? s->apicbase : 0;
}

void cpu_set_apic_tpr(DeviceState *d, uint8_t val)
{
    APICState *s = DO_UPCAST(APICState, busdev.qdev, d);

    if (!s)
        return;
    s->tpr = (val & 0x0f) << 4;
    apic_update_irq(s);
}

uint8_t cpu_get_apic_tpr(DeviceState *d)
{
    APICState *s = DO_UPCAST(APICState, busdev.qdev, d);

    return s ? s->tpr >> 4 : 0;
}

/* return -1 if no bit is set */
static int get_highest_priority_int(uint32_t *tab)
{
    int i;

    for(i = 7; i >= 0; i--) {
        if (tab[i] != 0) {
            return i * 32 + fls_bit(tab[i]);
        }
    }
    return -1;
}

static int apic_get_ppr(APICState *s)
{
    int tpr = s->tpr >> 4;
    int isrv = get_highest_priority_int(s->isr);

    if (isrv < 0) {
        isrv = 0;
    }
    isrv >>= 4;
    return (tpr >= isrv) ? s->tpr : (isrv << 4);
}

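/* Architecturally, PPR (processor priority) is the higher of the TPR class
 * and the priority class of the highest vector currently in service;
 * apic_irq_pending() below only reports vectors whose class is strictly
 * above PPR. */
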
static int apic_get_arb_pri(APICState *s)
{
    /* XXX: arbitration */
    return 0;
}

/*
 * <0 - low prio interrupt,
 *  0 - no interrupt,
 * >0 - interrupt number
 */
static int apic_irq_pending(APICState *s)
{
    int irrv, ppr;

    irrv = get_highest_priority_int(s->irr);
    if (irrv < 0) {
        return 0;
    }
    ppr = apic_get_ppr(s);
    if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
        return -1;
    }

    return irrv;
}

/* signal the CPU if an irq is pending */
static void apic_update_irq(APICState *s)
{
    if (!(s->spurious_vec & APIC_SV_ENABLE)) {
        return;
    }
    if (apic_irq_pending(s) > 0) {
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
    } else if (apic_accept_pic_intr(&s->busdev.qdev) &&
               pic_get_output(isa_pic)) {
        apic_deliver_pic_intr(&s->busdev.qdev, 1);
    }
}

void apic_reset_irq_delivered(void)
{
    trace_apic_reset_irq_delivered(apic_irq_delivered);

    apic_irq_delivered = 0;
}

int apic_get_irq_delivered(void)
{
    trace_apic_get_irq_delivered(apic_irq_delivered);

    return apic_irq_delivered;
}

static void apic_set_irq(APICState *s, int vector_num, int trigger_mode)
{
    apic_irq_delivered += !get_bit(s->irr, vector_num);

    trace_apic_set_irq(apic_irq_delivered);

    set_bit(s->irr, vector_num);
    if (trigger_mode)
        set_bit(s->tmr, vector_num);
    else
        reset_bit(s->tmr, vector_num);
    apic_update_irq(s);
}

static void apic_eoi(APICState *s)
{
    int isrv;

    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        return;
    reset_bit(s->isr, isrv);
    if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && get_bit(s->tmr, isrv)) {
        ioapic_eoi_broadcast(isrv);
    }
    apic_update_irq(s);
}

static int apic_find_dest(uint8_t dest)
{
    APICState *apic = local_apics[dest];
    int i;

    if (apic && apic->id == dest)
        return dest;  /* shortcut in case apic->id == apic->idx */

    for (i = 0; i < MAX_APICS; i++) {
        apic = local_apics[i];
        if (apic && apic->id == dest)
            return i;
        if (!apic)
            break;
    }

    return -1;
}

static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode)
{
    APICState *apic_iter;
    int i;

    if (dest_mode == 0) {
        if (dest == 0xff) {
            memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t));
        } else {
            int idx = apic_find_dest(dest);
            memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
            if (idx >= 0)
                set_bit(deliver_bitmask, idx);
        }
    } else {
        /* XXX: cluster mode */
        memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
        for(i = 0; i < MAX_APICS; i++) {
            apic_iter = local_apics[i];
            if (apic_iter) {
                if (apic_iter->dest_mode == 0xf) {
                    if (dest & apic_iter->log_dest)
                        set_bit(deliver_bitmask, i);
                } else if (apic_iter->dest_mode == 0x0) {
                    if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) &&
                        (dest & apic_iter->log_dest & 0x0f)) {
                        set_bit(deliver_bitmask, i);
                    }
                }
            } else {
                break;
            }
        }
    }
}

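/* In logical destination mode the 8-bit destination is matched against each
 * APIC's logical destination register: the flat model (DFR model 0xf) treats
 * it as a bitmap, while the cluster model (DFR model 0x0) matches the high
 * nibble as a cluster ID and ANDs the low nibbles. */
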
void apic_init_reset(DeviceState *d)
{
    APICState *s = DO_UPCAST(APICState, busdev.qdev, d);
    int i;

    if (!s)
        return;
    s->tpr = 0;
    s->spurious_vec = 0xff;
    s->log_dest = 0;
    s->dest_mode = 0xf;
    memset(s->isr, 0, sizeof(s->isr));
    memset(s->tmr, 0, sizeof(s->tmr));
    memset(s->irr, 0, sizeof(s->irr));
    for(i = 0; i < APIC_LVT_NB; i++)
        s->lvt[i] = 1 << 16; /* mask LVT */
    s->esr = 0;
    memset(s->icr, 0, sizeof(s->icr));
    s->divide_conf = 0;
    s->count_shift = 0;
    s->initial_count = 0;
    s->initial_count_load_time = 0;
    s->next_time = 0;
    s->wait_for_sipi = 1;
}

static void apic_startup(APICState *s, int vector_num)
{
    s->sipi_vector = vector_num;
    cpu_interrupt(s->cpu_env, CPU_INTERRUPT_SIPI);
}

void apic_sipi(DeviceState *d)
{
    APICState *s = DO_UPCAST(APICState, busdev.qdev, d);

    cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_SIPI);

    if (!s->wait_for_sipi)
        return;
    cpu_x86_load_seg_cache_sipi(s->cpu_env, s->sipi_vector);
    s->wait_for_sipi = 0;
}

static void apic_deliver(DeviceState *d, uint8_t dest, uint8_t dest_mode,
                         uint8_t delivery_mode, uint8_t vector_num,
                         uint8_t trigger_mode)
{
    APICState *s = DO_UPCAST(APICState, busdev.qdev, d);
    uint32_t deliver_bitmask[MAX_APIC_WORDS];
    int dest_shorthand = (s->icr[0] >> 18) & 3;
    APICState *apic_iter;

    switch (dest_shorthand) {
    case 0:
        apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
        break;
    case 1:
        memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask));
        set_bit(deliver_bitmask, s->idx);
        break;
    case 2:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        break;
    case 3:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        reset_bit(deliver_bitmask, s->idx);
        break;
    }

    switch (delivery_mode) {
    case APIC_DM_INIT:
        {
            int trig_mode = (s->icr[0] >> 15) & 1;
            int level = (s->icr[0] >> 14) & 1;
            if (level == 0 && trig_mode == 1) {
                foreach_apic(apic_iter, deliver_bitmask,
                             apic_iter->arb_id = apic_iter->id );
                return;
            }
        }
        break;

    case APIC_DM_SIPI:
        foreach_apic(apic_iter, deliver_bitmask,
                     apic_startup(apic_iter, vector_num) );
        return;
    }

    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

int apic_get_interrupt(DeviceState *d)
{
    APICState *s = DO_UPCAST(APICState, busdev.qdev, d);
    int intno;

    /* if the APIC is not installed or enabled, we let the 8259 handle the
       IRQs */
    if (!s)
        return -1;
    if (!(s->spurious_vec & APIC_SV_ENABLE))
        return -1;

    intno = apic_irq_pending(s);

    if (intno == 0) {
        return -1;
    } else if (intno < 0) {
        return s->spurious_vec & 0xff;
    }
    reset_bit(s->irr, intno);
    set_bit(s->isr, intno);
    apic_update_irq(s);
    return intno;
}

int apic_accept_pic_intr(DeviceState *d)
{
    APICState *s = DO_UPCAST(APICState, busdev.qdev, d);
    uint32_t lvt0;

    if (!s)
        return -1;

    lvt0 = s->lvt[APIC_LVT_LINT0];

    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 ||
        (lvt0 & APIC_LVT_MASKED) == 0)
        return 1;

    return 0;
}

static uint32_t apic_get_current_count(APICState *s)
{
    int64_t d;
    uint32_t val;

    d = (qemu_get_clock_ns(vm_clock) - s->initial_count_load_time) >>
        s->count_shift;
    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
        /* periodic */
        val = s->initial_count - (d % ((uint64_t)s->initial_count + 1));
    } else {
        if (d >= s->initial_count)
            val = 0;
        else
            val = s->initial_count - d;
    }
    return val;
}

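/* Worked example (illustrative values, not taken from the code): with
 * initial_count = 100 and count_shift = 1 (divide by 2), 250 ns after the
 * load the shifted delta d is 125, so a periodic timer reads
 * 100 - (125 % 101) = 76, while a one-shot timer has already reached 0. */
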
static void apic_timer_update(APICState *s, int64_t current_time)
{
    int64_t next_time, d;

    if (!(s->lvt[APIC_LVT_TIMER] & APIC_LVT_MASKED)) {
        d = (current_time - s->initial_count_load_time) >>
            s->count_shift;
        if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
            if (!s->initial_count)
                goto no_timer;
            d = ((d / ((uint64_t)s->initial_count + 1)) + 1) *
                ((uint64_t)s->initial_count + 1);
        } else {
            if (d >= s->initial_count)
                goto no_timer;
            d = (uint64_t)s->initial_count + 1;
        }
        next_time = s->initial_count_load_time + (d << s->count_shift);
        qemu_mod_timer(s->timer, next_time);
        s->next_time = next_time;
    } else {
    no_timer:
        qemu_del_timer(s->timer);
    }
}

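/* apic_timer_update() arms the QEMU timer for the next expiry: in periodic
 * mode it rounds the elapsed (shifted) time up to the next multiple of
 * initial_count + 1; in one-shot mode it fires once, initial_count + 1 ticks
 * after the load, and the timer is cancelled if that point has already
 * passed or the LVT timer entry is masked. */
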
static void apic_timer(void *opaque)
{
    APICState *s = opaque;

    apic_local_deliver(s, APIC_LVT_TIMER);
    apic_timer_update(s, s->next_time);
}

static uint32_t apic_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static uint32_t apic_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void apic_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static void apic_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static uint32_t apic_mem_readl(void *opaque, target_phys_addr_t addr)
{
    DeviceState *d;
    APICState *s;
    uint32_t val;
    int index;

    d = cpu_get_current_apic();
    if (!d) {
        return 0;
    }
    s = DO_UPCAST(APICState, busdev.qdev, d);

    index = (addr >> 4) & 0xff;
    switch(index) {
    case 0x02: /* id */
        val = s->id << 24;
        break;
    case 0x03: /* version */
        val = 0x11 | ((APIC_LVT_NB - 1) << 16); /* version 0x11 */
        break;
    case 0x08: /* TPR */
        val = s->tpr;
        break;
    case 0x09: /* arbitration priority */
        val = apic_get_arb_pri(s);
        break;
    case 0x0a: /* processor priority */
        val = apic_get_ppr(s);
        break;
    case 0x0b: /* EOI (write only) */
        val = 0;
        break;
    case 0x0d: /* logical destination */
        val = s->log_dest << 24;
        break;
    case 0x0e: /* destination format */
        val = s->dest_mode << 28;
        break;
    case 0x0f: /* spurious interrupt vector */
        val = s->spurious_vec;
        break;
    case 0x10 ... 0x17:
        val = s->isr[index & 7];
        break;
    case 0x18 ... 0x1f:
        val = s->tmr[index & 7];
        break;
    case 0x20 ... 0x27:
        val = s->irr[index & 7];
        break;
    case 0x28:
        val = s->esr;
        break;
    case 0x30:
    case 0x31:
        val = s->icr[index & 1];
        break;
    case 0x32 ... 0x37:
        val = s->lvt[index - 0x32];
        break;
    case 0x38:
        val = s->initial_count;
        break;
    case 0x39:
        val = apic_get_current_count(s);
        break;
    case 0x3e:
        val = s->divide_conf;
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        val = 0;
        break;
    }
    trace_apic_mem_readl(addr, val);
    return val;
}

static void apic_send_msi(target_phys_addr_t addr, uint32_t data)
{
    uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
    uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    /* XXX: Ignore redirection hint. */
    apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
}

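/* Worked example (illustrative values): an MSI write of data 0x4041 to
 * address 0xfee01000 decodes to dest = 1, vector = 0x41, fixed delivery
 * (delivery mode 0), physical destination mode, edge trigger. */
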
static void apic_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    DeviceState *d;
    APICState *s;
    int index = (addr >> 4) & 0xff;

    if (addr > 0xfff || !index) {
        /* MSI and MMIO APIC are at the same memory location,
         * but actually not on the global bus: MSI is on the PCI bus,
         * the APIC is connected directly to the CPU.
         * Mapping them on the global bus happens to work because
         * MSI registers are reserved in APIC MMIO and vice versa. */
        apic_send_msi(addr, val);
        return;
    }

    d = cpu_get_current_apic();
    if (!d) {
        return;
    }
    s = DO_UPCAST(APICState, busdev.qdev, d);

    trace_apic_mem_writel(addr, val);

    switch(index) {
    case 0x02:
        s->id = (val >> 24);
        break;
    case 0x03:
        break;
    case 0x08:
        s->tpr = val;
        apic_update_irq(s);
        break;
    case 0x09:
    case 0x0a:
        break;
    case 0x0b: /* EOI */
        apic_eoi(s);
        break;
    case 0x0d:
        s->log_dest = val >> 24;
        break;
    case 0x0e:
        s->dest_mode = val >> 28;
        break;
    case 0x0f:
        s->spurious_vec = val & 0x1ff;
        apic_update_irq(s);
        break;
    case 0x10 ... 0x17:
    case 0x18 ... 0x1f:
    case 0x20 ... 0x27:
    case 0x28:
        break;
    case 0x30:
        s->icr[0] = val;
        apic_deliver(d, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1,
                     (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
                     (s->icr[0] >> 15) & 1);
        break;
    case 0x31:
        s->icr[1] = val;
        break;
    case 0x32 ... 0x37:
        {
            int n = index - 0x32;
            s->lvt[n] = val;
            if (n == APIC_LVT_TIMER)
                apic_timer_update(s, qemu_get_clock_ns(vm_clock));
        }
        break;
    case 0x38:
        s->initial_count = val;
        s->initial_count_load_time = qemu_get_clock_ns(vm_clock);
        apic_timer_update(s, s->initial_count_load_time);
        break;
    case 0x39:
        break;
    case 0x3e:
        {
            int v;
            s->divide_conf = val & 0xb;
            v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
            s->count_shift = (v + 1) & 7;
        }
        break;
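
    /* The divide configuration maps to a shift count, e.g. divide_conf = 0
     * gives count_shift = 1 (divide by 2) and divide_conf = 0xb gives
     * count_shift = 0 (divide by 1), matching the architectural divide
     * values. */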
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        break;
    }
}

/* This function is only used for old state version 1 and 2 */
static int apic_load_old(QEMUFile *f, void *opaque, int version_id)
{
    APICState *s = opaque;
    int i;

    if (version_id > 2)
        return -EINVAL;

    /* XXX: what if the base changes? (registered memory regions) */
    qemu_get_be32s(f, &s->apicbase);
    qemu_get_8s(f, &s->id);
    qemu_get_8s(f, &s->arb_id);
    qemu_get_8s(f, &s->tpr);
    qemu_get_be32s(f, &s->spurious_vec);
    qemu_get_8s(f, &s->log_dest);
    qemu_get_8s(f, &s->dest_mode);
    for (i = 0; i < 8; i++) {
        qemu_get_be32s(f, &s->isr[i]);
        qemu_get_be32s(f, &s->tmr[i]);
        qemu_get_be32s(f, &s->irr[i]);
    }
    for (i = 0; i < APIC_LVT_NB; i++) {
        qemu_get_be32s(f, &s->lvt[i]);
    }
    qemu_get_be32s(f, &s->esr);
    qemu_get_be32s(f, &s->icr[0]);
    qemu_get_be32s(f, &s->icr[1]);
    qemu_get_be32s(f, &s->divide_conf);
    s->count_shift = qemu_get_be32(f);
    qemu_get_be32s(f, &s->initial_count);
    s->initial_count_load_time = qemu_get_be64(f);
    s->next_time = qemu_get_be64(f);

    if (version_id >= 2)
        qemu_get_timer(f, s->timer);
    return 0;
}

static const VMStateDescription vmstate_apic = {
    .name = "apic",
    .version_id = 3,
    .minimum_version_id = 3,
    .minimum_version_id_old = 1,
    .load_state_old = apic_load_old,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(apicbase, APICState),
        VMSTATE_UINT8(id, APICState),
        VMSTATE_UINT8(arb_id, APICState),
        VMSTATE_UINT8(tpr, APICState),
        VMSTATE_UINT32(spurious_vec, APICState),
        VMSTATE_UINT8(log_dest, APICState),
        VMSTATE_UINT8(dest_mode, APICState),
        VMSTATE_UINT32_ARRAY(isr, APICState, 8),
        VMSTATE_UINT32_ARRAY(tmr, APICState, 8),
        VMSTATE_UINT32_ARRAY(irr, APICState, 8),
        VMSTATE_UINT32_ARRAY(lvt, APICState, APIC_LVT_NB),
        VMSTATE_UINT32(esr, APICState),
        VMSTATE_UINT32_ARRAY(icr, APICState, 2),
        VMSTATE_UINT32(divide_conf, APICState),
        VMSTATE_INT32(count_shift, APICState),
        VMSTATE_UINT32(initial_count, APICState),
        VMSTATE_INT64(initial_count_load_time, APICState),
        VMSTATE_INT64(next_time, APICState),
        VMSTATE_TIMER(timer, APICState),
        VMSTATE_END_OF_LIST()
    }
};

static void apic_reset(DeviceState *d)
{
    APICState *s = DO_UPCAST(APICState, busdev.qdev, d);
    int bsp;

    bsp = cpu_is_bsp(s->cpu_env);
    s->apicbase = 0xfee00000 |
        (bsp ? MSR_IA32_APICBASE_BSP : 0) | MSR_IA32_APICBASE_ENABLE;

    apic_init_reset(d);

    if (bsp) {
        /*
         * LINT0 delivery mode on CPU #0 is set to ExtInt at initialization
         * time typically by BIOS, so PIC interrupt can be delivered to the
         * processor when local APIC is enabled.
         */
        s->lvt[APIC_LVT_LINT0] = 0x700;
    }
}

static const MemoryRegionOps apic_io_ops = {
    .old_mmio = {
        .read = { apic_mem_readb, apic_mem_readw, apic_mem_readl, },
        .write = { apic_mem_writeb, apic_mem_writew, apic_mem_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int apic_init1(SysBusDevice *dev)
{
    APICState *s = FROM_SYSBUS(APICState, dev);
    static int last_apic_idx;

    if (last_apic_idx >= MAX_APICS) {
        return -1;
    }
    memory_region_init_io(&s->io_memory, &apic_io_ops, s, "apic",
                          MSI_ADDR_SIZE);
    sysbus_init_mmio_region(dev, &s->io_memory);

    s->timer = qemu_new_timer_ns(vm_clock, apic_timer, s);
    s->idx = last_apic_idx++;
    local_apics[s->idx] = s;
    return 0;
}

static SysBusDeviceInfo apic_info = {
    .init = apic_init1,
    .qdev.name = "apic",
    .qdev.size = sizeof(APICState),
    .qdev.vmsd = &vmstate_apic,
    .qdev.reset = apic_reset,
    .qdev.props = (Property[]) {
        DEFINE_PROP_UINT8("id", APICState, id, -1),
        DEFINE_PROP_PTR("cpu_env", APICState, cpu_env),
        DEFINE_PROP_END_OF_LIST(),
    },
};

static void apic_register_devices(void)
{
    sysbus_register_withprop(&apic_info);
}

device_init(apic_register_devices)