cputimer: Add per-cpu handler and private data for interrupt cputimer.
sys/platform/pc64/apic/lapic.c (blob 9500b648b70cef4c878783ab239ef0b746b71a3c)
/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mpapic.c,v 1.37.2.7 2003/01/25 02:31:47 peter Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/machintr.h>
#include <machine/globaldata.h>
#include <machine/smp.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine_base/apic/apicvar.h>
#include <machine_base/icu/icu_var.h>
#include <machine/segments.h>
#include <sys/thread2.h>

#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
extern int naps;

volatile lapic_t *lapic;

static void     lapic_timer_calibrate(void);
static void     lapic_timer_set_divisor(int);
static void     lapic_timer_fixup_handler(void *);
static void     lapic_timer_restart_handler(void *);

static int      lapic_timer_enable = 1;
TUNABLE_INT("hw.lapic_timer_enable", &lapic_timer_enable);

static void     lapic_timer_intr_reload(struct cputimer_intr *, sysclock_t);
static void     lapic_timer_intr_enable(struct cputimer_intr *);
static void     lapic_timer_intr_restart(struct cputimer_intr *);
static void     lapic_timer_intr_pmfixup(struct cputimer_intr *);
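/*
 * Interrupt cputimer descriptor for the LAPIC timer.  The per-cpu
 * handler (.pcpuhand) and private-data (.priv) fields added for
 * interrupt cputimers are not needed by the LAPIC timer, so both are
 * left NULL below.
 */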
static struct cputimer_intr lapic_cputimer_intr = {
        .freq = 0,
        .reload = lapic_timer_intr_reload,
        .enable = lapic_timer_intr_enable,
        .config = cputimer_intr_default_config,
        .restart = lapic_timer_intr_restart,
        .pmfixup = lapic_timer_intr_pmfixup,
        .initclock = cputimer_intr_default_initclock,
        .pcpuhand = NULL,
        .next = SLIST_ENTRY_INITIALIZER,
        .name = "lapic",
        .type = CPUTIMER_INTR_LAPIC,
        .prio = CPUTIMER_INTR_PRIO_LAPIC,
        .caps = CPUTIMER_INTR_CAP_NONE,
        .priv = NULL
};
static int              lapic_timer_divisor_idx = -1;
static const uint32_t   lapic_timer_divisors[] = {
        APIC_TDCR_2,    APIC_TDCR_4,    APIC_TDCR_8,    APIC_TDCR_16,
        APIC_TDCR_32,   APIC_TDCR_64,   APIC_TDCR_128,  APIC_TDCR_1
};
#define APIC_TIMER_NDIVISORS (int)(NELEM(lapic_timer_divisors))

/*
 * APIC ID <-> CPU ID mapping structures.
 */
int     cpu_id_to_apic_id[NAPICID];
int     apic_id_to_cpu_id[NAPICID];
int     lapic_enable = 1;
/*
 * Enable LAPIC, configure interrupts.
 */
void
lapic_init(boolean_t bsp)
{
        uint32_t timer;
        u_int temp;

        /*
         * Install vectors
         *
         * Since IDT is shared between BSP and APs, these vectors
         * only need to be installed once; we do it on BSP.
         */
        if (bsp) {
                if (cpu_vendor_id == CPU_VENDOR_AMD &&
                    CPUID_TO_FAMILY(cpu_id) >= 0xf) {
                        uint32_t tcr;

                        /*
                         * Set the LINTEN bit in the HyperTransport
                         * Transaction Control Register.
                         *
                         * This will cause EXTINT and NMI interrupts
                         * routed over the hypertransport bus to be
                         * fed into the LAPIC LINT0/LINT1.  If the bit
                         * isn't set, the interrupts will go to the
                         * general cpu INTR/NMI pins.  On a dual-core
                         * cpu the interrupt winds up going to BOTH cpus.
                         * The first cpu that does the interrupt ack
                         * cycle will get the correct interrupt.  The
                         * second cpu that does it will get a spurious
                         * interrupt vector (typically IRQ 7).
                         */
                        outl(0x0cf8,
                            (1 << 31) |         /* enable */
                            (0 << 16) |         /* bus */
                            (0x18 << 11) |      /* dev (cpu + 0x18) */
                            (0 << 8) |          /* func */
                            0x68                /* reg */
                        );
                        tcr = inl(0xcfc);
                        if ((tcr & 0x00010000) == 0) {
                                kprintf("LAPIC: AMD LINTEN on\n");
                                outl(0xcfc, tcr|0x00010000);
                        }
                        outl(0x0cf8, 0);
                }

                /* Install a 'Spurious INTerrupt' vector */
                setidt_global(XSPURIOUSINT_OFFSET, Xspuriousint,
                    SDT_SYSIGT, SEL_KPL, 0);

                /* Install a timer vector */
                setidt_global(XTIMER_OFFSET, Xtimer,
                    SDT_SYSIGT, SEL_KPL, 0);

                /* Install an inter-CPU IPI for TLB invalidation */
                setidt_global(XINVLTLB_OFFSET, Xinvltlb,
                    SDT_SYSIGT, SEL_KPL, 0);

                /* Install an inter-CPU IPI for IPIQ messaging */
                setidt_global(XIPIQ_OFFSET, Xipiq,
                    SDT_SYSIGT, SEL_KPL, 0);

                /* Install an inter-CPU IPI for CPU stop/restart */
                setidt_global(XCPUSTOP_OFFSET, Xcpustop,
                    SDT_SYSIGT, SEL_KPL, 0);
        }

        /*
         * Setup LINT0 as ExtINT on the BSP.  This is theoretically an
         * aggregate interrupt input from the 8259.  The INTA cycle
         * will be routed to the external controller (the 8259) which
         * is expected to supply the vector.
         *
         * Must be setup edge triggered, active high.
         *
         * Disable LINT0 on BSP, if I/O APIC is enabled.
         *
         * Disable LINT0 on the APs.  It doesn't matter what delivery
         * mode we use because we leave it masked.
         */
        temp = lapic->lvt_lint0;
        temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
                  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
        if (bsp) {
                temp |= APIC_LVT_DM_EXTINT;
                if (ioapic_enable)
                        temp |= APIC_LVT_MASKED;
        } else {
                temp |= APIC_LVT_DM_FIXED | APIC_LVT_MASKED;
        }
        lapic->lvt_lint0 = temp;

        /*
         * Setup LINT1 as NMI.
         *
         * Must be setup edge trigger, active high.
         *
         * Enable LINT1 on BSP, if I/O APIC is enabled.
         *
         * Disable LINT1 on the APs.
         */
        temp = lapic->lvt_lint1;
        temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
                  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
        temp |= APIC_LVT_MASKED | APIC_LVT_DM_NMI;
        if (bsp && ioapic_enable)
                temp &= ~APIC_LVT_MASKED;
        lapic->lvt_lint1 = temp;

        /*
         * Mask the LAPIC error interrupt, LAPIC performance counter
         * interrupt.
         */
        lapic->lvt_error = lapic->lvt_error | APIC_LVT_MASKED;
        lapic->lvt_pcint = lapic->lvt_pcint | APIC_LVT_MASKED;

        /*
         * Set LAPIC timer vector and mask the LAPIC timer interrupt.
         */
        timer = lapic->lvt_timer;
        timer &= ~APIC_LVTT_VECTOR;
        timer |= XTIMER_OFFSET;
        timer |= APIC_LVTT_MASKED;
        lapic->lvt_timer = timer;

        /*
         * Set the Task Priority Register as needed.  At the moment allow
         * interrupts on all cpus (the APs will remain CLId until they are
         * ready to deal).
         */
        temp = lapic->tpr;
        temp &= ~APIC_TPR_PRIO;         /* clear priority field */
        lapic->tpr = temp;

        /*
         * Enable the LAPIC
         */
        temp = lapic->svr;
        temp |= APIC_SVR_ENABLE;        /* enable the LAPIC */
        temp &= ~APIC_SVR_FOCUS_DISABLE; /* enable lopri focus processor */

        /*
         * Set the spurious interrupt vector.  The low 4 bits of the vector
         * must be 1111.
         */
        if ((XSPURIOUSINT_OFFSET & 0x0F) != 0x0F)
                panic("bad XSPURIOUSINT_OFFSET: 0x%08x", XSPURIOUSINT_OFFSET);
        temp &= ~APIC_SVR_VECTOR;
        temp |= XSPURIOUSINT_OFFSET;

        lapic->svr = temp;

        /*
         * Pump out a few EOIs to clean out interrupts that got through
         * before we were able to set the TPR.
         */
        lapic->eoi = 0;
        lapic->eoi = 0;
        lapic->eoi = 0;

        if (bsp) {
                lapic_timer_calibrate();
                if (lapic_timer_enable) {
                        if (cpu_thermal_feature & CPUID_THERMAL_ARAT) {
                                /*
                                 * Local APIC timer will not stop
                                 * in deep C-state.
                                 */
                                lapic_cputimer_intr.caps |=
                                    CPUTIMER_INTR_CAP_PS;
                        }
                        cputimer_intr_register(&lapic_cputimer_intr);
                        cputimer_intr_select(&lapic_cputimer_intr, 0);
                }
        } else {
                lapic_timer_set_divisor(lapic_timer_divisor_idx);
        }

        if (bootverbose)
                apic_dump("apic_initialize()");
}
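/*
 * Program the LAPIC timer divide configuration register from the divisor
 * table entry selected (or currently being probed) by the calibration code.
 */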
static void
lapic_timer_set_divisor(int divisor_idx)
{
        KKASSERT(divisor_idx >= 0 && divisor_idx < APIC_TIMER_NDIVISORS);
        lapic->dcr_timer = lapic_timer_divisors[divisor_idx];
}
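/*
 * Arm the LAPIC timer as a one-shot.  The "quick" variant below assumes
 * the LVT timer entry is already configured for one-shot mode and only
 * rewrites the initial count register.
 */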
static void
lapic_timer_oneshot(u_int count)
{
        uint32_t value;

        value = lapic->lvt_timer;
        value &= ~APIC_LVTT_PERIODIC;
        lapic->lvt_timer = value;
        lapic->icr_timer = count;
}
static void
lapic_timer_oneshot_quick(u_int count)
{
        lapic->icr_timer = count;
}
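/*
 * Calibration: try each divisor in lapic_timer_divisors[] in order
 * (divide-by-2 first).  For each divisor a one-shot of APIC_TIMER_MAX_COUNT
 * is armed and we wait 2 seconds; if the counter has not run all the way
 * down, the ticks consumed divided by 2 give the timer frequency in Hz
 * for that divisor.
 */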
static void
lapic_timer_calibrate(void)
{
        sysclock_t value;

        /* Try to calibrate the local APIC timer. */
        for (lapic_timer_divisor_idx = 0;
             lapic_timer_divisor_idx < APIC_TIMER_NDIVISORS;
             lapic_timer_divisor_idx++) {
                lapic_timer_set_divisor(lapic_timer_divisor_idx);
                lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
                DELAY(2000000);
                value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
                if (value != APIC_TIMER_MAX_COUNT)
                        break;
        }
        if (lapic_timer_divisor_idx >= APIC_TIMER_NDIVISORS)
                panic("lapic: no proper timer divisor?!");
        lapic_cputimer_intr.freq = value / 2;

        kprintf("lapic: divisor index %d, frequency %u Hz\n",
                lapic_timer_divisor_idx, lapic_cputimer_intr.freq);
}
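/*
 * Reload the one-shot timer.  'reload' is given in sys_cputimer ticks and
 * is converted to LAPIC timer ticks below; a minimum count of 2 is enforced
 * so a non-zero value is always programmed.  If the timer is already running
 * on this cpu it is only re-armed when the new deadline is earlier than the
 * current count.
 */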
static void
lapic_timer_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
{
        struct globaldata *gd = mycpu;

        reload = (int64_t)reload * cti->freq / sys_cputimer->freq;
        if (reload < 2)
                reload = 2;

        if (gd->gd_timer_running) {
                if (reload < lapic->ccr_timer)
                        lapic_timer_oneshot_quick(reload);
        } else {
                gd->gd_timer_running = 1;
                lapic_timer_oneshot_quick(reload);
        }
}
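/*
 * Unmask the LVT timer entry (one-shot mode) and run the AMD C1E fixup
 * on the local cpu.
 */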
static void
lapic_timer_intr_enable(struct cputimer_intr *cti __unused)
{
        uint32_t timer;

        timer = lapic->lvt_timer;
        timer &= ~(APIC_LVTT_MASKED | APIC_LVTT_PERIODIC);
        lapic->lvt_timer = timer;

        lapic_timer_fixup_handler(NULL);
}
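/*
 * Per-cpu fixup handler, also sent to remote cpus via IPI.  On affected
 * AMD cpus it disables C1E (which stops the LAPIC timer) and kick-starts
 * the timer; '*started' reports whether the timer was restarted here.
 */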
static void
lapic_timer_fixup_handler(void *arg)
{
        int *started = arg;

        if (started != NULL)
                *started = 0;

        if (cpu_vendor_id == CPU_VENDOR_AMD) {
                /*
                 * Detect the presence of C1E capability mostly on latest
                 * dual-cores (or future) k8 family.  This feature renders
                 * the local APIC timer dead, so we disable it by reading
                 * the Interrupt Pending Message register and clearing both
                 * C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
                 *
                 * Reference:
                 *   "BIOS and Kernel Developer's Guide for AMD NPT
                 *    Family 0Fh Processors"
                 *   #32559 revision 3.00
                 */
                if ((cpu_id & 0x00000f00) == 0x00000f00 &&
                    (cpu_id & 0x0fff0000) >= 0x00040000) {
                        uint64_t msr;

                        msr = rdmsr(0xc0010055);
                        if (msr & 0x18000000) {
                                struct globaldata *gd = mycpu;

                                kprintf("cpu%d: AMD C1E detected\n",
                                        gd->gd_cpuid);
                                wrmsr(0xc0010055, msr & ~0x18000000ULL);

                                /*
                                 * We are kinda stalled;
                                 * kick start again.
                                 */
                                gd->gd_timer_running = 1;
                                lapic_timer_oneshot_quick(2);

                                if (started != NULL)
                                        *started = 1;
                        }
                }
        }
}
static void
lapic_timer_restart_handler(void *dummy __unused)
{
        int started;

        lapic_timer_fixup_handler(&started);
        if (!started) {
                struct globaldata *gd = mycpu;

                gd->gd_timer_running = 1;
                lapic_timer_oneshot_quick(2);
        }
}
/*
 * This function is currently called only by ACPICA code:
 * - AMD C1E fixup.  AMD C1E only seems to show up once the ACPI module
 *   takes control of power management, so once ACPICA is attached we
 *   apply the fixup to keep the LAPIC timer from hanging.
 */
static void
lapic_timer_intr_pmfixup(struct cputimer_intr *cti __unused)
{
        lwkt_send_ipiq_mask(smp_active_mask,
                            lapic_timer_fixup_handler, NULL);
}
static void
lapic_timer_intr_restart(struct cputimer_intr *cti __unused)
{
        lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_restart_handler, NULL);
}
/*
 * dump contents of local APIC registers
 */
void
apic_dump(char* str)
{
        kprintf("SMP: CPU%d %s:\n", mycpu->gd_cpuid, str);
        kprintf("     lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
                lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
}
/*
 * Inter Processor Interrupt functions.
 */

/*
 * Send an APIC IPI 'vector' to 'dest_type' via 'delivery_mode'.
 *
 *  dest_type is one of: APIC_DEST_SELF, APIC_DEST_ALLISELF, APIC_DEST_ALLESELF
 *  vector is any valid SYSTEM INT vector
 *  delivery_mode is one of: APIC_DELMODE_FIXED, APIC_DELMODE_LOWPRIO
 *
 * WARNINGS!
 *
 * We now implement a per-cpu interlock (gd->gd_npoll) to prevent more than
 * one IPI from being sent to any given cpu at a time.  Thus we no longer
 * have to process incoming IPIs while waiting for the status to clear.
 * No deadlock should be possible.
 *
 * We now physically disable interrupts for the lapic ICR operation.  If
 * we do not do this then it looks like an EOI sent to the lapic (which
 * occurs even with a critical section) can interfere with the command
 * register ready status and cause an IPI to be lost.
 *
 * e.g. an interrupt can occur, issue the EOI, IRET, and cause the command
 * register to go busy just before we write to icr_lo, resulting in a lost
 * issuance.  This only appears to occur on Intel cpus and is not
 * documented.  It could simply be that cpus are so fast these days that
 * it was always an issue, but is only now rearing its ugly head.  This
 * is conjecture.
 */
int
apic_ipi(int dest_type, int vector, int delivery_mode)
{
        unsigned long rflags;
        u_long icr_lo;

        rflags = read_rflags();
        cpu_disable_intr();
        while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
                cpu_pause();
        }
        icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) | dest_type |
                 delivery_mode | vector;
        lapic->icr_lo = icr_lo;
        write_rflags(rflags);

        return 0;
}
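/*
 * Send an IPI with the given vector and delivery mode to a single target
 * cpu.  Interrupts are physically disabled around the ICR access for the
 * reasons described above apic_ipi().
 */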
void
single_apic_ipi(int cpu, int vector, int delivery_mode)
{
        unsigned long rflags;
        u_long icr_lo;
        u_long icr_hi;

        rflags = read_rflags();
        cpu_disable_intr();
        while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
                cpu_pause();
        }
        icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
        icr_hi |= (CPUID_TO_APICID(cpu) << 24);
        lapic->icr_hi = icr_hi;

        /* build ICR_LOW */
        icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) |
                 APIC_DEST_DESTFLD | delivery_mode | vector;

        /* write APIC ICR */
        lapic->icr_lo = icr_lo;
        write_rflags(rflags);
}
#if 0
/*
 * Returns 0 if the apic is busy, 1 if we were able to queue the request.
 *
 * NOT WORKING YET!  The code as-is may end up not queueing an IPI at all
 * to the target, and the scheduler does not 'poll' for IPI messages.
 */
static int
single_apic_ipi_passive(int cpu, int vector, int delivery_mode)
{
        u_long icr_lo;
        u_long icr_hi;

        crit_enter();
        if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
                crit_exit();
                return(0);
        }
        icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
        icr_hi |= (CPUID_TO_APICID(cpu) << 24);
        lapic->icr_hi = icr_hi;

        /* build ICR_LOW */
        icr_lo = (lapic->icr_lo & APIC_RESV2_MASK)
            | APIC_DEST_DESTFLD | delivery_mode | vector;

        /* write APIC ICR */
        lapic->icr_lo = icr_lo;
        crit_exit();
        return(1);
}
#endif
/*
 * Send an APIC IPI 'vector' to each cpu in 'target' via 'delivery_mode'.
 *
 * target is a bitmask of destination cpus.  Vector is any
 * valid system INT vector.  Delivery mode may be either
 * APIC_DELMODE_FIXED or APIC_DELMODE_LOWPRIO.
 */
void
selected_apic_ipi(cpumask_t target, int vector, int delivery_mode)
{
        crit_enter();
        while (CPUMASK_TESTNZERO(target)) {
                int n = BSFCPUMASK(target);
                CPUMASK_NANDBIT(target, n);
                single_apic_ipi(n, vector, delivery_mode);
        }
        crit_exit();
}
/*
 * Timer code, in development...
 *  - suggested by rgrimes@gndrsh.aac.dev.com
 */
int
get_apic_timer_frequency(void)
{
        return(lapic_cputimer_intr.freq);
}
/*
 * Load a 'downcount time' in uSeconds.
 */
void
set_apic_timer(int us)
{
        u_int count;

        /*
         * When we reach here, lapic timer's frequency
         * must have been calculated as well as the
         * divisor (lapic->dcr_timer is setup during the
         * divisor calculation).
         */
        KKASSERT(lapic_cputimer_intr.freq != 0 &&
                 lapic_timer_divisor_idx >= 0);

        count = ((us * (int64_t)lapic_cputimer_intr.freq) + 999999) / 1000000;
        lapic_timer_oneshot(count);
}
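/*
 * Example (illustrative numbers only): with a calibrated frequency of
 * 10,000,000 Hz, set_apic_timer(100) programs a one-shot count of
 * (100 * 10000000 + 999999) / 1000000 = 1000 timer ticks.
 */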
/*
 * Read remaining time in timer.
 */
int
read_apic_timer(void)
{
#if 0
        /** XXX FIXME: we need to return the actual remaining time,
         *             for now we just return the remaining count.
         */
#else
        return lapic->ccr_timer;
#endif
}
/*
 * Spin-style delay, set delay time in uS, spin till it drains.
 */
void
u_sleep(int count)
{
        set_apic_timer(count);
        while (read_apic_timer())
                /* spin */ ;
}
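/*
 * Find the first APIC ID at or above 'start' that is not yet mapped to a
 * cpu; returns NAPICID if none is free.
 */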
int
lapic_unused_apic_id(int start)
{
        int i;

        for (i = start; i < APICID_MAX; ++i) {
                if (APICID_TO_CPUID(i) == -1)
                        return i;
        }
        return NAPICID;
}
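/* Map the LAPIC register page uncached into kernel virtual memory. */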
void
lapic_map(vm_paddr_t lapic_addr)
{
        lapic = pmap_mapdev_uncacheable(lapic_addr, sizeof(struct LAPIC));
}
static TAILQ_HEAD(, lapic_enumerator) lapic_enumerators =
        TAILQ_HEAD_INITIALIZER(lapic_enumerators);
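/*
 * Probe the registered LAPIC enumerators in priority order, let the first
 * one that succeeds enumerate the cpus, then clamp the number of APs to
 * the hw.ap_max tunable.
 */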
static int
lapic_config(void)
{
        struct lapic_enumerator *e;
        int error, i, ap_max;

        KKASSERT(lapic_enable);

        for (i = 0; i < NAPICID; ++i)
                APICID_TO_CPUID(i) = -1;

        TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
                error = e->lapic_probe(e);
                if (!error)
                        break;
        }
        if (e == NULL) {
                kprintf("LAPIC: Can't find LAPIC\n");
                return ENXIO;
        }

        error = e->lapic_enumerate(e);
        if (error) {
                kprintf("LAPIC: enumeration failed\n");
                return ENXIO;
        }

        ap_max = MAXCPU - 1;
        TUNABLE_INT_FETCH("hw.ap_max", &ap_max);
        if (ap_max > MAXCPU - 1)
                ap_max = MAXCPU - 1;

        if (naps > ap_max) {
                kprintf("LAPIC: Warning: only using %d of %d "
                        "available APs\n",
                        ap_max, naps);
                naps = ap_max;
        }

        return 0;
}
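/*
 * Register a LAPIC enumerator, keeping the list sorted so higher
 * lapic_prio entries are probed first.
 */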
void
lapic_enumerator_register(struct lapic_enumerator *ne)
{
        struct lapic_enumerator *e;

        TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
                if (e->lapic_prio < ne->lapic_prio) {
                        TAILQ_INSERT_BEFORE(e, ne, lapic_link);
                        return;
                }
        }
        TAILQ_INSERT_TAIL(&lapic_enumerators, ne, lapic_link);
}
void
lapic_set_cpuid(int cpu_id, int apic_id)
{
        CPUID_TO_APICID(cpu_id) = apic_id;
        APICID_TO_CPUID(apic_id) = cpu_id;
}
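/*
 * Used on the BSP when no I/O APIC is available: unmask LINT0 (ExtINT
 * from the 8259) and mask LINT1.
 */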
void
lapic_fixup_noioapic(void)
{
        u_int temp;

        /* Only allowed on BSP */
        KKASSERT(mycpuid == 0);
        KKASSERT(!ioapic_enable);

        temp = lapic->lvt_lint0;
        temp &= ~APIC_LVT_MASKED;
        lapic->lvt_lint0 = temp;

        temp = lapic->lvt_lint1;
        temp |= APIC_LVT_MASKED;
        lapic->lvt_lint1 = temp;
}
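/*
 * Boot-time configuration hook: configure the LAPIC if it is enabled; on
 * failure (or if the LAPIC is disabled) fall back to the 8259 ICU and turn
 * the I/O APIC off as well.
 */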
static void
lapic_sysinit(void *dummy __unused)
{
        if (lapic_enable) {
                int error;

                error = lapic_config();
                if (error)
                        lapic_enable = 0;
        }

        if (lapic_enable) {
                /* Initialize BSP's local APIC */
                lapic_init(TRUE);
        } else if (ioapic_enable) {
                ioapic_enable = 0;
                icu_reinit_noioapic();
        }
}
SYSINIT(lapic, SI_BOOT2_LAPIC, SI_ORDER_FIRST, lapic_sysinit, NULL);