4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
26 * Copyright (c) 2010, Intel Corporation.
27 * All rights reserved.
30 * Copyright (c) 2017, Joyent, Inc. All rights reserved.
34 * To understand how the pcplusmp module interacts with the interrupt subsystem
35 * read the theory statement in uts/i86pc/os/intr.c.
39 * PSMI 1.1 extensions are supported only in 2.6 and later versions.
40 * PSMI 1.2 extensions are supported only in 2.7 and later versions.
41 * PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
42 * PSMI 1.5 extensions are supported in Solaris Nevada.
43 * PSMI 1.6 extensions are supported in Solaris Nevada.
44 * PSMI 1.7 extensions are supported in Solaris Nevada.
48 #include <sys/processor.h>
51 #include <sys/smp_impldefs.h>
53 #include <sys/acpi/acpi.h>
54 #include <sys/acpica.h>
55 #include <sys/psm_common.h>
59 #include <sys/sunddi.h>
60 #include <sys/ddi_impldefs.h>
62 #include <sys/promif.h>
63 #include <sys/x86_archext.h>
64 #include <sys/cpc_impl.h>
65 #include <sys/uadmin.h>
66 #include <sys/panic.h>
67 #include <sys/debug.h>
68 #include <sys/archsystm.h>
70 #include <sys/machsystm.h>
71 #include <sys/sysmacros.h>
72 #include <sys/cpuvar.h>
73 #include <sys/rm_platter.h>
74 #include <sys/privregs.h>
76 #include <sys/pci_intr_lib.h>
78 #include <sys/clock.h>
79 #include <sys/cyclic.h>
80 #include <sys/dditypes.h>
81 #include <sys/sunddi.h>
82 #include <sys/x_call.h>
83 #include <sys/reboot.h>
85 #include <sys/apic_common.h>
86 #include <sys/apic_timer.h>
89 * Local Function Prototypes
91 static void apic_init_intr(void);
96 static int apic_probe(void);
97 static int apic_getclkirq(int ipl
);
98 static void apic_init(void);
99 static void apic_picinit(void);
100 static int apic_post_cpu_start(void);
101 static int apic_intr_enter(int ipl
, int *vect
);
102 static void apic_setspl(int ipl
);
103 static void x2apic_setspl(int ipl
);
104 static int apic_addspl(int ipl
, int vector
, int min_ipl
, int max_ipl
);
105 static int apic_delspl(int ipl
, int vector
, int min_ipl
, int max_ipl
);
106 static int apic_disable_intr(processorid_t cpun
);
107 static void apic_enable_intr(processorid_t cpun
);
108 static int apic_get_ipivect(int ipl
, int type
);
109 static void apic_post_cyclic_setup(void *arg
);
111 #define UCHAR_MAX UINT8_MAX
114 * The following vector assignments influence the value of ipltopri and
115 * vectortoipl. Note that vectors 0 - 0x1f are not used. We can program
116 * idle to 0 and IPL 0 to 0xf to differentiate idle in case
117 * we care to do so in future. Note some IPLs which are rarely used
118 * will share the vector ranges and heavily used IPLs (5 and 6) have
121 * This array is used to initialize apic_ipls[] (in apic_init()).
123 * IPL Vector range. as passed to intr_enter
125 * 1,2,3 0x20-0x2f 0x0-0xf
126 * 4 0x30-0x3f 0x10-0x1f
127 * 5 0x40-0x5f 0x20-0x3f
128 * 6 0x60-0x7f 0x40-0x5f
129 * 7,8,9 0x80-0x8f 0x60-0x6f
130 * 10 0x90-0x9f 0x70-0x7f
131 * 11 0xa0-0xaf 0x80-0x8f
133 * 15 0xe0-0xef 0xc0-0xcf
134 * 15 0xf0-0xff 0xd0-0xdf
136 uchar_t apic_vectortoipl
[APIC_AVAIL_VECTOR
/ APIC_VECTOR_PER_IPL
] = {
137 3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
140 * The ipl of an ISR at vector X is apic_vectortoipl[X>>4]
141 * NOTE that this is vector as passed into intr_enter which is
142 * programmed vector - 0x20 (APIC_BASE_VECT)
145 uchar_t apic_ipltopri
[MAXIPL
+ 1]; /* unix ipl to apic pri */
146 /* The taskpri to be programmed into apic to mask given ipl */
149 * Correlation of the hardware vector to the IPL in use, initialized
150 * from apic_vectortoipl[] in apic_init(). The final IPLs may not correlate
151 * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
152 * connected to errata-stricken IOAPICs
154 uchar_t apic_ipls
[APIC_AVAIL_VECTOR
];
157 * Patchable global variables.
159 int apic_enable_hwsoftint
= 0; /* 0 - disable, 1 - enable */
160 int apic_enable_bind_log
= 1; /* 1 - display interrupt binding log */
165 static struct psm_ops apic_ops
= {
177 (int (*)(int))NULL
, /* psm_softlvl_to_irq */
178 (void (*)(int))NULL
, /* psm_set_softintr */
185 (void (*)(void))NULL
, /* psm_hrtimeinit */
188 apic_get_next_processorid
,
195 (int (*)(dev_info_t
*, int))NULL
, /* psm_translate_irq */
196 (void (*)(int, char *))NULL
, /* psm_notify_error */
197 (void (*)(int))NULL
, /* psm_notify_func */
198 apic_timer_reprogram
,
201 apic_post_cyclic_setup
,
203 apic_intr_ops
, /* Advanced DDI Interrupt framework */
204 apic_state
, /* save, restore apic state for S3 */
205 apic_cpu_ops
, /* CPU control interface. */
208 struct psm_ops
*psmops
= &apic_ops
;
210 static struct psm_info apic_psm_info
= {
211 PSM_INFO_VER01_7
, /* version */
212 PSM_OWN_EXCLUSIVE
, /* ownership */
213 (struct psm_ops
*)&apic_ops
, /* operation */
214 APIC_PCPLUSMP_NAME
, /* machine name */
215 "pcplusmp v1.4 compatible",
218 static void *apic_hdlp
;
220 /* to gather intr data and redistribute */
221 static void apic_redistribute_compute(void);
224 * This is the loadable module wrapper
230 if (apic_coarse_hrtime
)
231 apic_ops
.psm_gethrtime
= &apic_gettime
;
232 return (psm_mod_init(&apic_hdlp
, &apic_psm_info
));
238 return (psm_mod_fini(&apic_hdlp
, &apic_psm_info
));
242 _info(struct modinfo
*modinfop
)
244 return (psm_mod_info(&apic_hdlp
, &apic_psm_info
, modinfop
));
250 /* check if apix is initialized */
251 if (apix_enable
&& apix_loaded())
252 return (PSM_FAILURE
);
255 * Check whether x2APIC mode was activated by BIOS. We don't support
256 * that in pcplusmp as apix normally handles that.
258 if (apic_local_mode() == LOCAL_X2APIC
)
259 return (PSM_FAILURE
);
261 /* continue using pcplusmp PSM */
264 return (apic_probe_common(apic_psm_info
.p_mach_idstring
));
268 apic_xlate_vector_by_irq(uchar_t irq
)
270 if (apic_irq_table
[irq
] == NULL
)
273 return (apic_irq_table
[irq
]->airq_vector
);
282 psm_get_ioapicid
= apic_get_ioapicid
;
283 psm_get_localapicid
= apic_get_localapicid
;
284 psm_xlate_vector_by_irq
= apic_xlate_vector_by_irq
;
286 apic_ipltopri
[0] = APIC_VECTOR_PER_IPL
; /* leave 0 for idle */
287 for (i
= 0; i
< (APIC_AVAIL_VECTOR
/ APIC_VECTOR_PER_IPL
); i
++) {
288 if ((i
< ((APIC_AVAIL_VECTOR
/ APIC_VECTOR_PER_IPL
) - 1)) &&
289 (apic_vectortoipl
[i
+ 1] == apic_vectortoipl
[i
]))
290 /* get to highest vector at the same ipl */
292 for (; j
<= apic_vectortoipl
[i
]; j
++) {
293 apic_ipltopri
[j
] = (i
<< APIC_IPL_SHIFT
) +
297 for (; j
< MAXIPL
+ 1; j
++)
298 /* fill up any empty ipltopri slots */
299 apic_ipltopri
[j
] = (i
<< APIC_IPL_SHIFT
) + APIC_BASE_VECT
;
302 #if !defined(__amd64)
303 if (cpuid_have_cr8access(CPU
))
304 apic_have_32bit_cr8
= 1;
311 processorid_t cpun
= psm_get_cpu_id();
313 uint32_t svr
= AV_UNIT_ENABLE
| APIC_SPUR_INTR
;
315 apic_reg_ops
->apic_write_task_reg(APIC_MASK_ALL
);
317 if (apic_mode
== LOCAL_APIC
) {
319 * We are running APIC in MMIO mode.
321 if (apic_flat_model
) {
322 apic_reg_ops
->apic_write(APIC_FORMAT_REG
,
325 apic_reg_ops
->apic_write(APIC_FORMAT_REG
,
329 apic_reg_ops
->apic_write(APIC_DEST_REG
,
330 AV_HIGH_ORDER
>> cpun
);
333 if (apic_directed_EOI_supported()) {
335 * Setting the 12th bit in the Spurious Interrupt Vector
336 * Register suppresses broadcast EOIs generated by the local
337 * APIC. The suppression of broadcast EOIs happens only when
338 * interrupts are level-triggered.
340 svr
|= APIC_SVR_SUPPRESS_BROADCAST_EOI
;
343 /* need to enable APIC before unmasking NMI */
344 apic_reg_ops
->apic_write(APIC_SPUR_INT_REG
, svr
);
347 * Presence of an invalid vector with delivery mode AV_FIXED can
348 * cause an error interrupt, even if the entry is masked...so
349 * write a valid vector to LVT entries along with the mask bit
352 /* All APICs have timer and LINT0/1 */
353 apic_reg_ops
->apic_write(APIC_LOCAL_TIMER
, AV_MASK
|APIC_RESV_IRQ
);
354 apic_reg_ops
->apic_write(APIC_INT_VECT0
, AV_MASK
|APIC_RESV_IRQ
);
355 apic_reg_ops
->apic_write(APIC_INT_VECT1
, AV_NMI
); /* enable NMI */
358 * On integrated APICs, the number of LVT entries is
359 * 'Max LVT entry' + 1; on 82489DX's (non-integrated
360 * APICs), nlvt is "3" (LINT0, LINT1, and timer)
363 if (apic_cpus
[cpun
].aci_local_ver
< APIC_INTEGRATED_VERS
) {
366 nlvt
= ((apic_reg_ops
->apic_read(APIC_VERS_REG
) >> 16) &
371 /* Enable performance counter overflow interrupt */
373 if (!is_x86_feature(x86_featureset
, X86FSET_MSR
))
374 apic_enable_cpcovf_intr
= 0;
375 if (apic_enable_cpcovf_intr
) {
376 if (apic_cpcovf_vect
== 0) {
377 int ipl
= APIC_PCINT_IPL
;
378 int irq
= apic_get_ipivect(ipl
, -1);
382 apic_irq_table
[irq
]->airq_vector
;
383 ASSERT(apic_cpcovf_vect
);
384 (void) add_avintr(NULL
, ipl
,
385 (avfunc
)kcpc_hw_overflow_intr
,
386 "apic pcint", irq
, NULL
, NULL
, NULL
, NULL
);
387 kcpc_hw_overflow_intr_installed
= 1;
388 kcpc_hw_enable_cpc_intr
=
389 apic_cpcovf_mask_clear
;
391 apic_reg_ops
->apic_write(APIC_PCINT_VECT
,
397 /* Only mask TM intr if the BIOS apparently doesn't use it */
401 lvtval
= apic_reg_ops
->apic_read(APIC_THERM_VECT
);
402 if (((lvtval
& AV_MASK
) == AV_MASK
) ||
403 ((lvtval
& AV_DELIV_MODE
) != AV_SMI
)) {
404 apic_reg_ops
->apic_write(APIC_THERM_VECT
,
405 AV_MASK
|APIC_RESV_IRQ
);
409 /* Enable error interrupt */
411 if (nlvt
>= 4 && apic_enable_error_intr
) {
412 if (apic_errvect
== 0) {
413 int ipl
= 0xf; /* get highest priority intr */
414 int irq
= apic_get_ipivect(ipl
, -1);
417 apic_errvect
= apic_irq_table
[irq
]->airq_vector
;
418 ASSERT(apic_errvect
);
420 * Not PSMI compliant, but we are going to merge
423 (void) add_avintr((void *)NULL
, ipl
,
424 (avfunc
)apic_error_intr
, "apic error intr",
425 irq
, NULL
, NULL
, NULL
, NULL
);
427 apic_reg_ops
->apic_write(APIC_ERR_VECT
, apic_errvect
);
428 apic_reg_ops
->apic_write(APIC_ERROR_STATUS
, 0);
429 apic_reg_ops
->apic_write(APIC_ERROR_STATUS
, 0);
432 /* Enable CMCI interrupt */
433 if (cmi_enable_cmci
) {
435 mutex_enter(&cmci_cpu_setup_lock
);
436 if (cmci_cpu_setup_registered
== 0) {
437 mutex_enter(&cpu_lock
);
438 register_cpu_setup_func(cmci_cpu_setup
, NULL
);
439 mutex_exit(&cpu_lock
);
440 cmci_cpu_setup_registered
= 1;
442 mutex_exit(&cmci_cpu_setup_lock
);
444 if (apic_cmci_vect
== 0) {
446 int irq
= apic_get_ipivect(ipl
, -1);
449 apic_cmci_vect
= apic_irq_table
[irq
]->airq_vector
;
450 ASSERT(apic_cmci_vect
);
452 (void) add_avintr(NULL
, ipl
,
453 (avfunc
)cmi_cmci_trap
,
454 "apic cmci intr", irq
, NULL
, NULL
, NULL
, NULL
);
456 apic_reg_ops
->apic_write(APIC_CMCI_VECT
, apic_cmci_vect
);
467 * Initialize and enable interrupt remapping before apic
468 * hardware initialization
470 apic_intrmap_init(apic_mode
);
473 * On UniSys Model 6520, the BIOS leaves vector 0x20 isr
474 * bit on without clearing it with EOI. Since softint
475 * uses vector 0x20 to interrupt itself, so softint will
476 * not work on this machine. In order to fix this problem
477 * a check is made to verify all the isr bits are clear.
478 * If not, EOIs are issued to clear the bits.
480 for (i
= 7; i
>= 1; i
--) {
481 isr
= apic_reg_ops
->apic_read(APIC_ISR_REG
+ (i
* 4));
483 for (j
= 0; ((j
< 32) && (isr
!= 0)); j
++)
484 if (isr
& (1 << j
)) {
485 apic_reg_ops
->apic_write(
488 apic_error
|= APIC_ERR_BOOT_EOI
;
492 /* set a flag so we know we have run apic_picinit() */
493 apic_picinit_called
= 1;
494 LOCK_INIT_CLEAR(&apic_gethrtime_lock
);
495 LOCK_INIT_CLEAR(&apic_ioapic_lock
);
496 LOCK_INIT_CLEAR(&apic_error_lock
);
497 LOCK_INIT_CLEAR(&apic_mode_switch_lock
);
499 picsetup(); /* initialise the 8259 */
501 /* add nmi handler - least priority nmi handler */
502 LOCK_INIT_CLEAR(&apic_nmi_lock
);
504 if (!psm_add_nmintr(0, (avfunc
) apic_nmi_intr
,
505 "pcplusmp NMI handler", (caddr_t
)NULL
))
506 cmn_err(CE_WARN
, "pcplusmp: Unable to add nmi handler");
509 * Check for directed-EOI capability in the local APIC.
511 if (apic_directed_EOI_supported() == 1) {
512 apic_set_directed_EOI_handler();
517 /* enable apic mode if imcr present */
519 outb(APIC_IMCR_P1
, (uchar_t
)APIC_IMCR_SELECT
);
520 outb(APIC_IMCR_P2
, (uchar_t
)APIC_IMCR_APIC
);
523 ioapic_init_intr(IOAPIC_MASK
);
534 * platform_intr_enter
536 * Called at the beginning of the interrupt service routine to
537 * mask all level equal to and below the interrupt priority
538 * of the interrupting vector. An EOI should be given to
539 * the interrupt controller to enable other HW interrupts.
541 * Return -1 for spurious interrupts
546 apic_intr_enter(int ipl
, int *vectorp
)
552 apic_cpus_info_t
*cpu_infop
;
555 * The real vector delivered is (*vectorp + 0x20), but our caller
556 * subtracts 0x20 from the vector before passing it to us.
557 * (That's why APIC_BASE_VECT is 0x20.)
559 vector
= (uchar_t
)*vectorp
;
561 /* if interrupted by the clock, increment apic_nsec_since_boot */
562 if (vector
== apic_clkvect
) {
564 /* NOTE: this is not MT aware */
566 apic_nsec_since_boot
+= apic_nsec_per_intr
;
568 last_count_read
= apic_hertz_count
;
569 apic_redistribute_compute();
572 /* We will avoid all the book keeping overhead for clock */
573 nipl
= apic_ipls
[vector
];
575 *vectorp
= apic_vector_to_irq
[vector
+ APIC_BASE_VECT
];
577 apic_reg_ops
->apic_write_task_reg(apic_ipltopri
[nipl
]);
578 apic_reg_ops
->apic_send_eoi(0);
583 cpu_infop
= &apic_cpus
[psm_get_cpu_id()];
585 if (vector
== (APIC_SPUR_INTR
- APIC_BASE_VECT
)) {
586 cpu_infop
->aci_spur_cnt
++;
587 return (APIC_INT_SPURIOUS
);
590 /* Check if the vector we got is really what we need */
591 if (apic_revector_pending
) {
593 * Disable interrupts for the duration of
594 * the vector translation to prevent a self-race for
595 * the apic_revector_lock. This cannot be done
596 * in apic_xlate_vector because it is recursive and
597 * we want the vector translation to be atomic with
598 * respect to other (higher-priority) interrupts.
600 iflag
= intr_clear();
601 vector
= apic_xlate_vector(vector
+ APIC_BASE_VECT
) -
606 nipl
= apic_ipls
[vector
];
607 *vectorp
= irq
= apic_vector_to_irq
[vector
+ APIC_BASE_VECT
];
609 apic_reg_ops
->apic_write_task_reg(apic_ipltopri
[nipl
]);
611 cpu_infop
->aci_current
[nipl
] = (uchar_t
)irq
;
612 cpu_infop
->aci_curipl
= (uchar_t
)nipl
;
613 cpu_infop
->aci_ISR_in_progress
|= 1 << nipl
;
616 * apic_level_intr could have been assimilated into the irq struct.
617 * but, having it as a character array is more efficient in terms of
618 * cache usage. So, we leave it as is.
620 if (!apic_level_intr
[irq
]) {
621 apic_reg_ops
->apic_send_eoi(0);
625 APIC_DEBUG_BUF_PUT(vector
);
626 APIC_DEBUG_BUF_PUT(irq
);
627 APIC_DEBUG_BUF_PUT(nipl
);
628 APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
629 if ((apic_stretch_interrupts
) && (apic_stretch_ISR
& (1 << nipl
)))
630 drv_usecwait(apic_stretch_interrupts
);
632 if (apic_break_on_cpu
== psm_get_cpu_id())
639 * This macro is a common code used by MMIO local apic and X2APIC
642 #define APIC_INTR_EXIT() \
644 cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
645 if (apic_level_intr[irq]) \
646 apic_reg_ops->apic_send_eoi(irq); \
647 cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
648 /* ISR above current pri could not be in progress */ \
649 cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
653 * Any changes made to this function must also change X2APIC
654 * version of intr_exit.
657 apic_intr_exit(int prev_ipl
, int irq
)
659 apic_cpus_info_t
*cpu_infop
;
661 apic_reg_ops
->apic_write_task_reg(apic_ipltopri
[prev_ipl
]);
667 * Same as apic_intr_exit() except it uses MSR rather than MMIO
668 * to access local apic registers.
671 x2apic_intr_exit(int prev_ipl
, int irq
)
673 apic_cpus_info_t
*cpu_infop
;
675 X2APIC_WRITE(APIC_TASK_REG
, apic_ipltopri
[prev_ipl
]);
680 psm_intr_exit_fn(void)
682 if (apic_mode
== LOCAL_X2APIC
)
683 return (x2apic_intr_exit
);
685 return (apic_intr_exit
);
689 * Mask all interrupts below or equal to the given IPL.
690 * Any changes made to this function must also change X2APIC
696 apic_reg_ops
->apic_write_task_reg(apic_ipltopri
[ipl
]);
698 /* interrupts at ipl above this cannot be in progress */
699 apic_cpus
[psm_get_cpu_id()].aci_ISR_in_progress
&= (2 << ipl
) - 1;
701 * this is a patch fix for the ALR QSMP P5 machine, so that interrupts
702 * have enough time to come in before the priority is raised again
703 * during the idle() loop.
705 if (apic_setspl_delay
)
706 (void) apic_reg_ops
->apic_get_pri();
710 * X2APIC version of setspl.
711 * Mask all interrupts below or equal to the given IPL
714 x2apic_setspl(int ipl
)
716 X2APIC_WRITE(APIC_TASK_REG
, apic_ipltopri
[ipl
]);
718 /* interrupts at ipl above this cannot be in progress */
719 apic_cpus
[psm_get_cpu_id()].aci_ISR_in_progress
&= (2 << ipl
) - 1;
/*
 * Thin PSM entry point: record that irqno is now in use at the given
 * ipl, delegating all bookkeeping to the shared apic_addspl_common().
 */
static int
apic_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	return (apic_addspl_common(irqno, ipl, min_ipl, max_ipl));
}
/*
 * Thin PSM entry point: remove the ipl registration for irqno,
 * delegating all bookkeeping to the shared apic_delspl_common().
 */
static int
apic_delspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	return (apic_delspl_common(irqno, ipl, min_ipl, max_ipl));
}
736 apic_post_cpu_start(void)
739 static int cpus_started
= 1;
741 /* We know this CPU + BSP started successfully. */
745 * On BSP we would have enabled X2APIC, if supported by processor,
746 * in acpi_probe(), but on AP we do it here.
748 * We enable X2APIC mode only if BSP is running in X2APIC & the
749 * local APIC mode of the current CPU is MMIO (xAPIC).
751 if (apic_mode
== LOCAL_X2APIC
&& apic_detect_x2apic() &&
752 apic_local_mode() == LOCAL_APIC
) {
753 apic_enable_x2apic();
757 * Switch back to x2apic IPI sending method for performance when target
758 * CPU has entered x2apic mode.
760 if (apic_mode
== LOCAL_X2APIC
) {
761 apic_switch_ipi_callback(B_FALSE
);
764 splx(ipltospl(LOCK_LEVEL
));
768 * since some systems don't enable the internal cache on the non-boot
769 * cpus, so we have to enable them here
771 setcr0(getcr0() & ~(CR0_CD
| CR0_NW
));
774 APIC_AV_PENDING_SET();
776 if (apic_mode
== LOCAL_APIC
)
777 APIC_AV_PENDING_SET();
781 * We may be booting, or resuming from suspend; aci_status will
782 * be APIC_CPU_INTR_ENABLE if coming from suspend, so we add the
783 * APIC_CPU_ONLINE flag here rather than setting aci_status completely.
785 cpun
= psm_get_cpu_id();
786 apic_cpus
[cpun
].aci_status
|= APIC_CPU_ONLINE
;
788 apic_reg_ops
->apic_write(APIC_DIVIDE_REG
, apic_divide_reg_init
);
789 return (PSM_SUCCESS
);
793 * type == -1 indicates it is an internal request. Do not change
794 * resv_vector for these requests
797 apic_get_ipivect(int ipl
, int type
)
802 if ((irq
= apic_allocate_irq(APIC_VECTOR(ipl
))) != -1) {
803 if ((vector
= apic_allocate_vector(ipl
, irq
, 1))) {
804 apic_irq_table
[irq
]->airq_mps_intr_index
=
806 apic_irq_table
[irq
]->airq_vector
= vector
;
808 apic_resv_vector
[ipl
] = vector
;
813 apic_error
|= APIC_ERR_GET_IPIVECT_FAIL
;
814 return (-1); /* shouldn't happen */
818 apic_getclkirq(int ipl
)
822 if ((irq
= apic_get_ipivect(ipl
, -1)) == -1)
825 * Note the vector in apic_clkvect for per clock handling.
827 apic_clkvect
= apic_irq_table
[irq
]->airq_vector
- APIC_BASE_VECT
;
828 APIC_VERBOSE_IOAPIC((CE_NOTE
, "get_clkirq: vector = %x\n",
834 * Try and disable all interrupts. We just assign interrupts to other
835 * processors based on policy. If any were bound by user request, we
836 * let them continue and return failure. We do not bother to check
837 * for cache affinity while rebinding.
841 apic_disable_intr(processorid_t cpun
)
843 int bind_cpu
= 0, i
, hardbound
= 0;
847 iflag
= intr_clear();
848 lock_set(&apic_ioapic_lock
);
850 for (i
= 0; i
<= APIC_MAX_VECTOR
; i
++) {
851 if (apic_reprogram_info
[i
].done
== B_FALSE
) {
852 if (apic_reprogram_info
[i
].bindcpu
== cpun
) {
854 * CPU is busy -- it's the target of
855 * a pending reprogramming attempt
857 lock_clear(&apic_ioapic_lock
);
859 return (PSM_FAILURE
);
864 apic_cpus
[cpun
].aci_status
&= ~APIC_CPU_INTR_ENABLE
;
866 apic_cpus
[cpun
].aci_curipl
= 0;
868 i
= apic_min_device_irq
;
869 for (; i
<= apic_max_device_irq
; i
++) {
871 * If there are bound interrupts on this cpu, then
872 * rebind them to other processors.
874 if ((irq_ptr
= apic_irq_table
[i
]) != NULL
) {
875 ASSERT((irq_ptr
->airq_temp_cpu
== IRQ_UNBOUND
) ||
876 (irq_ptr
->airq_temp_cpu
== IRQ_UNINIT
) ||
877 (apic_cpu_in_range(irq_ptr
->airq_temp_cpu
)));
879 if (irq_ptr
->airq_temp_cpu
== (cpun
| IRQ_USER_BOUND
)) {
884 if (irq_ptr
->airq_temp_cpu
== cpun
) {
887 apic_find_cpu(APIC_CPU_INTR_ENABLE
);
888 } while (apic_rebind_all(irq_ptr
, bind_cpu
));
893 lock_clear(&apic_ioapic_lock
);
897 cmn_err(CE_WARN
, "Could not disable interrupts on %d"
898 "due to user bound interrupts", cpun
);
899 return (PSM_FAILURE
);
902 return (PSM_SUCCESS
);
906 * Bind interrupts to the CPU's local APIC.
907 * Interrupts should not be bound to a CPU's local APIC until the CPU
908 * is ready to receive interrupts.
911 apic_enable_intr(processorid_t cpun
)
917 iflag
= intr_clear();
918 lock_set(&apic_ioapic_lock
);
920 apic_cpus
[cpun
].aci_status
|= APIC_CPU_INTR_ENABLE
;
922 i
= apic_min_device_irq
;
923 for (i
= apic_min_device_irq
; i
<= apic_max_device_irq
; i
++) {
924 if ((irq_ptr
= apic_irq_table
[i
]) != NULL
) {
925 if ((irq_ptr
->airq_cpu
& ~IRQ_USER_BOUND
) == cpun
) {
926 (void) apic_rebind_all(irq_ptr
,
932 if (apic_cpus
[cpun
].aci_status
& APIC_CPU_SUSPEND
)
933 apic_cpus
[cpun
].aci_status
&= ~APIC_CPU_SUSPEND
;
935 lock_clear(&apic_ioapic_lock
);
940 * If this module needs a periodic handler for the interrupt distribution, it
941 * can be added here. The argument to the periodic handler is not currently
942 * used, but is reserved for future.
945 apic_post_cyclic_setup(void *arg
)
947 _NOTE(ARGUNUSED(arg
))
952 /* cpu_lock is held */
953 /* set up a periodic handler for intr redistribution */
956 * In peridoc mode intr redistribution processing is done in
957 * apic_intr_enter during clk intr processing
963 * Register a periodical handler for the redistribution processing.
964 * Though we would generally prefer to use the DDI interface for
965 * periodic handler invocation, ddi_periodic_add(9F), we are
966 * unfortunately already holding cpu_lock, which ddi_periodic_add will
967 * attempt to take for us. Thus, we add our own cyclic directly:
969 cyh
.cyh_func
= (void (*)(void *))apic_redistribute_compute
;
971 cyh
.cyh_level
= CY_LOW_LEVEL
;
974 cyt
.cyt_interval
= apic_redistribute_sample_interval
;
976 apic_cyclic_id
= cyclic_add(&cyh
, &cyt
);
980 apic_redistribute_compute(void)
984 if (apic_enable_dynamic_migration
) {
985 if (++apic_nticks
== apic_sample_factor_redistribution
) {
987 * Time to call apic_intr_redistribute().
988 * reset apic_nticks. This will cause max_busy
989 * to be calculated below and if it is more than
990 * apic_int_busy, we will do the whole thing
995 for (i
= 0; i
< apic_nproc
; i
++) {
996 if (!apic_cpu_in_range(i
))
1000 * Check if curipl is non zero & if ISR is in
1003 if (((j
= apic_cpus
[i
].aci_curipl
) != 0) &&
1004 (apic_cpus
[i
].aci_ISR_in_progress
& (1 << j
))) {
1007 apic_cpus
[i
].aci_busy
++;
1008 irq
= apic_cpus
[i
].aci_current
[j
];
1009 apic_irq_table
[irq
]->airq_busy
++;
1013 (apic_cpus
[i
].aci_busy
> max_busy
))
1014 max_busy
= apic_cpus
[i
].aci_busy
;
1017 if (max_busy
> apic_int_busy_mark
) {
1019 * We could make the following check be
1020 * skipped > 1 in which case, we get a
1021 * redistribution at half the busy mark (due to
1022 * double interval). Need to be able to collect
1023 * more empirical data to decide if that is a
1024 * good strategy. Punt for now.
1026 if (apic_skipped_redistribute
) {
1027 apic_cleanup_busy();
1028 apic_skipped_redistribute
= 0;
1030 apic_intr_redistribute();
1033 apic_skipped_redistribute
++;
1040 * The following functions are in the platform specific file so that they
1041 * can be different functions depending on whether we are running on
1042 * bare metal or a hypervisor.
1046 * Check to make sure there are enough irq slots
1049 apic_check_free_irqs(int count
)
1054 for (i
= APIC_FIRST_FREE_IRQ
; i
< APIC_RESV_IRQ
; i
++) {
1055 if ((apic_irq_table
[i
] == NULL
) ||
1056 apic_irq_table
[i
]->airq_mps_intr_index
== FREE_INDEX
) {
1057 if (++avail
>= count
)
1058 return (PSM_SUCCESS
);
1061 return (PSM_FAILURE
);
1065 * This function allocates "count" MSI vector(s) for the given "dip/pri/type"
1068 apic_alloc_msi_vectors(dev_info_t
*dip
, int inum
, int count
, int pri
,
1072 uchar_t start
, irqno
;
1077 DDI_INTR_IMPLDBG((CE_CONT
, "apic_alloc_msi_vectors: dip=0x%p "
1078 "inum=0x%x pri=0x%x count=0x%x behavior=%d\n",
1079 (void *)dip
, inum
, pri
, count
, behavior
));
1082 if (behavior
== DDI_INTR_ALLOC_STRICT
&&
1083 apic_multi_msi_enable
== 0)
1085 if (apic_multi_msi_enable
== 0)
1089 if ((rcount
= apic_navail_vector(dip
, pri
)) > count
)
1091 else if (rcount
== 0 || (rcount
< count
&&
1092 behavior
== DDI_INTR_ALLOC_STRICT
))
1095 /* if not ISP2, then round it down */
1097 rcount
= 1 << (highbit(rcount
) - 1);
1099 mutex_enter(&airq_mutex
);
1101 for (start
= 0; rcount
> 0; rcount
>>= 1) {
1102 if ((start
= apic_find_multi_vectors(pri
, rcount
)) != 0 ||
1103 behavior
== DDI_INTR_ALLOC_STRICT
)
1108 /* no vector available */
1109 mutex_exit(&airq_mutex
);
1113 if (apic_check_free_irqs(rcount
) == PSM_FAILURE
) {
1114 /* not enough free irq slots available */
1115 mutex_exit(&airq_mutex
);
1119 major
= (dip
!= NULL
) ? ddi_driver_major(dip
) : 0;
1120 for (i
= 0; i
< rcount
; i
++) {
1121 if ((irqno
= apic_allocate_irq(apic_first_avail_irq
)) ==
1124 * shouldn't happen because of the
1125 * apic_check_free_irqs() check earlier
1127 mutex_exit(&airq_mutex
);
1128 DDI_INTR_IMPLDBG((CE_CONT
, "apic_alloc_msi_vectors: "
1129 "apic_allocate_irq failed\n"));
1132 apic_max_device_irq
= max(irqno
, apic_max_device_irq
);
1133 apic_min_device_irq
= min(irqno
, apic_min_device_irq
);
1134 irqptr
= apic_irq_table
[irqno
];
1136 if (apic_vector_to_irq
[start
+ i
] != APIC_RESV_IRQ
)
1137 DDI_INTR_IMPLDBG((CE_CONT
, "apic_alloc_msi_vectors: "
1138 "apic_vector_to_irq is not APIC_RESV_IRQ\n"));
1140 apic_vector_to_irq
[start
+ i
] = (uchar_t
)irqno
;
1142 irqptr
->airq_vector
= (uchar_t
)(start
+ i
);
1143 irqptr
->airq_ioapicindex
= (uchar_t
)inum
; /* start */
1144 irqptr
->airq_intin_no
= (uchar_t
)rcount
;
1145 ASSERT(pri
>= 0 && pri
<= UCHAR_MAX
);
1146 irqptr
->airq_ipl
= (uchar_t
)pri
;
1147 irqptr
->airq_vector
= start
+ i
;
1148 irqptr
->airq_origirq
= (uchar_t
)(inum
+ i
);
1149 irqptr
->airq_share_id
= 0;
1150 irqptr
->airq_mps_intr_index
= MSI_INDEX
;
1151 irqptr
->airq_dip
= dip
;
1152 irqptr
->airq_major
= major
;
1153 if (i
== 0) /* they all bound to the same cpu */
1154 cpu
= irqptr
->airq_cpu
= apic_bind_intr(dip
, irqno
,
1157 irqptr
->airq_cpu
= cpu
;
1158 DDI_INTR_IMPLDBG((CE_CONT
, "apic_alloc_msi_vectors: irq=0x%x "
1159 "dip=0x%p vector=0x%x origirq=0x%x pri=0x%x\n", irqno
,
1160 (void *)irqptr
->airq_dip
, irqptr
->airq_vector
,
1161 irqptr
->airq_origirq
, pri
));
1163 mutex_exit(&airq_mutex
);
1168 * This function allocates "count" MSI-X vector(s) for the given "dip/pri/type"
1171 apic_alloc_msix_vectors(dev_info_t
*dip
, int inum
, int count
, int pri
,
1177 mutex_enter(&airq_mutex
);
1179 if ((rcount
= apic_navail_vector(dip
, pri
)) > count
)
1181 else if (rcount
== 0 || (rcount
< count
&&
1182 behavior
== DDI_INTR_ALLOC_STRICT
)) {
1187 if (apic_check_free_irqs(rcount
) == PSM_FAILURE
) {
1188 /* not enough free irq slots available */
1193 major
= (dip
!= NULL
) ? ddi_driver_major(dip
) : 0;
1194 for (i
= 0; i
< rcount
; i
++) {
1195 uchar_t vector
, irqno
;
1198 if ((irqno
= apic_allocate_irq(apic_first_avail_irq
)) ==
1201 * shouldn't happen because of the
1202 * apic_check_free_irqs() check earlier
1204 DDI_INTR_IMPLDBG((CE_CONT
, "apic_alloc_msix_vectors: "
1205 "apic_allocate_irq failed\n"));
1209 if ((vector
= apic_allocate_vector(pri
, irqno
, 1)) == 0) {
1211 * shouldn't happen because of the
1212 * apic_navail_vector() call earlier
1214 DDI_INTR_IMPLDBG((CE_CONT
, "apic_alloc_msix_vectors: "
1215 "apic_allocate_vector failed\n"));
1219 apic_max_device_irq
= max(irqno
, apic_max_device_irq
);
1220 apic_min_device_irq
= min(irqno
, apic_min_device_irq
);
1221 irqptr
= apic_irq_table
[irqno
];
1222 irqptr
->airq_vector
= (uchar_t
)vector
;
1223 ASSERT(pri
>= 0 && pri
<= UCHAR_MAX
);
1224 irqptr
->airq_ipl
= (uchar_t
)pri
;
1225 irqptr
->airq_origirq
= (uchar_t
)(inum
+ i
);
1226 irqptr
->airq_share_id
= 0;
1227 irqptr
->airq_mps_intr_index
= MSIX_INDEX
;
1228 irqptr
->airq_dip
= dip
;
1229 irqptr
->airq_major
= major
;
1230 irqptr
->airq_cpu
= apic_bind_intr(dip
, irqno
, 0xff, 0xff);
1233 mutex_exit(&airq_mutex
);
1238 * Allocate a free vector for irq at ipl. Takes care of merging of multiple
1239 * IPLs into a single APIC level as well as stretching some IPLs onto multiple
1240 * levels. APIC_HI_PRI_VECTS interrupts are reserved for high priority
1241 * requests and allocated only when pri is set.
1244 apic_allocate_vector(int ipl
, int irq
, int pri
)
1246 int lowest
, highest
, i
;
1248 highest
= apic_ipltopri
[ipl
] + APIC_VECTOR_MASK
;
1249 lowest
= apic_ipltopri
[ipl
- 1] + APIC_VECTOR_PER_IPL
;
1251 if (highest
< lowest
) /* Both ipl and ipl - 1 map to same pri */
1252 lowest
-= APIC_VECTOR_PER_IPL
;
1255 if (apic_restrict_vector
) /* for testing shared interrupt logic */
1256 highest
= lowest
+ apic_restrict_vector
+ APIC_HI_PRI_VECTS
;
1259 highest
-= APIC_HI_PRI_VECTS
;
1261 for (i
= lowest
; i
<= highest
; i
++) {
1262 if (APIC_CHECK_RESERVE_VECTORS(i
))
1264 if (apic_vector_to_irq
[i
] == APIC_RESV_IRQ
) {
1265 apic_vector_to_irq
[i
] = (uchar_t
)irq
;
1266 ASSERT(i
>= 0 && i
<= UCHAR_MAX
);
1267 return ((uchar_t
)i
);
1274 /* Mark vector as not being used by any irq */
1276 apic_free_vector(uchar_t vector
)
1278 apic_vector_to_irq
[vector
] = APIC_RESV_IRQ
;
1282 * Call rebind to do the actual programming.
1283 * Must be called with interrupts disabled and apic_ioapic_lock held
1284 * 'p' is polymorphic -- if this function is called to process a deferred
1285 * reprogramming, p is of type 'struct ioapic_reprogram_data *', from which
1286 * the irq pointer is retrieved. If not doing deferred reprogramming,
1287 * p is of the type 'apic_irq_t *'.
1289 * apic_ioapic_lock must be held across this call, as it protects apic_rebind
1290 * and it protects apic_get_next_bind_cpu() from a race in which a CPU can be
1291 * taken offline after a cpu is selected, but before apic_rebind is called to
1292 * bind interrupts to it.
1295 apic_setup_io_intr(void *p
, int irq
, boolean_t deferred
)
1298 struct ioapic_reprogram_data
*drep
= NULL
;
1302 drep
= (struct ioapic_reprogram_data
*)p
;
1303 ASSERT(drep
!= NULL
);
1304 irqptr
= drep
->irqp
;
1306 irqptr
= (apic_irq_t
*)p
;
1308 ASSERT(irqptr
!= NULL
);
1310 rv
= apic_rebind(irqptr
, apic_irq_table
[irq
]->airq_cpu
, drep
);
1313 * CPU is not up or interrupts are disabled. Fall back to
1314 * the first available CPU
1316 rv
= apic_rebind(irqptr
, apic_find_cpu(APIC_CPU_INTR_ENABLE
),
1325 apic_modify_vector(uchar_t vector
, int irq
)
1327 apic_vector_to_irq
[vector
] = (uchar_t
)irq
;
1332 apic_get_apic_type(void)
1334 return (apic_psm_info
.p_mach_idstring
);
1338 x2apic_update_psm(void)
1340 struct psm_ops
*pops
= &apic_ops
;
1342 ASSERT(pops
!= NULL
);
1344 pops
->psm_intr_exit
= x2apic_intr_exit
;
1345 pops
->psm_setspl
= x2apic_setspl
;
1347 pops
->psm_send_ipi
= x2apic_send_ipi
;
1348 send_dirintf
= pops
->psm_send_ipi
;
1350 apic_mode
= LOCAL_X2APIC
;