4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
26 * Copyright (c) 2010, Intel Corporation.
27 * All rights reserved.
30 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
31 * Copyright 2013 Pluribus Networks, Inc.
34 #include <sys/processor.h>
37 #include <sys/smp_impldefs.h>
39 #include <sys/acpi/acpi.h>
40 #include <sys/acpica.h>
41 #include <sys/psm_common.h>
44 #include <sys/sunddi.h>
45 #include <sys/ddi_impldefs.h>
47 #include <sys/promif.h>
48 #include <sys/x86_archext.h>
49 #include <sys/cpc_impl.h>
50 #include <sys/uadmin.h>
51 #include <sys/panic.h>
52 #include <sys/debug.h>
53 #include <sys/archsystm.h>
55 #include <sys/machsystm.h>
56 #include <sys/sysmacros.h>
57 #include <sys/cpuvar.h>
58 #include <sys/rm_platter.h>
59 #include <sys/privregs.h>
61 #include <sys/pci_intr_lib.h>
63 #include <sys/clock.h>
64 #include <sys/dditypes.h>
65 #include <sys/sunddi.h>
66 #include <sys/x_call.h>
67 #include <sys/reboot.h>
70 static int apix_get_avail_vector_oncpu(uint32_t, int, int);
71 static apix_vector_t
*apix_init_vector(processorid_t
, uchar_t
);
72 static void apix_cleanup_vector(apix_vector_t
*);
73 static void apix_insert_av(apix_vector_t
*, void *, avfunc
, caddr_t
, caddr_t
,
74 uint64_t *, int, dev_info_t
*);
75 static void apix_remove_av(apix_vector_t
*, struct autovec
*);
76 static void apix_clear_dev_map(dev_info_t
*, int, int);
77 static boolean_t
apix_is_cpu_enabled(processorid_t
);
78 static void apix_wait_till_seen(processorid_t
, int);
80 #define GET_INTR_INUM(ihdlp) \
81 (((ihdlp) != NULL) ? ((ddi_intr_handle_impl_t *)(ihdlp))->ih_inum : 0)
83 apix_rebind_info_t apix_rebindinfo
= {0, 0, 0, NULL
, 0, NULL
};
88 * Return vector number or 0 on error
91 apix_alloc_ipi(int ipl
)
98 APIX_ENTER_CPU_LOCK(0);
100 vector
= apix_get_avail_vector_oncpu(0, APIX_IPI_MIN
, APIX_IPI_MAX
);
102 APIX_LEAVE_CPU_LOCK(0);
103 cmn_err(CE_WARN
, "apix: no available IPI\n");
104 apic_error
|= APIC_ERR_GET_IPIVECT_FAIL
;
108 nproc
= max(apic_nproc
, apic_max_nproc
);
109 for (cpun
= 0; cpun
< nproc
; cpun
++) {
110 vecp
= xv_vector(cpun
, vector
);
112 vecp
= kmem_zalloc(sizeof (apix_vector_t
), KM_NOSLEEP
);
114 cmn_err(CE_WARN
, "apix: No memory for ipi");
117 xv_vector(cpun
, vector
) = vecp
;
119 vecp
->v_state
= APIX_STATE_ALLOCED
;
120 vecp
->v_type
= APIX_TYPE_IPI
;
121 vecp
->v_cpuid
= vecp
->v_bound_cpuid
= cpun
;
122 vecp
->v_vector
= vector
;
125 APIX_LEAVE_CPU_LOCK(0);
130 apix_cleanup_vector(xv_vector(cpun
, vector
));
131 APIX_LEAVE_CPU_LOCK(0);
136 * Add IPI service routine
139 apix_add_ipi(int ipl
, avfunc xxintr
, char *name
, int vector
,
140 caddr_t arg1
, caddr_t arg2
)
146 ASSERT(vector
>= APIX_IPI_MIN
&& vector
<= APIX_IPI_MAX
);
148 nproc
= max(apic_nproc
, apic_max_nproc
);
149 for (cpun
= 0; cpun
< nproc
; cpun
++) {
150 APIX_ENTER_CPU_LOCK(cpun
);
151 vecp
= xv_vector(cpun
, vector
);
152 apix_insert_av(vecp
, NULL
, xxintr
, arg1
, arg2
, NULL
, ipl
, NULL
);
153 vecp
->v_state
= APIX_STATE_ENABLED
;
154 APIX_LEAVE_CPU_LOCK(cpun
);
157 APIC_VERBOSE(IPI
, (CE_CONT
, "apix: add ipi for %s, vector %x "
158 "ipl %x\n", name
, vector
, ipl
));
164 * Find and return first free vector in range (start, end)
167 apix_get_avail_vector_oncpu(uint32_t cpuid
, int start
, int end
)
170 apix_impl_t
*apixp
= apixs
[cpuid
];
172 for (i
= start
; i
<= end
; i
++) {
173 if (APIC_CHECK_RESERVE_VECTORS(i
))
175 if (IS_VECT_FREE(apixp
->x_vectbl
[i
]))
183 * Allocate a vector on specified cpu
185 * Return NULL on error
187 static apix_vector_t
*
188 apix_alloc_vector_oncpu(uint32_t cpuid
, dev_info_t
*dip
, int inum
, int type
)
190 processorid_t tocpu
= cpuid
& ~IRQ_USER_BOUND
;
194 ASSERT(APIX_CPU_LOCK_HELD(tocpu
));
196 /* find free vector */
197 vector
= apix_get_avail_vector_oncpu(tocpu
, APIX_AVINTR_MIN
,
202 vecp
= apix_init_vector(tocpu
, vector
);
203 vecp
->v_type
= (ushort_t
)type
;
205 vecp
->v_flags
= (cpuid
& IRQ_USER_BOUND
) ? APIX_VECT_USER_BOUND
: 0;
208 apix_set_dev_map(vecp
, dip
, inum
);
214 * Allocates "count" contiguous MSI vectors starting at the proper alignment.
215 * Caller needs to make sure that count has to be power of 2 and should not
218 * Return first vector number
221 apix_alloc_nvectors_oncpu(uint32_t cpuid
, dev_info_t
*dip
, int inum
,
224 int i
, msibits
, start
= 0, navail
= 0;
225 apix_vector_t
*vecp
, *startp
= NULL
;
226 processorid_t tocpu
= cpuid
& ~IRQ_USER_BOUND
;
229 ASSERT(APIX_CPU_LOCK_HELD(tocpu
));
232 * msibits is the no. of lower order message data bits for the
233 * allocated MSI vectors and is used to calculate the aligned
238 /* It has to be contiguous */
239 for (i
= APIX_AVINTR_MIN
; i
<= APIX_AVINTR_MAX
; i
++) {
240 if (!IS_VECT_FREE(xv_vector(tocpu
, i
)))
244 * starting vector has to be aligned accordingly for
248 i
= (i
+ msibits
) & ~msibits
;
250 for (navail
= 0, start
= i
; i
<= APIX_AVINTR_MAX
; i
++) {
251 if (!IS_VECT_FREE(xv_vector(tocpu
, i
)))
253 if (APIC_CHECK_RESERVE_VECTORS(i
))
255 if (++navail
== count
)
263 flags
= (cpuid
& IRQ_USER_BOUND
) ? APIX_VECT_USER_BOUND
: 0;
265 for (i
= 0; i
< count
; i
++) {
266 if ((vecp
= apix_init_vector(tocpu
, start
+ i
)) == NULL
)
269 vecp
->v_type
= (ushort_t
)type
;
270 vecp
->v_inum
= inum
+ i
;
271 vecp
->v_flags
= flags
;
274 apix_set_dev_map(vecp
, dip
, inum
+ i
);
283 while (i
-- > 0) { /* Free allocated vectors */
284 vecp
= xv_vector(tocpu
, start
+ i
);
285 apix_clear_dev_map(dip
, inum
+ i
, type
);
286 apix_cleanup_vector(vecp
);
291 #define APIX_WRITE_MSI_DATA(_hdl, _cap, _ctrl, _v)\
293 if ((_ctrl) & PCI_MSI_64BIT_MASK)\
294 pci_config_put16((_hdl), (_cap) + PCI_MSI_64BIT_DATA, (_v));\
296 pci_config_put16((_hdl), (_cap) + PCI_MSI_32BIT_DATA, (_v));\
297 _NOTE(CONSTCOND)} while (0)
300 apix_pci_msi_enable_vector(apix_vector_t
*vecp
, dev_info_t
*dip
, int type
,
301 int inum
, int count
, uchar_t vector
, int target_apic_id
)
303 uint64_t msi_addr
, msi_data
;
305 int i
, cap_ptr
= i_ddi_get_msi_msix_cap_ptr(dip
);
306 ddi_acc_handle_t handle
= i_ddi_get_pci_config_handle(dip
);
308 void *intrmap_tbl
[PCI_MSI_MAX_INTRS
];
310 DDI_INTR_IMPLDBG((CE_CONT
, "apix_pci_msi_enable_vector: dip=0x%p\n"
311 "\tdriver = %s, inum=0x%x vector=0x%x apicid=0x%x\n", (void *)dip
,
312 ddi_driver_name(dip
), inum
, vector
, target_apic_id
));
314 ASSERT((handle
!= NULL
) && (cap_ptr
!= 0));
316 msi_regs
.mr_data
= vector
;
317 msi_regs
.mr_addr
= target_apic_id
;
319 for (i
= 0; i
< count
; i
++)
320 intrmap_tbl
[i
] = xv_intrmap_private(vecp
->v_cpuid
, vector
+ i
);
321 apic_vt_ops
->apic_intrmap_alloc_entry(intrmap_tbl
, dip
, type
,
323 for (i
= 0; i
< count
; i
++)
324 xv_intrmap_private(vecp
->v_cpuid
, vector
+ i
) = intrmap_tbl
[i
];
326 apic_vt_ops
->apic_intrmap_map_entry(vecp
->v_intrmap_private
,
327 (void *)&msi_regs
, type
, count
);
328 apic_vt_ops
->apic_intrmap_record_msi(vecp
->v_intrmap_private
,
332 msi_addr
= msi_regs
.mr_addr
;
334 /* MSI Data: MSI is edge triggered according to spec */
335 msi_data
= msi_regs
.mr_data
;
337 DDI_INTR_IMPLDBG((CE_CONT
, "apix_pci_msi_enable_vector: addr=0x%lx "
338 "data=0x%lx\n", (long)msi_addr
, (long)msi_data
));
340 if (type
== APIX_TYPE_MSI
) {
341 msi_ctrl
= pci_config_get16(handle
, cap_ptr
+ PCI_MSI_CTRL
);
343 /* Set the bits to inform how many MSIs are enabled */
344 msi_ctrl
|= ((highbit(count
) - 1) << PCI_MSI_MME_SHIFT
);
345 pci_config_put16(handle
, cap_ptr
+ PCI_MSI_CTRL
, msi_ctrl
);
347 if ((vecp
->v_flags
& APIX_VECT_MASKABLE
) == 0)
348 APIX_WRITE_MSI_DATA(handle
, cap_ptr
, msi_ctrl
,
351 pci_config_put32(handle
,
352 cap_ptr
+ PCI_MSI_ADDR_OFFSET
, msi_addr
);
353 if (msi_ctrl
& PCI_MSI_64BIT_MASK
)
354 pci_config_put32(handle
,
355 cap_ptr
+ PCI_MSI_ADDR_OFFSET
+ 4, msi_addr
>> 32);
357 APIX_WRITE_MSI_DATA(handle
, cap_ptr
, msi_ctrl
, msi_data
);
358 } else if (type
== APIX_TYPE_MSIX
) {
360 ddi_intr_msix_t
*msix_p
= i_ddi_get_msix(dip
);
362 /* Offset into the "inum"th entry in the MSI-X table */
363 off
= (uintptr_t)msix_p
->msix_tbl_addr
+
364 (inum
* PCI_MSIX_VECTOR_SIZE
);
366 ddi_put32(msix_p
->msix_tbl_hdl
,
367 (uint32_t *)(off
+ PCI_MSIX_DATA_OFFSET
), msi_data
);
368 ddi_put32(msix_p
->msix_tbl_hdl
,
369 (uint32_t *)(off
+ PCI_MSIX_LOWER_ADDR_OFFSET
), msi_addr
);
370 ddi_put32(msix_p
->msix_tbl_hdl
,
371 (uint32_t *)(off
+ PCI_MSIX_UPPER_ADDR_OFFSET
),
377 apix_pci_msi_enable_mode(dev_info_t
*dip
, int type
, int inum
)
380 int cap_ptr
= i_ddi_get_msi_msix_cap_ptr(dip
);
381 ddi_acc_handle_t handle
= i_ddi_get_pci_config_handle(dip
);
383 ASSERT((handle
!= NULL
) && (cap_ptr
!= 0));
385 if (type
== APIX_TYPE_MSI
) {
386 msi_ctrl
= pci_config_get16(handle
, cap_ptr
+ PCI_MSI_CTRL
);
387 if ((msi_ctrl
& PCI_MSI_ENABLE_BIT
))
390 msi_ctrl
|= PCI_MSI_ENABLE_BIT
;
391 pci_config_put16(handle
, cap_ptr
+ PCI_MSI_CTRL
, msi_ctrl
);
393 } else if (type
== DDI_INTR_TYPE_MSIX
) {
396 ddi_intr_msix_t
*msix_p
;
398 msix_p
= i_ddi_get_msix(dip
);
400 /* Offset into "inum"th entry in the MSI-X table & clear mask */
401 off
= (uintptr_t)msix_p
->msix_tbl_addr
+ (inum
*
402 PCI_MSIX_VECTOR_SIZE
) + PCI_MSIX_VECTOR_CTRL_OFFSET
;
404 mask
= ddi_get32(msix_p
->msix_tbl_hdl
, (uint32_t *)off
);
406 ddi_put32(msix_p
->msix_tbl_hdl
, (uint32_t *)off
, (mask
& ~1));
408 msi_ctrl
= pci_config_get16(handle
, cap_ptr
+ PCI_MSIX_CTRL
);
410 if (!(msi_ctrl
& PCI_MSIX_ENABLE_BIT
)) {
411 msi_ctrl
|= PCI_MSIX_ENABLE_BIT
;
412 pci_config_put16(handle
, cap_ptr
+ PCI_MSIX_CTRL
,
419 * Setup interrupt, programming IO-APIC or MSI/X address/data.
422 apix_enable_vector(apix_vector_t
*vecp
)
424 int tocpu
= vecp
->v_cpuid
, type
= vecp
->v_type
;
425 apic_cpus_info_t
*cpu_infop
;
428 ASSERT(tocpu
< apic_nproc
);
430 cpu_infop
= &apic_cpus
[tocpu
];
431 if (vecp
->v_flags
& APIX_VECT_USER_BOUND
)
432 cpu_infop
->aci_bound
++;
434 cpu_infop
->aci_temp_bound
++;
436 iflag
= intr_clear();
437 lock_set(&apic_ioapic_lock
);
439 if (!DDI_INTR_IS_MSI_OR_MSIX(type
)) { /* fixed */
440 apix_intx_enable(vecp
->v_inum
);
442 int inum
= vecp
->v_inum
;
443 dev_info_t
*dip
= APIX_GET_DIP(vecp
);
444 int count
= i_ddi_intr_get_current_nintrs(dip
);
446 if (type
== APIX_TYPE_MSI
) { /* MSI */
447 if (inum
== apix_get_max_dev_inum(dip
, type
)) {
449 uchar_t start_inum
= inum
+ 1 - count
;
450 uchar_t start_vect
= vecp
->v_vector
+ 1 - count
;
451 apix_vector_t
*start_vecp
=
452 xv_vector(vecp
->v_cpuid
, start_vect
);
454 APIC_VERBOSE(INTR
, (CE_CONT
, "apix: call "
455 "apix_pci_msi_enable_vector\n"));
456 apix_pci_msi_enable_vector(start_vecp
, dip
,
457 type
, start_inum
, count
, start_vect
,
458 cpu_infop
->aci_local_id
);
460 APIC_VERBOSE(INTR
, (CE_CONT
, "apix: call "
461 "apix_pci_msi_enable_mode\n"));
462 apix_pci_msi_enable_mode(dip
, type
, inum
);
465 apix_pci_msi_enable_vector(vecp
, dip
,
466 type
, inum
, 1, vecp
->v_vector
,
467 cpu_infop
->aci_local_id
);
468 apix_pci_msi_enable_mode(dip
, type
, inum
);
471 vecp
->v_state
= APIX_STATE_ENABLED
;
472 apic_redist_cpu_skip
&= ~(1 << tocpu
);
474 lock_clear(&apic_ioapic_lock
);
479 * Disable the interrupt
482 apix_disable_vector(apix_vector_t
*vecp
)
484 struct autovec
*avp
= vecp
->v_autovect
;
489 iflag
= intr_clear();
490 lock_set(&apic_ioapic_lock
);
492 switch (vecp
->v_type
) {
494 ASSERT(avp
->av_vector
!= NULL
&& avp
->av_dip
!= NULL
);
496 * Disable the MSI vector
497 * Make sure we only disable on the last
498 * of the multi-MSI support
500 if (i_ddi_intr_get_current_nenables(avp
->av_dip
) == 1) {
501 apic_pci_msi_disable_mode(avp
->av_dip
,
506 ASSERT(avp
->av_vector
!= NULL
&& avp
->av_dip
!= NULL
);
508 * Disable the MSI-X vector
509 * needs to clear its mask and addr/data for each MSI-X
511 apic_pci_msi_unconfigure(avp
->av_dip
, DDI_INTR_TYPE_MSIX
,
514 * Make sure we only disable on the last MSI-X
516 if (i_ddi_intr_get_current_nenables(avp
->av_dip
) == 1) {
517 apic_pci_msi_disable_mode(avp
->av_dip
,
522 apix_intx_disable(vecp
->v_inum
);
526 if (!(apic_cpus
[vecp
->v_cpuid
].aci_status
& APIC_CPU_SUSPEND
))
527 vecp
->v_state
= APIX_STATE_DISABLED
;
528 apic_vt_ops
->apic_intrmap_free_entry(&vecp
->v_intrmap_private
);
529 vecp
->v_intrmap_private
= NULL
;
531 lock_clear(&apic_ioapic_lock
);
536 * Mark vector as obsoleted or freed. The vector is marked
537 * obsoleted if there are pending requests on it. Otherwise,
538 * free the vector. The obsoleted vectors get freed after
541 * Return 1 on being obsoleted and 0 on being freed.
543 #define INTR_BUSY(_avp)\
544 ((((volatile ushort_t)(_avp)->av_flags) &\
545 (AV_PENTRY_PEND | AV_PENTRY_ONPROC)) != 0)
546 #define LOCAL_WITH_INTR_DISABLED(_cpuid)\
547 ((_cpuid) == psm_get_cpu_id() && !interrupts_enabled())
548 static uint64_t dummy_tick
;
551 apix_obsolete_vector(apix_vector_t
*vecp
)
553 struct autovec
*avp
= vecp
->v_autovect
;
554 int repeats
, tries
, ipl
, busy
= 0, cpuid
= vecp
->v_cpuid
;
555 apix_impl_t
*apixp
= apixs
[cpuid
];
557 ASSERT(APIX_CPU_LOCK_HELD(cpuid
));
559 for (avp
= vecp
->v_autovect
; avp
!= NULL
; avp
= avp
->av_link
) {
560 if (avp
->av_vector
== NULL
)
563 if (LOCAL_WITH_INTR_DISABLED(cpuid
)) {
566 if (INTR_BUSY(avp
)) {
571 /* check IRR for pending interrupts */
572 index
= vecp
->v_vector
/ 32;
573 bit
= vecp
->v_vector
% 32;
574 irr
= apic_reg_ops
->apic_read(APIC_IRR_REG
+ index
);
575 if ((irr
& (1 << bit
)) != 0)
579 apix_remove_av(vecp
, avp
);
587 for (tries
= 0; tries
< apic_max_reps_clear_pending
;
591 } while (INTR_BUSY(avp
) &&
592 (repeats
< apic_max_reps_clear_pending
));
598 * Interrupt is not in pending list or being serviced.
599 * However it might be cached in Local APIC's IRR
600 * register. It's impossible to check another CPU's
601 * IRR register. Then wait till lower levels finish
604 for (ipl
= 1; ipl
< MIN(LOCK_LEVEL
, vecp
->v_pri
); ipl
++)
605 apix_wait_till_seen(cpuid
, ipl
);
611 apix_remove_av(vecp
, avp
);
615 apix_vector_t
*tp
= apixp
->x_obsoletes
;
617 if (vecp
->v_state
== APIX_STATE_OBSOLETED
)
620 vecp
->v_state
= APIX_STATE_OBSOLETED
;
623 apixp
->x_obsoletes
= vecp
;
625 while (tp
->v_next
!= NULL
)
632 /* interrupt is not busy */
633 if (vecp
->v_state
== APIX_STATE_OBSOLETED
) {
634 /* remove from obsoleted list */
635 apixp
->x_obsoletes
= vecp
->v_next
;
638 apix_cleanup_vector(vecp
);
643 * Duplicate number of continuous vectors to specified target vectors.
646 apix_dup_vectors(apix_vector_t
*oldp
, apix_vector_t
*newp
, int count
)
649 apix_vector_t
*fromp
, *top
;
650 processorid_t oldcpu
= oldp
->v_cpuid
, newcpu
= newp
->v_cpuid
;
651 uchar_t oldvec
= oldp
->v_vector
, newvec
= newp
->v_vector
;
654 ASSERT(oldp
->v_type
!= APIX_TYPE_IPI
);
656 for (i
= 0; i
< count
; i
++) {
657 fromp
= xv_vector(oldcpu
, oldvec
+ i
);
658 top
= xv_vector(newcpu
, newvec
+ i
);
659 ASSERT(fromp
!= NULL
&& top
!= NULL
);
661 /* copy over original one */
662 top
->v_state
= fromp
->v_state
;
663 top
->v_type
= fromp
->v_type
;
664 top
->v_bound_cpuid
= fromp
->v_bound_cpuid
;
665 top
->v_inum
= fromp
->v_inum
;
666 top
->v_flags
= fromp
->v_flags
;
667 top
->v_intrmap_private
= fromp
->v_intrmap_private
;
669 for (avp
= fromp
->v_autovect
; avp
!= NULL
; avp
= avp
->av_link
) {
670 if (avp
->av_vector
== NULL
)
673 apix_insert_av(top
, avp
->av_intr_id
, avp
->av_vector
,
674 avp
->av_intarg1
, avp
->av_intarg2
, avp
->av_ticksp
,
675 avp
->av_prilevel
, avp
->av_dip
);
677 if (fromp
->v_type
== APIX_TYPE_FIXED
&&
678 avp
->av_dip
!= NULL
) {
679 inum
= GET_INTR_INUM(avp
->av_intr_id
);
680 apix_set_dev_map(top
, avp
->av_dip
, inum
);
684 if (DDI_INTR_IS_MSI_OR_MSIX(fromp
->v_type
) &&
685 fromp
->v_devp
!= NULL
)
686 apix_set_dev_map(top
, fromp
->v_devp
->dv_dip
,
687 fromp
->v_devp
->dv_inum
);
691 static apix_vector_t
*
692 apix_init_vector(processorid_t cpuid
, uchar_t vector
)
694 apix_impl_t
*apixp
= apixs
[cpuid
];
695 apix_vector_t
*vecp
= apixp
->x_vectbl
[vector
];
697 ASSERT(IS_VECT_FREE(vecp
));
700 vecp
= kmem_zalloc(sizeof (apix_vector_t
), KM_NOSLEEP
);
702 cmn_err(CE_WARN
, "apix: no memory to allocate vector");
705 apixp
->x_vectbl
[vector
] = vecp
;
707 vecp
->v_state
= APIX_STATE_ALLOCED
;
708 vecp
->v_cpuid
= vecp
->v_bound_cpuid
= cpuid
;
709 vecp
->v_vector
= vector
;
715 apix_cleanup_vector(apix_vector_t
*vecp
)
717 ASSERT(vecp
->v_share
== 0);
718 vecp
->v_bound_cpuid
= IRQ_UNINIT
;
719 vecp
->v_state
= APIX_STATE_FREED
;
723 vecp
->v_intrmap_private
= NULL
;
727 apix_dprint_vector(apix_vector_t
*vecp
, dev_info_t
*dip
, int count
)
731 char *name
, *drv_name
;
732 int instance
, len
, t_len
;
733 char mesg
[1024] = "apix: ";
735 t_len
= sizeof (mesg
);
738 name
= ddi_get_name(dip
);
739 major
= ddi_name_to_major(name
);
740 drv_name
= ddi_major_to_name(major
);
741 instance
= ddi_get_instance(dip
);
742 (void) snprintf(mesg
+ len
, t_len
- len
, "%s (%s) instance %d ",
743 name
, drv_name
, instance
);
747 switch (vecp
->v_type
) {
748 case APIX_TYPE_FIXED
:
749 (void) snprintf(mesg
+ len
, t_len
- len
, "irqno %d",
753 (void) snprintf(mesg
+ len
, t_len
- len
,
754 "msi inum %d (count %d)", vecp
->v_inum
, count
);
757 (void) snprintf(mesg
+ len
, t_len
- len
, "msi-x inum %d",
765 APIC_VERBOSE(ALLOC
, (CE_CONT
, "%s allocated with vector 0x%x on "
766 "cpu %d\n", mesg
, vecp
->v_vector
, vecp
->v_cpuid
));
771 * Operations on avintr
774 #define INIT_AUTOVEC(p, intr_id, f, arg1, arg2, ticksp, ipl, dip) \
776 (p)->av_intr_id = intr_id; \
777 (p)->av_vector = f; \
778 (p)->av_intarg1 = arg1; \
779 (p)->av_intarg2 = arg2; \
780 (p)->av_ticksp = ticksp; \
781 (p)->av_prilevel = ipl; \
784 _NOTE(CONSTCOND)} while (0)
787 * Insert an interrupt service routine into chain by its priority from
791 apix_insert_av(apix_vector_t
*vecp
, void *intr_id
, avfunc f
, caddr_t arg1
,
792 caddr_t arg2
, uint64_t *ticksp
, int ipl
, dev_info_t
*dip
)
794 struct autovec
*p
, *prep
, *mem
;
796 APIC_VERBOSE(INTR
, (CE_CONT
, "apix_insert_av: dip %p, vector 0x%x, "
797 "cpu %d\n", (void *)dip
, vecp
->v_vector
, vecp
->v_cpuid
));
799 mem
= kmem_zalloc(sizeof (struct autovec
), KM_SLEEP
);
800 INIT_AUTOVEC(mem
, intr_id
, f
, arg1
, arg2
, ticksp
, ipl
, dip
);
801 if (vecp
->v_type
== APIX_TYPE_FIXED
&& apic_level_intr
[vecp
->v_inum
])
802 mem
->av_flags
|= AV_PENTRY_LEVEL
;
805 vecp
->v_pri
= (ipl
> vecp
->v_pri
) ? ipl
: vecp
->v_pri
;
806 if (vecp
->v_autovect
== NULL
) { /* Nothing on list - put it at head */
807 vecp
->v_autovect
= mem
;
811 if (DDI_INTR_IS_MSI_OR_MSIX(vecp
->v_type
)) { /* MSI/X */
812 ASSERT(vecp
->v_share
== 1); /* No sharing for MSI/X */
814 INIT_AUTOVEC(vecp
->v_autovect
, intr_id
, f
, arg1
, arg2
, ticksp
,
816 prep
= vecp
->v_autovect
->av_link
;
817 vecp
->v_autovect
->av_link
= NULL
;
819 /* Free the following autovect chain */
820 while (prep
!= NULL
) {
821 ASSERT(prep
->av_vector
== NULL
);
824 prep
= prep
->av_link
;
825 kmem_free(p
, sizeof (struct autovec
));
828 kmem_free(mem
, sizeof (struct autovec
));
832 /* find where it goes in list */
834 for (p
= vecp
->v_autovect
; p
!= NULL
; p
= p
->av_link
) {
835 if (p
->av_vector
&& p
->av_prilevel
<= ipl
)
840 if (prep
->av_vector
== NULL
) { /* freed struct available */
841 INIT_AUTOVEC(prep
, intr_id
, f
, arg1
, arg2
,
843 prep
->av_flags
= mem
->av_flags
;
844 kmem_free(mem
, sizeof (struct autovec
));
848 mem
->av_link
= prep
->av_link
;
851 /* insert new intpt at beginning of chain */
852 mem
->av_link
= vecp
->v_autovect
;
853 vecp
->v_autovect
= mem
;
858 * After having made a change to an autovector list, wait until we have
859 * seen specified cpu not executing an interrupt at that level--so we
860 * know our change has taken effect completely (no old state in registers,
863 #define APIX_CPU_ENABLED(_cp) \
864 (quiesce_active == 0 && \
865 (((_cp)->cpu_flags & (CPU_QUIESCED|CPU_OFFLINE)) == 0))
868 apix_wait_till_seen(processorid_t cpuid
, int ipl
)
870 struct cpu
*cp
= cpu
[cpuid
];
872 if (cp
== NULL
|| LOCAL_WITH_INTR_DISABLED(cpuid
))
876 * Don't wait if the CPU is quiesced or offlined. This can happen
877 * when a CPU is running pause thread but hardware triggered an
878 * interrupt and the interrupt gets queued.
881 if (!INTR_ACTIVE((volatile struct cpu
*)cpu
[cpuid
], ipl
) &&
882 (!APIX_CPU_ENABLED(cp
) ||
883 !INTR_PENDING((volatile apix_impl_t
*)apixs
[cpuid
], ipl
)))
889 apix_remove_av(apix_vector_t
*vecp
, struct autovec
*target
)
897 APIC_VERBOSE(INTR
, (CE_CONT
, "apix_remove_av: dip %p, vector 0x%x, "
898 "cpu %d\n", (void *)target
->av_dip
, vecp
->v_vector
, vecp
->v_cpuid
));
900 for (p
= vecp
->v_autovect
; p
; p
= p
->av_link
) {
901 if (p
== target
|| p
->av_vector
== NULL
)
903 hi_pri
= (p
->av_prilevel
> hi_pri
) ? p
->av_prilevel
: hi_pri
;
907 vecp
->v_pri
= hi_pri
;
910 * This drops the handler from the chain, it can no longer be called.
911 * However, there is no guarantee that the handler is not currently
914 target
->av_vector
= NULL
;
916 * There is a race where we could be just about to pick up the ticksp
917 * pointer to increment it after returning from the service routine
918 * in av_dispatch_autovect. Rather than NULL it out let's just point
919 * it off to something safe so that any final tick update attempt
922 target
->av_ticksp
= &dummy_tick
;
923 apix_wait_till_seen(vecp
->v_cpuid
, target
->av_prilevel
);
926 static struct autovec
*
927 apix_find_av(apix_vector_t
*vecp
, void *intr_id
, avfunc f
)
931 for (p
= vecp
->v_autovect
; p
; p
= p
->av_link
) {
932 if ((p
->av_vector
== f
) && (p
->av_intr_id
== intr_id
)) {
933 /* found the handler */
941 static apix_vector_t
*
/*
 * Scan the per-CPU vector tables of every enabled CPU looking for the
 * vector whose autovect chain holds the (intr_id, f) handler pair; the
 * lookup is delegated to apix_find_av() for each candidate vector.
 */
942 apix_find_vector_by_avintr(void *intr_id
, avfunc f
)
948 for (n
= 0; n
< apic_nproc
; n
++) {
949 if (!apix_is_cpu_enabled(n
))
/*
 * NOTE(review): the inner loop bound compares against APIX_AVINTR_MIN,
 * so only a single vector (APIX_AVINTR_MIN itself) is examined on each
 * CPU. Presumably this was meant to be APIX_AVINTR_MAX so the whole
 * available-vector range is searched -- TODO confirm against upstream.
 */
952 for (v
= APIX_AVINTR_MIN
; v
<= APIX_AVINTR_MIN
; v
++) {
953 vecp
= xv_vector(n
, v
);
/* skip vectors that are unallocated/freed/obsoleted */
955 vecp
->v_state
<= APIX_STATE_OBSOLETED
)
958 if (apix_find_av(vecp
, intr_id
, f
) != NULL
)
967 * Add interrupt service routine.
969 * For legacy interrupts (HPET timer, ACPI SCI), the vector is actually
970 * IRQ no. A vector is then allocated. Otherwise, the vector is already
971 * allocated. The input argument virt_vect is virtual vector of format
972 * APIX_VIRTVEC_VECTOR(cpuid, vector).
974 * Return 1 on success, 0 on failure.
977 apix_add_avintr(void *intr_id
, int ipl
, avfunc xxintr
, char *name
,
978 int virt_vect
, caddr_t arg1
, caddr_t arg2
, uint64_t *ticksp
,
982 uchar_t v
= (uchar_t
)APIX_VIRTVEC_VECTOR(virt_vect
);
985 if (xxintr
== NULL
) {
986 cmn_err(CE_WARN
, "Attempt to add null for %s "
987 "on vector 0x%x,0x%x", name
,
988 APIX_VIRTVEC_CPU(virt_vect
),
989 APIX_VIRTVEC_VECTOR(virt_vect
));
993 if (v
>= APIX_IPI_MIN
) /* IPIs */
994 return (apix_add_ipi(ipl
, xxintr
, name
, v
, arg1
, arg2
));
996 if (!APIX_IS_VIRTVEC(virt_vect
)) { /* got irq */
997 int irqno
= virt_vect
;
998 int inum
= GET_INTR_INUM(intr_id
);
1002 * a. add_avintr() is called before irqp initialized (legacy)
1003 * b. irqp is initialized, vector is not allocated (fixed)
1004 * c. irqp is initialized, vector is allocated (fixed & shared)
1006 if ((vecp
= apix_alloc_intx(dip
, inum
, irqno
)) == NULL
)
1009 cpuid
= vecp
->v_cpuid
;
1011 virt_vect
= APIX_VIRTVECTOR(cpuid
, v
);
1012 } else { /* got virtual vector */
1013 cpuid
= APIX_VIRTVEC_CPU(virt_vect
);
1014 vecp
= xv_vector(cpuid
, v
);
1015 ASSERT(vecp
!= NULL
);
1018 lock_set(&apix_lock
);
1019 if (vecp
->v_state
<= APIX_STATE_OBSOLETED
) {
1023 * Basically the allocated but not enabled interrupts
1024 * will not get re-targeted. But MSIs in allocated state
1025 * could be re-targeted due to group re-targeting.
1027 if (intr_id
!= NULL
&& dip
!= NULL
) {
1028 ddi_intr_handle_impl_t
*hdlp
= intr_id
;
1029 vecp
= apix_get_dev_map(dip
, hdlp
->ih_inum
,
1031 ASSERT(vecp
->v_state
== APIX_STATE_ALLOCED
);
1034 lock_clear(&apix_lock
);
1035 cmn_err(CE_WARN
, "Invalid interrupt 0x%x,0x%x "
1036 " for %p to add", cpuid
, v
, intr_id
);
1039 cpuid
= vecp
->v_cpuid
;
1040 virt_vect
= APIX_VIRTVECTOR(cpuid
, vecp
->v_vector
);
1043 APIX_ENTER_CPU_LOCK(cpuid
);
1044 apix_insert_av(vecp
, intr_id
, xxintr
, arg1
, arg2
, ticksp
, ipl
, dip
);
1045 APIX_LEAVE_CPU_LOCK(cpuid
);
1047 (void) apix_addspl(virt_vect
, ipl
, 0, 0);
1049 lock_clear(&apix_lock
);
1057 * For fixed, if it's the last one of shared interrupts, free the vector.
1058 * For msi/x, only disable the interrupt but not free the vector, which
1059 * is freed by PSM_XXX_FREE_XXX.
1062 apix_rem_avintr(void *intr_id
, int ipl
, avfunc xxintr
, int virt_vect
)
1065 apix_vector_t
*vecp
;
1066 struct autovec
*avp
;
1067 processorid_t cpuid
;
1069 if ((f
= xxintr
) == NULL
)
1072 lock_set(&apix_lock
);
1074 if (!APIX_IS_VIRTVEC(virt_vect
)) { /* got irq */
1075 vecp
= apix_intx_get_vector(virt_vect
);
1076 virt_vect
= APIX_VIRTVECTOR(vecp
->v_cpuid
, vecp
->v_vector
);
1077 } else /* got virtual vector */
1078 vecp
= xv_vector(APIX_VIRTVEC_CPU(virt_vect
),
1079 APIX_VIRTVEC_VECTOR(virt_vect
));
1082 lock_clear(&apix_lock
);
1083 cmn_err(CE_CONT
, "Invalid interrupt 0x%x,0x%x to remove",
1084 APIX_VIRTVEC_CPU(virt_vect
),
1085 APIX_VIRTVEC_VECTOR(virt_vect
));
1089 if (vecp
->v_state
<= APIX_STATE_OBSOLETED
||
1090 ((avp
= apix_find_av(vecp
, intr_id
, f
)) == NULL
)) {
1092 * It's possible that the interrupt is rebound to a
1093 * different cpu before rem_avintr() is called. Search
1094 * through all vectors once it happens.
1096 if ((vecp
= apix_find_vector_by_avintr(intr_id
, f
))
1098 lock_clear(&apix_lock
);
1099 cmn_err(CE_CONT
, "Unknown interrupt 0x%x,0x%x "
1100 "for %p to remove", APIX_VIRTVEC_CPU(virt_vect
),
1101 APIX_VIRTVEC_VECTOR(virt_vect
), intr_id
);
1104 virt_vect
= APIX_VIRTVECTOR(vecp
->v_cpuid
, vecp
->v_vector
);
1105 avp
= apix_find_av(vecp
, intr_id
, f
);
1107 cpuid
= vecp
->v_cpuid
;
1109 /* disable interrupt */
1110 (void) apix_delspl(virt_vect
, ipl
, 0, 0);
1112 /* remove ISR entry */
1113 APIX_ENTER_CPU_LOCK(cpuid
);
1114 apix_remove_av(vecp
, avp
);
1115 APIX_LEAVE_CPU_LOCK(cpuid
);
1117 lock_clear(&apix_lock
);
1121 * Device to vector mapping table
1125 apix_clear_dev_map(dev_info_t
*dip
, int inum
, int type
)
1129 apix_dev_vector_t
*dvp
, *prev
= NULL
;
1132 name
= ddi_get_name(dip
);
1133 major
= ddi_name_to_major(name
);
1135 mutex_enter(&apix_mutex
);
1137 for (dvp
= apix_dev_vector
[major
]; dvp
!= NULL
;
1138 prev
= dvp
, dvp
= dvp
->dv_next
) {
1139 if (dvp
->dv_dip
== dip
&& dvp
->dv_inum
== inum
&&
1140 dvp
->dv_type
== type
) {
1147 mutex_exit(&apix_mutex
);
1152 prev
->dv_next
= dvp
->dv_next
;
1154 if (apix_dev_vector
[major
] == dvp
)
1155 apix_dev_vector
[major
] = dvp
->dv_next
;
1157 dvp
->dv_vector
->v_devp
= NULL
;
1159 mutex_exit(&apix_mutex
);
1161 kmem_free(dvp
, sizeof (apix_dev_vector_t
));
1165 apix_set_dev_map(apix_vector_t
*vecp
, dev_info_t
*dip
, int inum
)
1167 apix_dev_vector_t
*dvp
;
1172 ASSERT(dip
!= NULL
);
1173 name
= ddi_get_name(dip
);
1174 major
= ddi_name_to_major(name
);
1176 mutex_enter(&apix_mutex
);
1178 for (dvp
= apix_dev_vector
[major
]; dvp
!= NULL
;
1179 dvp
= dvp
->dv_next
) {
1180 if (dvp
->dv_dip
== dip
&& dvp
->dv_inum
== inum
&&
1181 dvp
->dv_type
== vecp
->v_type
) {
1187 if (found
== 0) { /* not found */
1188 dvp
= kmem_zalloc(sizeof (apix_dev_vector_t
), KM_SLEEP
);
1190 dvp
->dv_inum
= inum
;
1191 dvp
->dv_type
= vecp
->v_type
;
1193 dvp
->dv_next
= apix_dev_vector
[major
];
1194 apix_dev_vector
[major
] = dvp
;
1196 dvp
->dv_vector
= vecp
;
1199 mutex_exit(&apix_mutex
);
1201 DDI_INTR_IMPLDBG((CE_CONT
, "apix_set_dev_map: dip=0x%p "
1202 "inum=0x%x vector=0x%x/0x%x\n",
1203 (void *)dip
, inum
, vecp
->v_cpuid
, vecp
->v_vector
));
1207 apix_get_dev_map(dev_info_t
*dip
, int inum
, int type
)
1211 apix_dev_vector_t
*dvp
;
1212 apix_vector_t
*vecp
;
1214 name
= ddi_get_name(dip
);
1215 if ((major
= ddi_name_to_major(name
)) == DDI_MAJOR_T_NONE
)
1218 mutex_enter(&apix_mutex
);
1219 for (dvp
= apix_dev_vector
[major
]; dvp
!= NULL
;
1220 dvp
= dvp
->dv_next
) {
1221 if (dvp
->dv_dip
== dip
&& dvp
->dv_inum
== inum
&&
1222 dvp
->dv_type
== type
) {
1223 vecp
= dvp
->dv_vector
;
1224 mutex_exit(&apix_mutex
);
1228 mutex_exit(&apix_mutex
);
1234 * Get minimum inum for specified device, used for MSI
1237 apix_get_min_dev_inum(dev_info_t
*dip
, int type
)
1241 apix_dev_vector_t
*dvp
;
1244 name
= ddi_get_name(dip
);
1245 major
= ddi_name_to_major(name
);
1247 mutex_enter(&apix_mutex
);
1248 for (dvp
= apix_dev_vector
[major
]; dvp
!= NULL
;
1249 dvp
= dvp
->dv_next
) {
1250 if (dvp
->dv_dip
== dip
&& dvp
->dv_type
== type
) {
1252 inum
= dvp
->dv_inum
;
1254 inum
= (dvp
->dv_inum
< inum
) ?
1255 dvp
->dv_inum
: inum
;
1258 mutex_exit(&apix_mutex
);
1264 apix_get_max_dev_inum(dev_info_t
*dip
, int type
)
1268 apix_dev_vector_t
*dvp
;
1271 name
= ddi_get_name(dip
);
1272 major
= ddi_name_to_major(name
);
1274 mutex_enter(&apix_mutex
);
1275 for (dvp
= apix_dev_vector
[major
]; dvp
!= NULL
;
1276 dvp
= dvp
->dv_next
) {
1277 if (dvp
->dv_dip
== dip
&& dvp
->dv_type
== type
) {
1279 inum
= dvp
->dv_inum
;
1281 inum
= (dvp
->dv_inum
> inum
) ?
1282 dvp
->dv_inum
: inum
;
1285 mutex_exit(&apix_mutex
);
1291 * Major to cpu binding, for INTR_ROUND_ROBIN_WITH_AFFINITY cpu
1296 apix_get_dev_binding(dev_info_t
*dip
)
1300 uint32_t cpu
= IRQ_UNINIT
;
1302 name
= ddi_get_name(dip
);
1303 major
= ddi_name_to_major(name
);
1304 if (major
< devcnt
) {
1305 mutex_enter(&apix_mutex
);
1306 cpu
= apix_major_to_cpu
[major
];
1307 mutex_exit(&apix_mutex
);
1314 apix_set_dev_binding(dev_info_t
*dip
, uint32_t cpu
)
1319 /* setup major to cpu mapping */
1320 name
= ddi_get_name(dip
);
1321 major
= ddi_name_to_major(name
);
1322 if (apix_major_to_cpu
[major
] == IRQ_UNINIT
) {
1323 mutex_enter(&apix_mutex
);
1324 apix_major_to_cpu
[major
] = cpu
;
1325 mutex_exit(&apix_mutex
);
1330 * return the cpu to which this intr should be bound.
1331 * Check properties or any other mechanism to see if user wants it
1332 * bound to a specific CPU. If so, return the cpu id with high bit set.
1333 * If not, use the policy to choose a cpu and return the id.
1336 apix_bind_cpu(dev_info_t
*dip
)
1338 int instance
, instno
, prop_len
, bind_cpu
, count
;
1341 char *name
, *drv_name
, *prop_val
, *cptr
;
1344 lock_set(&apix_lock
);
1346 if (apic_intr_policy
== INTR_LOWEST_PRIORITY
) {
1347 cmn_err(CE_WARN
, "apix: unsupported interrupt binding policy "
1348 "LOWEST PRIORITY, use ROUND ROBIN instead");
1349 apic_intr_policy
= INTR_ROUND_ROBIN
;
1352 if (apic_nproc
== 1) {
1353 lock_clear(&apix_lock
);
1358 rc
= DDI_PROP_NOT_FOUND
;
1359 major
= (major_t
)-1;
1361 name
= ddi_get_name(dip
);
1362 major
= ddi_name_to_major(name
);
1363 drv_name
= ddi_major_to_name(major
);
1364 instance
= ddi_get_instance(dip
);
1365 if (apic_intr_policy
== INTR_ROUND_ROBIN_WITH_AFFINITY
) {
1366 bind_cpu
= apix_get_dev_binding(dip
);
1367 if (bind_cpu
!= IRQ_UNINIT
) {
1368 lock_clear(&apix_lock
);
1373 * search for "drvname"_intpt_bind_cpus property first, the
1374 * syntax of the property should be "a[,b,c,...]" where
1375 * instance 0 binds to cpu a, instance 1 binds to cpu b,
1376 * instance 3 binds to cpu c...
1377 * ddi_getlongprop() will search /option first, then /
1378 * if "drvname"_intpt_bind_cpus doesn't exist, then find
1379 * intpt_bind_cpus property. The syntax is the same, and
1380 * it applies to all the devices if its "drvname" specific
1381 * property doesn't exist
1383 (void) strcpy(prop_name
, drv_name
);
1384 (void) strcat(prop_name
, "_intpt_bind_cpus");
1385 rc
= ddi_getlongprop(DDI_DEV_T_ANY
, dip
, 0, prop_name
,
1386 (caddr_t
)&prop_val
, &prop_len
);
1387 if (rc
!= DDI_PROP_SUCCESS
) {
1388 rc
= ddi_getlongprop(DDI_DEV_T_ANY
, dip
, 0,
1389 "intpt_bind_cpus", (caddr_t
)&prop_val
, &prop_len
);
1392 if (rc
== DDI_PROP_SUCCESS
) {
1393 for (i
= count
= 0; i
< (prop_len
- 1); i
++)
1394 if (prop_val
[i
] == ',')
1396 if (prop_val
[i
-1] != ',')
1399 * if somehow the binding instances defined in the
1400 * property are not enough for this instno., then
1401 * reuse the pattern for the next instance until
1402 * it reaches the requested instno
1404 instno
= instance
% count
;
1410 bind_cpu
= stoi(&cptr
);
1411 kmem_free(prop_val
, prop_len
);
1412 /* if specific cpu is bogus, then default to cpu 0 */
1413 if (bind_cpu
>= apic_nproc
) {
1414 cmn_err(CE_WARN
, "apix: %s=%s: CPU %d not present",
1415 prop_name
, prop_val
, bind_cpu
);
1418 /* indicate that we are bound at user request */
1419 bind_cpu
|= IRQ_USER_BOUND
;
1422 * no need to check apic_cpus[].aci_status, if specific cpu is
1423 * not up, then post_cpu_start will handle it.
1426 bind_cpu
= apic_get_next_bind_cpu();
1429 lock_clear(&apix_lock
);
1431 return ((uint32_t)bind_cpu
);
1435 apix_is_cpu_enabled(processorid_t cpuid
)
1437 apic_cpus_info_t
*cpu_infop
;
1439 cpu_infop
= &apic_cpus
[cpuid
];
1441 if ((cpu_infop
->aci_status
& APIC_CPU_INTR_ENABLE
) == 0)
1448 * Must be called with apix_lock held. This function can be
1449 * called from above lock level by apix_intr_redistribute().
1452 * vecp : Vector to be rebound
1453 * tocpu : Target cpu. IRQ_UNINIT means target is vecp->v_cpuid.
1454 * count : Number of continuous vectors
1456 * Return new vector being bound to
1459 apix_rebind(apix_vector_t
*vecp
, processorid_t newcpu
, int count
)
1461 apix_vector_t
*newp
, *oldp
;
1462 processorid_t oldcpu
= vecp
->v_cpuid
;
1463 uchar_t newvec
, oldvec
= vecp
->v_vector
;
1466 ASSERT(LOCK_HELD(&apix_lock
) && count
> 0);
1468 if (!apix_is_cpu_enabled(newcpu
))
1471 if (vecp
->v_cpuid
== newcpu
) /* rebind to the same cpu */
1474 APIX_ENTER_CPU_LOCK(oldcpu
);
1475 APIX_ENTER_CPU_LOCK(newcpu
);
1477 /* allocate vector */
1479 newp
= apix_alloc_vector_oncpu(newcpu
, NULL
, 0, vecp
->v_type
);
1481 ASSERT(vecp
->v_type
== APIX_TYPE_MSI
);
1482 newp
= apix_alloc_nvectors_oncpu(newcpu
, NULL
, 0, count
,
1486 APIX_LEAVE_CPU_LOCK(newcpu
);
1487 APIX_LEAVE_CPU_LOCK(oldcpu
);
1491 newvec
= newp
->v_vector
;
1492 apix_dup_vectors(vecp
, newp
, count
);
1494 APIX_LEAVE_CPU_LOCK(newcpu
);
1495 APIX_LEAVE_CPU_LOCK(oldcpu
);
1497 if (!DDI_INTR_IS_MSI_OR_MSIX(vecp
->v_type
)) {
1499 if (apix_intx_rebind(vecp
->v_inum
, newcpu
, newvec
) != 0) {
1500 struct autovec
*avp
;
1503 /* undo duplication */
1504 APIX_ENTER_CPU_LOCK(oldcpu
);
1505 APIX_ENTER_CPU_LOCK(newcpu
);
1506 for (avp
= newp
->v_autovect
; avp
!= NULL
;
1507 avp
= avp
->av_link
) {
1508 if (avp
->av_dip
!= NULL
) {
1509 inum
= GET_INTR_INUM(avp
->av_intr_id
);
1510 apix_set_dev_map(vecp
, avp
->av_dip
,
1513 apix_remove_av(newp
, avp
);
1515 apix_cleanup_vector(newp
);
1516 APIX_LEAVE_CPU_LOCK(newcpu
);
1517 APIX_LEAVE_CPU_LOCK(oldcpu
);
1518 APIC_VERBOSE(REBIND
, (CE_CONT
, "apix: rebind fixed "
1519 "interrupt 0x%x to cpu %d failed\n",
1520 vecp
->v_inum
, newcpu
));
1524 APIX_ENTER_CPU_LOCK(oldcpu
);
1525 (void) apix_obsolete_vector(vecp
);
1526 APIX_LEAVE_CPU_LOCK(oldcpu
);
1527 APIC_VERBOSE(REBIND
, (CE_CONT
, "apix: rebind fixed interrupt"
1528 " 0x%x/0x%x to 0x%x/0x%x\n",
1529 oldcpu
, oldvec
, newcpu
, newvec
));
1533 for (i
= 0; i
< count
; i
++) {
1534 oldp
= xv_vector(oldcpu
, oldvec
+ i
);
1535 newp
= xv_vector(newcpu
, newvec
+ i
);
1537 if (newp
->v_share
> 0) {
1538 APIX_SET_REBIND_INFO(oldp
, newp
);
1540 apix_enable_vector(newp
);
1542 APIX_CLR_REBIND_INFO();
1545 APIX_ENTER_CPU_LOCK(oldcpu
);
1546 (void) apix_obsolete_vector(oldp
);
1547 APIX_LEAVE_CPU_LOCK(oldcpu
);
1549 APIC_VERBOSE(REBIND
, (CE_CONT
, "apix: rebind vector 0x%x/0x%x "
1550 "to 0x%x/0x%x, count=%d\n",
1551 oldcpu
, oldvec
, newcpu
, newvec
, count
));
1553 return (xv_vector(newcpu
, newvec
));
1558 * a. add_avintr() is called before irqp initialized (legacy)
1559 * b. irqp is initialized, vector is not allocated (fixed interrupts)
1560 * c. irqp is initialized, vector is allocated (shared interrupts)
1563 apix_alloc_intx(dev_info_t
*dip
, int inum
, int irqno
)
1566 apix_vector_t
*vecp
;
1569 * Allocate IRQ. Caller is later responsible for the
1572 mutex_enter(&airq_mutex
);
1573 if ((irqp
= apic_irq_table
[irqno
]) == NULL
) {
1575 irqp
= kmem_zalloc(sizeof (apic_irq_t
), KM_SLEEP
);
1576 irqp
->airq_mps_intr_index
= FREE_INDEX
;
1577 apic_irq_table
[irqno
] = irqp
;
1579 if (irqp
->airq_mps_intr_index
== FREE_INDEX
) {
1580 irqp
->airq_mps_intr_index
= DEFAULT_INDEX
;
1581 irqp
->airq_cpu
= IRQ_UNINIT
;
1582 irqp
->airq_origirq
= (uchar_t
)irqno
;
1585 mutex_exit(&airq_mutex
);
1590 if (irqp
->airq_cpu
== IRQ_UNINIT
) {
1591 uint32_t bindcpu
, cpuid
;
1593 /* select cpu by system policy */
1594 bindcpu
= apix_bind_cpu(dip
);
1595 cpuid
= bindcpu
& ~IRQ_USER_BOUND
;
1597 /* allocate vector */
1598 APIX_ENTER_CPU_LOCK(cpuid
);
1600 if ((vecp
= apix_alloc_vector_oncpu(bindcpu
, dip
, inum
,
1601 APIX_TYPE_FIXED
)) == NULL
) {
1602 cmn_err(CE_WARN
, "No interrupt vector for irq %x",
1604 APIX_LEAVE_CPU_LOCK(cpuid
);
1607 vecp
->v_inum
= irqno
;
1608 vecp
->v_flags
|= APIX_VECT_MASKABLE
;
1610 apix_intx_set_vector(irqno
, vecp
->v_cpuid
, vecp
->v_vector
);
1612 APIX_LEAVE_CPU_LOCK(cpuid
);
1614 vecp
= xv_vector(irqp
->airq_cpu
, irqp
->airq_vector
);
1615 ASSERT(!IS_VECT_FREE(vecp
));
1618 apix_set_dev_map(vecp
, dip
, inum
);
1621 if ((dip
!= NULL
) &&
1622 (apic_intr_policy
== INTR_ROUND_ROBIN_WITH_AFFINITY
) &&
1623 ((vecp
->v_flags
& APIX_VECT_USER_BOUND
) == 0))
1624 apix_set_dev_binding(dip
, vecp
->v_cpuid
);
1626 apix_dprint_vector(vecp
, dip
, 1);
1632 apix_alloc_msi(dev_info_t
*dip
, int inum
, int count
, int behavior
)
1634 int i
, cap_ptr
, rcount
= count
;
1635 apix_vector_t
*vecp
;
1636 processorid_t bindcpu
, cpuid
;
1638 ddi_acc_handle_t handle
;
1640 DDI_INTR_IMPLDBG((CE_CONT
, "apix_alloc_msi_vectors: dip=0x%p "
1641 "inum=0x%x count=0x%x behavior=%d\n",
1642 (void *)dip
, inum
, count
, behavior
));
1645 if (behavior
== DDI_INTR_ALLOC_STRICT
&&
1646 apic_multi_msi_enable
== 0)
1648 if (apic_multi_msi_enable
== 0)
1652 /* Check whether it supports per-vector masking */
1653 cap_ptr
= i_ddi_get_msi_msix_cap_ptr(dip
);
1654 handle
= i_ddi_get_pci_config_handle(dip
);
1655 msi_ctrl
= pci_config_get16(handle
, cap_ptr
+ PCI_MSI_CTRL
);
1658 bindcpu
= apix_bind_cpu(dip
);
1659 cpuid
= bindcpu
& ~IRQ_USER_BOUND
;
1661 /* if not ISP2, then round it down */
1663 rcount
= 1 << (highbit(rcount
) - 1);
1665 APIX_ENTER_CPU_LOCK(cpuid
);
1666 for (vecp
= NULL
; rcount
> 0; rcount
>>= 1) {
1667 vecp
= apix_alloc_nvectors_oncpu(bindcpu
, dip
, inum
, rcount
,
1669 if (vecp
!= NULL
|| behavior
== DDI_INTR_ALLOC_STRICT
)
1672 for (i
= 0; vecp
&& i
< rcount
; i
++)
1673 xv_vector(vecp
->v_cpuid
, vecp
->v_vector
+ i
)->v_flags
|=
1674 (msi_ctrl
& PCI_MSI_PVM_MASK
) ? APIX_VECT_MASKABLE
: 0;
1675 APIX_LEAVE_CPU_LOCK(cpuid
);
1677 APIC_VERBOSE(INTR
, (CE_CONT
,
1678 "apix_alloc_msi: no %d cont vectors found on cpu 0x%x\n",
1683 /* major to cpu binding */
1684 if ((apic_intr_policy
== INTR_ROUND_ROBIN_WITH_AFFINITY
) &&
1685 ((vecp
->v_flags
& APIX_VECT_USER_BOUND
) == 0))
1686 apix_set_dev_binding(dip
, vecp
->v_cpuid
);
1688 apix_dprint_vector(vecp
, dip
, rcount
);
1694 apix_alloc_msix(dev_info_t
*dip
, int inum
, int count
, int behavior
)
1696 apix_vector_t
*vecp
;
1697 processorid_t bindcpu
, cpuid
;
1700 for (i
= 0; i
< count
; i
++) {
1701 /* select cpu by system policy */
1702 bindcpu
= apix_bind_cpu(dip
);
1703 cpuid
= bindcpu
& ~IRQ_USER_BOUND
;
1705 /* allocate vector */
1706 APIX_ENTER_CPU_LOCK(cpuid
);
1707 if ((vecp
= apix_alloc_vector_oncpu(bindcpu
, dip
, inum
+ i
,
1708 APIX_TYPE_MSIX
)) == NULL
) {
1709 APIX_LEAVE_CPU_LOCK(cpuid
);
1710 APIC_VERBOSE(INTR
, (CE_CONT
, "apix_alloc_msix: "
1711 "allocate msix for device dip=%p, inum=%d on"
1712 " cpu %d failed", (void *)dip
, inum
+ i
, bindcpu
));
1715 vecp
->v_flags
|= APIX_VECT_MASKABLE
;
1716 APIX_LEAVE_CPU_LOCK(cpuid
);
1718 /* major to cpu mapping */
1720 (apic_intr_policy
== INTR_ROUND_ROBIN_WITH_AFFINITY
) &&
1721 ((vecp
->v_flags
& APIX_VECT_USER_BOUND
) == 0))
1722 apix_set_dev_binding(dip
, vecp
->v_cpuid
);
1724 apix_dprint_vector(vecp
, dip
, 1);
1727 if (i
< count
&& behavior
== DDI_INTR_ALLOC_STRICT
) {
1728 APIC_VERBOSE(INTR
, (CE_WARN
, "apix_alloc_msix: "
1729 "strictly allocate %d vectors failed, got %d\n",
1731 apix_free_vectors(dip
, inum
, i
, APIX_TYPE_MSIX
);
1739 * A rollback free for vectors allocated by apix_alloc_xxx().
1742 apix_free_vectors(dev_info_t
*dip
, int inum
, int count
, int type
)
1745 apix_vector_t
*vecp
;
1747 DDI_INTR_IMPLDBG((CE_CONT
, "apix_free_vectors: dip: %p inum: %x "
1748 "count: %x type: %x\n",
1749 (void *)dip
, inum
, count
, type
));
1751 lock_set(&apix_lock
);
1753 for (i
= 0; i
< count
; i
++, inum
++) {
1754 if ((vecp
= apix_get_dev_map(dip
, inum
, type
)) == NULL
) {
1755 lock_clear(&apix_lock
);
1756 DDI_INTR_IMPLDBG((CE_CONT
, "apix_free_vectors: "
1757 "dip=0x%p inum=0x%x type=0x%x apix_find_intr() "
1758 "failed\n", (void *)dip
, inum
, type
));
1762 APIX_ENTER_CPU_LOCK(vecp
->v_cpuid
);
1763 cpuid
= vecp
->v_cpuid
;
1765 DDI_INTR_IMPLDBG((CE_CONT
, "apix_free_vectors: "
1766 "dip=0x%p inum=0x%x type=0x%x vector 0x%x (share %d)\n",
1767 (void *)dip
, inum
, type
, vecp
->v_vector
, vecp
->v_share
));
1769 /* tear down device interrupt to vector mapping */
1770 apix_clear_dev_map(dip
, inum
, type
);
1772 if (vecp
->v_type
== APIX_TYPE_FIXED
) {
1773 if (vecp
->v_share
> 0) { /* share IRQ line */
1774 APIX_LEAVE_CPU_LOCK(cpuid
);
1778 /* Free apic_irq_table entry */
1779 apix_intx_free(vecp
->v_inum
);
1783 apix_cleanup_vector(vecp
);
1785 APIX_LEAVE_CPU_LOCK(cpuid
);
1788 lock_clear(&apix_lock
);
1792 * Must be called with apix_lock held
1795 apix_setup_io_intr(apix_vector_t
*vecp
)
1797 processorid_t bindcpu
;
1800 ASSERT(LOCK_HELD(&apix_lock
));
1803 * Interrupts are enabled on the CPU, programme IOAPIC RDT
1804 * entry or MSI/X address/data to enable the interrupt.
1806 if (apix_is_cpu_enabled(vecp
->v_cpuid
)) {
1807 apix_enable_vector(vecp
);
1812 * CPU is not up or interrupts are disabled. Fall back to the
1813 * first avialable CPU.
1815 bindcpu
= apic_find_cpu(APIC_CPU_INTR_ENABLE
);
1817 if (vecp
->v_type
== APIX_TYPE_MSI
)
1818 return (apix_grp_set_cpu(vecp
, bindcpu
, &ret
));
1820 return (apix_set_cpu(vecp
, bindcpu
, &ret
));
1824 * For interrupts which call add_avintr() before apic is initialized.
1825 * ioapix_setup_intr() will
1830 ioapix_setup_intr(int irqno
, iflag_t
*flagp
)
1832 extern struct av_head autovect
[];
1833 apix_vector_t
*vecp
;
1835 uchar_t ioapicindex
, ipin
;
1837 struct autovec
*avp
;
1839 ioapicindex
= acpi_find_ioapic(irqno
);
1840 ASSERT(ioapicindex
!= 0xFF);
1841 ipin
= irqno
- apic_io_vectbase
[ioapicindex
];
1843 mutex_enter(&airq_mutex
);
1844 irqp
= apic_irq_table
[irqno
];
1847 * The irq table entry shouldn't exist unless the interrupts are shared.
1848 * In that case, make sure it matches what we would initialize it to.
1851 ASSERT(irqp
->airq_mps_intr_index
== ACPI_INDEX
);
1852 ASSERT(irqp
->airq_intin_no
== ipin
&&
1853 irqp
->airq_ioapicindex
== ioapicindex
);
1854 vecp
= xv_vector(irqp
->airq_cpu
, irqp
->airq_vector
);
1855 ASSERT(!IS_VECT_FREE(vecp
));
1856 mutex_exit(&airq_mutex
);
1858 irqp
= kmem_zalloc(sizeof (apic_irq_t
), KM_SLEEP
);
1860 irqp
->airq_cpu
= IRQ_UNINIT
;
1861 irqp
->airq_origirq
= (uchar_t
)irqno
;
1862 irqp
->airq_mps_intr_index
= ACPI_INDEX
;
1863 irqp
->airq_ioapicindex
= ioapicindex
;
1864 irqp
->airq_intin_no
= ipin
;
1865 irqp
->airq_iflag
= *flagp
;
1868 apic_irq_table
[irqno
] = irqp
;
1869 mutex_exit(&airq_mutex
);
1871 vecp
= apix_alloc_intx(NULL
, 0, irqno
);
1874 /* copy over autovect */
1875 for (avp
= autovect
[irqno
].avh_link
; avp
; avp
= avp
->av_link
)
1876 apix_insert_av(vecp
, avp
->av_intr_id
, avp
->av_vector
,
1877 avp
->av_intarg1
, avp
->av_intarg2
, avp
->av_ticksp
,
1878 avp
->av_prilevel
, avp
->av_dip
);
1880 /* Program I/O APIC */
1881 iflag
= intr_clear();
1882 lock_set(&apix_lock
);
1884 (void) apix_setup_io_intr(vecp
);
1886 lock_clear(&apix_lock
);
1887 intr_restore(iflag
);
1889 APIC_VERBOSE_IOAPIC((CE_CONT
, "apix: setup ioapic, irqno %x "
1890 "(ioapic %x, ipin %x) is bound to cpu %x, vector %x\n",
1891 irqno
, ioapicindex
, ipin
, irqp
->airq_cpu
, irqp
->airq_vector
));
1895 ioapix_init_intr(int mask_apic
)
1900 /* mask interrupt vectors */
1901 for (j
= 0; j
< apic_io_max
&& mask_apic
; j
++) {
1905 /* Bits 23-16 define the maximum redirection entries */
1906 intin_max
= (ioapic_read(ioapicindex
, APIC_VERS_CMD
) >> 16)
1908 for (i
= 0; i
<= intin_max
; i
++)
1909 ioapic_write(ioapicindex
, APIC_RDT_CMD
+ 2 * i
,
1914 * Hack alert: deal with ACPI SCI interrupt chicken/egg here
1916 if (apic_sci_vect
> 0)
1917 ioapix_setup_intr(apic_sci_vect
, &apic_sci_flags
);
1920 * Hack alert: deal with ACPI HPET interrupt chicken/egg here.
1922 if (apic_hpet_vect
> 0)
1923 ioapix_setup_intr(apic_hpet_vect
, &apic_hpet_flags
);