4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2013 Pluribus Networks, Inc.
24 * Copyright 2017 Joyent, Inc.
29 * Has code for Advanced DDI interrupt framework support.
32 #include <sys/cpuvar.h>
34 #include <sys/archsystm.h>
36 #include <sys/sunddi.h>
37 #include <sys/ddi_impldefs.h>
38 #include <sys/mach_intr.h>
39 #include <sys/sysmacros.h>
42 #include <sys/pci_intr_lib.h>
43 #include <sys/apic_common.h>
45 #define UCHAR_MAX UINT8_MAX
/*
 * NOTE(review): this extracted text is garbled — tokens are split across
 * lines and the fused original line numbers skip values, so some text is
 * missing. Code kept byte-identical; re-sync with the canonical source
 * before editing.
 */
/* Autovector table, defined elsewhere in the kernel; indexed by irqno. */
47 extern struct av_head autovect
[];
50 * Local Function Prototypes
/* Forward declaration of the irq-table lookup helper defined below. */
52 apic_irq_t
*apic_find_irq(dev_info_t
*, struct intrspec
*, int);
/*
 * apic_pci_msi_enable_vector — program the address/data pair for an
 * MSI or MSI-X source: build msi_regs from the vector and target APIC id,
 * run it through the interrupt-remapping ops (apic_vt_ops), then write the
 * result into the PCI MSI capability (via pci_config_put16/32) or the
 * MSI-X table (via ddi_put32), depending on `type`.
 *
 * NOTE(review): garbled extraction — tokens split across lines and the
 * fused original line numbers skip values, so declarations (msi_regs, i,
 * irqno, msi_ctrl, off), closing braces and some statements are missing
 * from this view. Kept byte-identical; do not edit without the canonical
 * source.
 */
55 * apic_pci_msi_enable_vector:
56 * Set the address/data fields in the MSI/X capability structure
61 apic_pci_msi_enable_vector(apic_irq_t
*irq_ptr
, int type
, int inum
, int vector
,
62 int count
, int target_apic_id
)
64 uint64_t msi_addr
, msi_data
;
66 dev_info_t
*dip
= irq_ptr
->airq_dip
;
/* Locate the device's MSI/MSI-X capability and its config-space handle. */
67 int cap_ptr
= i_ddi_get_msi_msix_cap_ptr(dip
);
68 ddi_acc_handle_t handle
= i_ddi_get_pci_config_handle(dip
);
71 void *intrmap_tbl
[PCI_MSI_MAX_INTRS
];
73 DDI_INTR_IMPLDBG((CE_CONT
, "apic_pci_msi_enable_vector: dip=0x%p\n"
74 "\tdriver = %s, inum=0x%x vector=0x%x apicid=0x%x\n", (void *)dip
,
75 ddi_driver_name(dip
), inum
, vector
, target_apic_id
));
77 ASSERT((handle
!= NULL
) && (cap_ptr
!= 0));
/* Seed the register image: data <- vector, addr <- target APIC id. */
79 msi_regs
.mr_data
= vector
;
80 msi_regs
.mr_addr
= target_apic_id
;
/* Gather existing per-irq intrmap private data for all `count` vectors. */
82 for (i
= 0; i
< count
; i
++) {
83 irqno
= apic_vector_to_irq
[vector
+ i
];
84 intrmap_tbl
[i
] = apic_irq_table
[irqno
]->airq_intrmap_private
;
/* Allocate remapping entries, then store them back into the irq table. */
86 apic_vt_ops
->apic_intrmap_alloc_entry(intrmap_tbl
, dip
, type
,
88 for (i
= 0; i
< count
; i
++) {
89 irqno
= apic_vector_to_irq
[vector
+ i
];
90 apic_irq_table
[irqno
]->airq_intrmap_private
=
/* Let the remapping layer rewrite msi_regs for this entry. */
94 apic_vt_ops
->apic_intrmap_map_entry(irq_ptr
->airq_intrmap_private
,
95 (void *)&msi_regs
, type
, count
);
96 apic_vt_ops
->apic_intrmap_record_msi(irq_ptr
->airq_intrmap_private
,
100 msi_addr
= msi_regs
.mr_addr
;
102 /* MSI Data: MSI is edge triggered according to spec */
103 msi_data
= msi_regs
.mr_data
;
105 DDI_INTR_IMPLDBG((CE_CONT
, "apic_pci_msi_enable_vector: addr=0x%lx "
106 "data=0x%lx\n", (long)msi_addr
, (long)msi_data
));
/* MSI path: write addr/data into the PCI MSI capability registers. */
108 if (type
== DDI_INTR_TYPE_MSI
) {
109 msi_ctrl
= pci_config_get16(handle
, cap_ptr
+ PCI_MSI_CTRL
);
111 /* Set the bits to inform how many MSIs are enabled */
112 msi_ctrl
|= ((highbit(count
) -1) << PCI_MSI_MME_SHIFT
);
113 pci_config_put16(handle
, cap_ptr
+ PCI_MSI_CTRL
, msi_ctrl
);
116 * Only set vector if not on hypervisor
118 pci_config_put32(handle
,
119 cap_ptr
+ PCI_MSI_ADDR_OFFSET
, msi_addr
);
/* 64-bit capable device: also write the upper address dword. */
121 if (msi_ctrl
& PCI_MSI_64BIT_MASK
) {
122 pci_config_put32(handle
,
123 cap_ptr
+ PCI_MSI_ADDR_OFFSET
+ 4, msi_addr
>> 32);
124 pci_config_put16(handle
,
125 cap_ptr
+ PCI_MSI_64BIT_DATA
, msi_data
);
127 pci_config_put16(handle
,
128 cap_ptr
+ PCI_MSI_32BIT_DATA
, msi_data
);
/* MSI-X path: write the inum'th vector entry in the MSI-X table. */
131 } else if (type
== DDI_INTR_TYPE_MSIX
) {
133 ddi_intr_msix_t
*msix_p
= i_ddi_get_msix(dip
);
135 ASSERT(msix_p
!= NULL
);
137 /* Offset into the "inum"th entry in the MSI-X table */
138 off
= (uintptr_t)msix_p
->msix_tbl_addr
+
139 (inum
* PCI_MSIX_VECTOR_SIZE
);
141 ddi_put32(msix_p
->msix_tbl_hdl
,
142 (uint32_t *)(off
+ PCI_MSIX_DATA_OFFSET
), msi_data
);
143 ddi_put32(msix_p
->msix_tbl_hdl
,
144 (uint32_t *)(off
+ PCI_MSIX_LOWER_ADDR_OFFSET
), msi_addr
);
145 ddi_put32(msix_p
->msix_tbl_hdl
,
146 (uint32_t *)(off
+ PCI_MSIX_UPPER_ADDR_OFFSET
),
/*
 * apic_navail_vector — count contiguous free vectors available at the
 * given priority: the scan range is [apic_ipltopri[pri-1]+per-IPL,
 * apic_ipltopri[pri]+mask], and a vector is free when apic_vector_to_irq
 * maps it to APIC_RESV_IRQ (reserved vectors excluded via
 * APIC_CHECK_RESERVE_VECTORS).
 *
 * NOTE(review): garbled extraction — the loop body's counting statements
 * and the function's return are missing from this view (fused line
 * numbers skip). Kept byte-identical.
 */
152 * This function returns the no. of vectors available for the pri.
153 * dip is not used at this moment. If we really don't need that,
154 * it will be removed.
158 apic_navail_vector(dev_info_t
*dip
, int pri
)
160 int lowest
, highest
, i
, navail
, count
;
162 DDI_INTR_IMPLDBG((CE_CONT
, "apic_navail_vector: dip: %p, pri: %x\n",
165 highest
= apic_ipltopri
[pri
] + APIC_VECTOR_MASK
;
166 lowest
= apic_ipltopri
[pri
- 1] + APIC_VECTOR_PER_IPL
;
169 if (highest
< lowest
) /* Both ipl and ipl - 1 map to same pri */
170 lowest
-= APIC_VECTOR_PER_IPL
;
172 /* It has to be contiguous */
173 for (i
= lowest
; i
<= highest
; i
++) {
175 while ((apic_vector_to_irq
[i
] == APIC_RESV_IRQ
) &&
177 if (APIC_CHECK_RESERVE_VECTORS(i
))
/*
 * apic_find_multi_vectors — find `count` contiguous free vectors at
 * priority `pri`, aligned per the MSI spec: the start is rounded up with
 * `i = (i + msibits) & ~msibits` (msibits = count-1 low data bits), and on
 * success the starting vector is returned as a uchar_t.
 *
 * NOTE(review): garbled extraction — msibits initialization, the inner
 * counting statements and the failure return are missing from this view.
 * Kept byte-identical.
 */
189 * Finds "count" contiguous MSI vectors starting at the proper alignment
191 * Caller needs to make sure that count has to be power of 2 and should not
195 apic_find_multi_vectors(int pri
, int count
)
197 int lowest
, highest
, i
, navail
, start
, msibits
;
199 DDI_INTR_IMPLDBG((CE_CONT
, "apic_find_mult: pri: %x, count: %x\n",
202 highest
= apic_ipltopri
[pri
] + APIC_VECTOR_MASK
;
203 lowest
= apic_ipltopri
[pri
- 1] + APIC_VECTOR_PER_IPL
;
206 if (highest
< lowest
) /* Both ipl and ipl - 1 map to same pri */
207 lowest
-= APIC_VECTOR_PER_IPL
;
210 * msibits is the no. of lower order message data bits for the
211 * allocated MSI vectors and is used to calculate the aligned
216 /* It has to be contiguous */
217 for (i
= lowest
; i
<= highest
; i
++) {
221 * starting vector has to be aligned accordingly for
/* Round i up to the next multiple of (msibits + 1). */
225 i
= (i
+ msibits
) & ~msibits
;
227 while ((apic_vector_to_irq
[i
] == APIC_RESV_IRQ
) &&
229 if (APIC_CHECK_RESERVE_VECTORS(i
))
/* Enough contiguous free vectors found: return the start. */
232 if (navail
>= count
) {
233 ASSERT(start
>= 0 && start
<= UCHAR_MAX
);
234 return ((uchar_t
)start
);
/*
 * apic_find_irq — walk apic_irq_table between apic_min_device_irq and
 * apic_max_device_irq (following airq_next chains) for the entry whose
 * dip, original irq (airq_origirq == intrspec_vec) and ipl
 * (airq_ipl == intrspec_pri) match; for MSI/MSI-X the airq_mps_intr_index
 * is additionally checked. Returns NULL when no entry matches.
 *
 * NOTE(review): garbled extraction — the MSI/MSIX index comparisons'
 * right-hand sides, the matching `return (irqp)` statements and closing
 * braces are missing from this view. Kept byte-identical.
 */
244 * It finds the apic_irq_t associates with the dip, ispec and type.
247 apic_find_irq(dev_info_t
*dip
, struct intrspec
*ispec
, int type
)
252 DDI_INTR_IMPLDBG((CE_CONT
, "apic_find_irq: dip=0x%p vec=0x%x "
253 "ipl=0x%x type=0x%x\n", (void *)dip
, ispec
->intrspec_vec
,
254 ispec
->intrspec_pri
, type
));
256 for (i
= apic_min_device_irq
; i
<= apic_max_device_irq
; i
++) {
257 for (irqp
= apic_irq_table
[i
]; irqp
; irqp
= irqp
->airq_next
) {
258 if ((irqp
->airq_dip
== dip
) &&
259 (irqp
->airq_origirq
== ispec
->intrspec_vec
) &&
260 (irqp
->airq_ipl
== ispec
->intrspec_pri
)) {
261 if (type
== DDI_INTR_TYPE_MSI
) {
262 if (irqp
->airq_mps_intr_index
==
265 } else if (type
== DDI_INTR_TYPE_MSIX
) {
266 if (irqp
->airq_mps_intr_index
==
274 DDI_INTR_IMPLDBG((CE_CONT
, "apic_find_irq: return NULL\n"));
/*
 * apic_get_pending — report whether the interrupt is pending: binds to the
 * interrupt's CPU (affinity_set under cpu_lock), reads the local APIC IRR
 * word for the vector, and — for fixed interrupts that are not pending in
 * the IRR — falls back to the I/O APIC RDT entry's pending bit.
 *
 * NOTE(review): garbled extraction — local declarations (intin_no,
 * apic_ix), the affinity_clear, the RDT mask constant and the return are
 * missing from this view. Kept byte-identical.
 */
279 * This function will return the pending bit of the irqp.
280 * It either comes from the IRR register of the APIC or the RDT
281 * entry of the I/O APIC.
282 * For the IRR to work, it needs to be to its binding CPU
285 apic_get_pending(apic_irq_t
*irqp
, int type
)
287 int bit
, index
, irr
, pending
;
291 DDI_INTR_IMPLDBG((CE_CONT
, "apic_get_pending: irqp: %p, cpuid: %x "
292 "type: %x\n", (void *)irqp
, irqp
->airq_cpu
& ~IRQ_USER_BOUND
,
295 /* need to get on the bound cpu */
296 mutex_enter(&cpu_lock
);
297 affinity_set(irqp
->airq_cpu
& ~IRQ_USER_BOUND
);
/* IRR is an array of 32-bit words; locate the vector's word and bit. */
299 index
= irqp
->airq_vector
/ 32;
300 bit
= irqp
->airq_vector
% 32;
301 irr
= apic_reg_ops
->apic_read(APIC_IRR_REG
+ index
);
304 mutex_exit(&cpu_lock
);
306 pending
= (irr
& (1 << bit
)) ? 1 : 0;
307 if (!pending
&& (type
== DDI_INTR_TYPE_FIXED
)) {
308 /* check I/O APIC for fixed interrupt */
309 intin_no
= irqp
->airq_intin_no
;
310 apic_ix
= irqp
->airq_ioapicindex
;
311 pending
= (READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix
, intin_no
) &
/*
 * apic_clear_mask — unmask the interrupt's I/O APIC RDT entry: under
 * apic_ioapic_lock with interrupts disabled (intr_clear), read-modify-write
 * the low RDT dword with AV_MASK cleared.
 *
 * NOTE(review): garbled extraction — local declarations and the trailing
 * intr_restore/brace are missing from this view. Kept byte-identical.
 */
319 * This function will clear the mask for the interrupt on the I/O APIC
322 apic_clear_mask(apic_irq_t
*irqp
)
329 DDI_INTR_IMPLDBG((CE_CONT
, "apic_clear_mask: irqp: %p\n",
332 intin_no
= irqp
->airq_intin_no
;
333 apic_ix
= irqp
->airq_ioapicindex
;
/* Block interrupts and take the I/O APIC lock for the RMW below. */
335 iflag
= intr_clear();
336 lock_set(&apic_ioapic_lock
);
338 rdt_entry
= READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix
, intin_no
);
341 WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix
, intin_no
,
342 ((~AV_MASK
) & rdt_entry
));
344 lock_clear(&apic_ioapic_lock
);
/*
 * apic_set_mask — mask the interrupt's I/O APIC RDT entry: mirror image of
 * apic_clear_mask, ORing AV_MASK into the low RDT dword under
 * apic_ioapic_lock with interrupts disabled.
 *
 * NOTE(review): garbled extraction — local declarations and the trailing
 * intr_restore/brace are missing from this view. Kept byte-identical.
 */
350 * This function will mask the interrupt on the I/O APIC
353 apic_set_mask(apic_irq_t
*irqp
)
360 DDI_INTR_IMPLDBG((CE_CONT
, "apic_set_mask: irqp: %p\n", (void *)irqp
));
362 intin_no
= irqp
->airq_intin_no
;
363 apic_ix
= irqp
->airq_ioapicindex
;
/* Block interrupts and take the I/O APIC lock for the RMW below. */
365 iflag
= intr_clear();
367 lock_set(&apic_ioapic_lock
);
369 rdt_entry
= READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix
, intin_no
);
372 WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix
, intin_no
,
373 (AV_MASK
| rdt_entry
));
375 lock_clear(&apic_ioapic_lock
);
/*
 * apic_free_vectors — release `count` MSI/MSI-X vectors previously
 * allocated for dip starting at inum: for each, look up the apic_irq_t via
 * apic_find_irq, mark it FREE_INDEX and point its vector slot back at
 * APIC_RESV_IRQ. Non-MSI/MSI-X types are rejected up front.
 *
 * NOTE(review): garbled extraction — local declarations (i, irqptr), the
 * early return, `continue` on lookup failure and closing braces are
 * missing from this view. Kept byte-identical.
 */
381 apic_free_vectors(dev_info_t
*dip
, int inum
, int count
, int pri
, int type
)
385 struct intrspec ispec
;
387 DDI_INTR_IMPLDBG((CE_CONT
, "apic_free_vectors: dip: %p inum: %x "
388 "count: %x pri: %x type: %x\n",
389 (void *)dip
, inum
, count
, pri
, type
));
/* Only MSI/MSI-X vectors are freed here. */
392 if (!DDI_INTR_IS_MSI_OR_MSIX(type
))
395 for (i
= 0; i
< count
; i
++) {
396 DDI_INTR_IMPLDBG((CE_CONT
, "apic_free_vectors: inum=0x%x "
397 "pri=0x%x count=0x%x\n", inum
, pri
, count
));
398 ispec
.intrspec_vec
= inum
+ i
;
399 ispec
.intrspec_pri
= pri
;
400 if ((irqptr
= apic_find_irq(dip
, &ispec
, type
)) == NULL
) {
401 DDI_INTR_IMPLDBG((CE_CONT
, "apic_free_vectors: "
402 "dip=0x%p inum=0x%x pri=0x%x apic_find_irq() "
403 "failed\n", (void *)dip
, inum
, pri
));
/* Mark the entry free and reclaim its vector slot. */
406 irqptr
->airq_mps_intr_index
= FREE_INDEX
;
407 apic_vector_to_irq
[irqptr
->airq_vector
] = APIC_RESV_IRQ
;
/*
 * apic_pci_msi_enable_mode — turn on MSI or MSI-X delivery for rdip:
 * for MSI, set PCI_MSI_ENABLE_BIT in the capability's control register
 * (skipping the write if already set); for MSI-X, clear the per-vector
 * mask bit of the inum'th table entry and set PCI_MSIX_ENABLE_BIT if it
 * is not already on.
 *
 * NOTE(review): garbled extraction — local declarations (msi_ctrl, off,
 * mask), the early return and the final put16 argument/closing braces are
 * missing from this view. Kept byte-identical.
 */
412 * apic_pci_msi_enable_mode:
415 apic_pci_msi_enable_mode(dev_info_t
*rdip
, int type
, int inum
)
418 int cap_ptr
= i_ddi_get_msi_msix_cap_ptr(rdip
);
419 ddi_acc_handle_t handle
= i_ddi_get_pci_config_handle(rdip
);
421 ASSERT((handle
!= NULL
) && (cap_ptr
!= 0));
423 if (type
== DDI_INTR_TYPE_MSI
) {
424 msi_ctrl
= pci_config_get16(handle
, cap_ptr
+ PCI_MSI_CTRL
);
/* Already enabled: nothing to do. */
425 if ((msi_ctrl
& PCI_MSI_ENABLE_BIT
))
428 msi_ctrl
|= PCI_MSI_ENABLE_BIT
;
429 pci_config_put16(handle
, cap_ptr
+ PCI_MSI_CTRL
, msi_ctrl
);
431 } else if (type
== DDI_INTR_TYPE_MSIX
) {
434 ddi_intr_msix_t
*msix_p
;
436 msix_p
= i_ddi_get_msix(rdip
);
438 ASSERT(msix_p
!= NULL
);
440 /* Offset into "inum"th entry in the MSI-X table & clear mask */
441 off
= (uintptr_t)msix_p
->msix_tbl_addr
+ (inum
*
442 PCI_MSIX_VECTOR_SIZE
) + PCI_MSIX_VECTOR_CTRL_OFFSET
;
444 mask
= ddi_get32(msix_p
->msix_tbl_hdl
, (uint32_t *)off
);
/* Clear bit 0 (per-vector mask) of the vector-control dword. */
446 ddi_put32(msix_p
->msix_tbl_hdl
, (uint32_t *)off
, (mask
& ~1));
448 msi_ctrl
= pci_config_get16(handle
, cap_ptr
+ PCI_MSIX_CTRL
);
450 if (!(msi_ctrl
& PCI_MSIX_ENABLE_BIT
)) {
451 msi_ctrl
|= PCI_MSIX_ENABLE_BIT
;
452 pci_config_put16(handle
, cap_ptr
+ PCI_MSIX_CTRL
,
/*
 * apic_set_cpu — rebind a single interrupt (irqno) to `cpu`: look up the
 * apic_irq_t under airq_mutex, refuse MSI entries that belong to a
 * multi-vector group (those go through apic_grp_set_cpu), then call
 * apic_rebind_all under apic_ioapic_lock with interrupts disabled and
 * record the new binding in airq_cpu. Returns PSM_SUCCESS/PSM_FAILURE.
 *
 * NOTE(review): garbled extraction — local declarations (irqp, iflag,
 * ret), the NULL-check condition, intr_restore and the ret test preceding
 * one failure return are missing from this view. Kept byte-identical.
 */
459 apic_set_cpu(int irqno
, int cpu
, int *result
)
465 DDI_INTR_IMPLDBG((CE_CONT
, "APIC_SET_CPU\n"));
467 mutex_enter(&airq_mutex
);
468 irqp
= apic_irq_table
[irqno
];
469 mutex_exit(&airq_mutex
);
473 return (PSM_FAILURE
);
476 /* Fail if this is an MSI intr and is part of a group. */
477 if ((irqp
->airq_mps_intr_index
== MSI_INDEX
) &&
478 (irqp
->airq_intin_no
> 1)) {
480 return (PSM_FAILURE
);
483 iflag
= intr_clear();
484 lock_set(&apic_ioapic_lock
);
486 ret
= apic_rebind_all(irqp
, cpu
);
488 lock_clear(&apic_ioapic_lock
);
493 return (PSM_FAILURE
);
496 * keep tracking the default interrupt cpu binding
498 irqp
->airq_cpu
= cpu
;
501 return (PSM_SUCCESS
);
/*
 * apic_grp_set_cpu — rebind a whole MSI vector group to new_cpu. Under
 * airq_mutex it collects the group's apic_irq_t entries (base entry's
 * airq_intin_no holds the group size), sanity-checks that all entries
 * share the same dip and temp CPU and that the base vector is
 * power-of-two aligned, then — with MSI per-vector masking engaged when
 * the device supports it — rebinds every vector under apic_ioapic_lock.
 * Single-vector groups are delegated to apic_set_cpu.
 *
 * NOTE(review): garbled extraction — local declarations (orig_cpu, i,
 * num_vectors, vector, cap_ptr, msi_ctrl, iflag), the set_grp_intr_done
 * label, intr_restore and several braces/returns are missing from this
 * view. Kept byte-identical.
 */
505 apic_grp_set_cpu(int irqno
, int new_cpu
, int *result
)
507 dev_info_t
*orig_dip
;
510 apic_irq_t
*irqps
[PCI_MSI_MAX_INTRS
];
513 int msi_mask_off
= 0;
515 uint32_t msi_pvm
= 0;
516 ddi_acc_handle_t handle
;
520 DDI_INTR_IMPLDBG((CE_CONT
, "APIC_GRP_SET_CPU\n"));
523 * Take mutex to insure that table doesn't change out from underneath
524 * us while we're playing with it.
526 mutex_enter(&airq_mutex
);
527 irqps
[0] = apic_irq_table
[irqno
];
528 orig_cpu
= irqps
[0]->airq_temp_cpu
;
529 orig_dip
= irqps
[0]->airq_dip
;
530 num_vectors
= irqps
[0]->airq_intin_no
;
531 vector
= irqps
[0]->airq_vector
;
/* A group of one is just a plain rebind. */
534 if (num_vectors
== 1) {
535 mutex_exit(&airq_mutex
);
536 return (apic_set_cpu(irqno
, new_cpu
, result
));
541 if (irqps
[0]->airq_mps_intr_index
!= MSI_INDEX
) {
542 mutex_exit(&airq_mutex
);
543 DDI_INTR_IMPLDBG((CE_CONT
, "set_grp: intr not MSI\n"));
544 goto set_grp_intr_done
;
/* Base vector of a size-N group must be N-aligned (N a power of 2). */
546 if ((num_vectors
< 1) || ((num_vectors
- 1) & vector
)) {
547 mutex_exit(&airq_mutex
);
548 DDI_INTR_IMPLDBG((CE_CONT
,
549 "set_grp: base vec not part of a grp or not aligned: "
550 "vec:0x%x, num_vec:0x%x\n", vector
, num_vectors
));
551 goto set_grp_intr_done
;
553 DDI_INTR_IMPLDBG((CE_CONT
, "set_grp: num intrs in grp: %d\n",
556 ASSERT((num_vectors
+ vector
) < APIC_MAX_VECTOR
);
561 * All IRQ entries in the table for the given device will be not
562 * shared. Since they are not shared, the dip in the table will
563 * be true to the device of interest.
565 for (i
= 1; i
< num_vectors
; i
++) {
566 irqps
[i
] = apic_irq_table
[apic_vector_to_irq
[vector
+ i
]];
567 if (irqps
[i
] == NULL
) {
568 mutex_exit(&airq_mutex
);
569 goto set_grp_intr_done
;
572 /* Sanity check: CPU and dip is the same for all entries. */
573 if ((irqps
[i
]->airq_dip
!= orig_dip
) ||
574 (irqps
[i
]->airq_temp_cpu
!= orig_cpu
)) {
575 mutex_exit(&airq_mutex
);
576 DDI_INTR_IMPLDBG((CE_CONT
,
577 "set_grp: cpu or dip for vec 0x%x difft than for "
578 "vec 0x%x\n", vector
, vector
+ i
));
579 DDI_INTR_IMPLDBG((CE_CONT
,
580 " cpu: %d vs %d, dip: 0x%p vs 0x%p\n", orig_cpu
,
581 irqps
[i
]->airq_temp_cpu
, (void *)orig_dip
,
582 (void *)irqps
[i
]->airq_dip
));
583 goto set_grp_intr_done
;
587 mutex_exit(&airq_mutex
);
589 cap_ptr
= i_ddi_get_msi_msix_cap_ptr(orig_dip
);
590 handle
= i_ddi_get_pci_config_handle(orig_dip
);
591 msi_ctrl
= pci_config_get16(handle
, cap_ptr
+ PCI_MSI_CTRL
);
593 /* MSI Per vector masking is supported. */
594 if (msi_ctrl
& PCI_MSI_PVM_MASK
) {
/* Mask-bits register offset depends on 32- vs 64-bit capability. */
595 if (msi_ctrl
& PCI_MSI_64BIT_MASK
)
596 msi_mask_off
= cap_ptr
+ PCI_MSI_64BIT_MASKBITS
;
598 msi_mask_off
= cap_ptr
+ PCI_MSI_32BIT_MASK
;
/* Save current mask, then mask all vectors during the rebind. */
599 msi_pvm
= pci_config_get32(handle
, msi_mask_off
);
600 pci_config_put32(handle
, msi_mask_off
, (uint32_t)-1);
601 DDI_INTR_IMPLDBG((CE_CONT
,
602 "set_grp: pvm supported. Mask set to 0x%x\n",
603 pci_config_get32(handle
, msi_mask_off
)));
606 iflag
= intr_clear();
607 lock_set(&apic_ioapic_lock
);
610 * Do the first rebind and check for errors. Apic_rebind_all returns
611 * an error if the CPU is not accepting interrupts. If the first one
612 * succeeds they all will.
614 if (apic_rebind_all(irqps
[0], new_cpu
))
615 (void) apic_rebind_all(irqps
[0], orig_cpu
);
617 irqps
[0]->airq_cpu
= new_cpu
;
619 for (i
= 1; i
< num_vectors
; i
++) {
620 (void) apic_rebind_all(irqps
[i
], new_cpu
);
621 irqps
[i
]->airq_cpu
= new_cpu
;
623 *result
= 0; /* SUCCESS */
626 lock_clear(&apic_ioapic_lock
);
629 /* Reenable vectors if per vector masking is supported. */
630 if (msi_ctrl
& PCI_MSI_PVM_MASK
) {
631 pci_config_put32(handle
, msi_mask_off
, msi_pvm
);
632 DDI_INTR_IMPLDBG((CE_CONT
,
633 "set_grp: pvm supported. Mask restored to 0x%x\n",
634 pci_config_get32(handle
, msi_mask_off
)));
639 return (PSM_FAILURE
);
641 return (PSM_SUCCESS
);
/*
 * apic_get_vector_intr_info — fill an apic_get_intr_t for intrd and
 * friends: under airq_mutex, map vecirq (vector or irq per avgi_req_flags)
 * to an apic_irq_table entry, then per the request flags return the
 * (temp) CPU binding (with IRQ_USER_BOUND translated to
 * PSMGI_CPU_USER_BOUND), the vector, the share count, and optionally a
 * kmem_zalloc'd list of the non-NULL dips hanging off autovect[irqno].
 * Caller owns/frees avgi_dip_list per the PSMGI contract.
 *
 * NOTE(review): garbled extraction — local declarations (i, irqno,
 * irq_p), some condition right-hand sides (PSMGI_INTRBY_VEC, KM_SLEEP,
 * av_dev->av_dip assignment) and closing braces are missing from this
 * view. Kept byte-identical.
 */
645 apic_get_vector_intr_info(int vecirq
, apic_get_intr_t
*intr_params_p
)
647 struct autovec
*av_dev
;
652 /* Sanity check the vector/irq argument. */
653 ASSERT((vecirq
>= 0) || (vecirq
<= APIC_MAX_VECTOR
));
655 mutex_enter(&airq_mutex
);
658 * Convert the vecirq arg to an irq using vector_to_irq table
659 * if the arg is a vector. Pass thru if already an irq.
661 if ((intr_params_p
->avgi_req_flags
& PSMGI_INTRBY_FLAGS
) ==
663 irqno
= apic_vector_to_irq
[vecirq
];
665 irqno
= (uchar_t
)vecirq
;
667 irq_p
= apic_irq_table
[irqno
];
/* Reject missing or unbound/uninitialized entries (RESERVE exempt). */
669 if ((irq_p
== NULL
) ||
670 ((irq_p
->airq_mps_intr_index
!= RESERVE_INDEX
) &&
671 ((irq_p
->airq_temp_cpu
== IRQ_UNBOUND
) ||
672 (irq_p
->airq_temp_cpu
== IRQ_UNINIT
)))) {
673 mutex_exit(&airq_mutex
);
674 return (PSM_FAILURE
);
677 if (intr_params_p
->avgi_req_flags
& PSMGI_REQ_CPUID
) {
679 /* Get the (temp) cpu from apic_irq table, indexed by irq. */
680 intr_params_p
->avgi_cpu_id
= irq_p
->airq_temp_cpu
;
682 /* Return user bound info for intrd. */
683 if (intr_params_p
->avgi_cpu_id
& IRQ_USER_BOUND
) {
684 intr_params_p
->avgi_cpu_id
&= ~IRQ_USER_BOUND
;
685 intr_params_p
->avgi_cpu_id
|= PSMGI_CPU_USER_BOUND
;
689 if (intr_params_p
->avgi_req_flags
& PSMGI_REQ_VECTOR
)
690 intr_params_p
->avgi_vector
= irq_p
->airq_vector
;
692 if (intr_params_p
->avgi_req_flags
&
693 (PSMGI_REQ_NUM_DEVS
| PSMGI_REQ_GET_DEVS
))
694 /* Get number of devices from apic_irq table shared field. */
695 intr_params_p
->avgi_num_devs
= irq_p
->airq_share
;
697 if (intr_params_p
->avgi_req_flags
& PSMGI_REQ_GET_DEVS
) {
699 intr_params_p
->avgi_req_flags
|= PSMGI_REQ_NUM_DEVS
;
701 /* Some devices have NULL dip. Don't count these. */
702 if (intr_params_p
->avgi_num_devs
> 0) {
703 for (i
= 0, av_dev
= autovect
[irqno
].avh_link
;
704 av_dev
; av_dev
= av_dev
->av_link
)
705 if (av_dev
->av_vector
&& av_dev
->av_dip
)
/* Clamp the reported count to the dips actually found. */
707 intr_params_p
->avgi_num_devs
=
708 (uchar_t
)MIN(intr_params_p
->avgi_num_devs
, i
);
711 /* There are no viable dips to return. */
712 if (intr_params_p
->avgi_num_devs
== 0)
713 intr_params_p
->avgi_dip_list
= NULL
;
715 else { /* Return list of dips */
717 /* Allocate space in array for that number of devs. */
718 intr_params_p
->avgi_dip_list
= kmem_zalloc(
719 intr_params_p
->avgi_num_devs
*
720 sizeof (dev_info_t
*),
724 * Loop through the device list of the autovec table
725 * filling in the dip array.
727 * Note that the autovect table may have some special
728 * entries which contain NULL dips. These will be
731 for (i
= 0, av_dev
= autovect
[irqno
].avh_link
;
732 av_dev
; av_dev
= av_dev
->av_link
)
733 if (av_dev
->av_vector
&& av_dev
->av_dip
)
734 intr_params_p
->avgi_dip_list
[i
++] =
739 mutex_exit(&airq_mutex
);
741 return (PSM_SUCCESS
);
/*
 * apic_intr_ops — PSM entry point dispatching DDI interrupt-framework
 * opcodes (PSM_INTR_OP_*) to the helpers in this file: capability and MSI
 * support checks, vector alloc/free/navail/xlate, pending/mask queries,
 * priority changes, CPU (re)binding and APIC-type queries. Returns
 * PSM_SUCCESS or PSM_FAILURE; op-specific data flows through *result or
 * hdlp->ih_private.
 *
 * NOTE(review): garbled extraction — the switch header, `break`
 * statements, several local declarations (irqp, cap, old_priority,
 * new_priority, count_vec, new_cpu) and various condition right-hand
 * sides are missing from this view (fused line numbers skip). Kept
 * byte-identical; re-sync with the canonical source before editing.
 */
745 * This function provides external interface to the nexus for all
746 * functionalities related to the new DDI interrupt framework.
749 * dip - pointer to the dev_info structure of the requested device
750 * hdlp - pointer to the internal interrupt handle structure for the
751 * requested interrupt
752 * intr_op - opcode for this call
753 * result - pointer to the integer that will hold the result to be
754 * passed back if return value is PSM_SUCCESS
757 * return value is either PSM_SUCCESS or PSM_FAILURE
760 apic_intr_ops(dev_info_t
*dip
, ddi_intr_handle_impl_t
*hdlp
,
761 psm_intr_op_t intr_op
, int *result
)
769 struct intrspec
*ispec
, intr_spec
;
771 DDI_INTR_IMPLDBG((CE_CONT
, "apic_intr_ops: dip: %p hdlp: %p "
772 "intr_op: %x\n", (void *)dip
, (void *)hdlp
, intr_op
));
/* Seed the scratch intrspec from the handle for lookups below. */
775 ispec
->intrspec_pri
= hdlp
->ih_pri
;
776 ispec
->intrspec_vec
= hdlp
->ih_inum
;
777 ispec
->intrspec_func
= hdlp
->ih_cb_func
;
780 case PSM_INTR_OP_CHECK_MSI
:
782 * Check MSI/X is supported or not at APIC level and
783 * masked off the MSI/X bits in hdlp->ih_type if not
784 * supported before return. If MSI/X is supported,
785 * leave the ih_type unchanged and return.
787 * hdlp->ih_type passed in from the nexus has all the
788 * interrupt types supported by the device.
790 if (apic_support_msi
== 0) {
792 * if apic_support_msi is not set, call
793 * apic_check_msi_support() to check whether msi
796 if (apic_check_msi_support() == PSM_SUCCESS
)
797 apic_support_msi
= 1;
799 apic_support_msi
= -1;
801 if (apic_support_msi
== 1) {
802 if (apic_msix_enable
)
803 *result
= hdlp
->ih_type
;
805 *result
= hdlp
->ih_type
& ~DDI_INTR_TYPE_MSIX
;
807 *result
= hdlp
->ih_type
& ~(DDI_INTR_TYPE_MSI
|
810 case PSM_INTR_OP_ALLOC_VECTORS
:
811 if (hdlp
->ih_type
== DDI_INTR_TYPE_MSI
)
812 *result
= apic_alloc_msi_vectors(dip
, hdlp
->ih_inum
,
813 hdlp
->ih_scratch1
, hdlp
->ih_pri
,
814 (int)(uintptr_t)hdlp
->ih_scratch2
);
816 *result
= apic_alloc_msix_vectors(dip
, hdlp
->ih_inum
,
817 hdlp
->ih_scratch1
, hdlp
->ih_pri
,
818 (int)(uintptr_t)hdlp
->ih_scratch2
);
820 case PSM_INTR_OP_FREE_VECTORS
:
821 apic_free_vectors(dip
, hdlp
->ih_inum
, hdlp
->ih_scratch1
,
822 hdlp
->ih_pri
, hdlp
->ih_type
);
824 case PSM_INTR_OP_NAVAIL_VECTORS
:
825 *result
= apic_navail_vector(dip
, hdlp
->ih_pri
);
827 case PSM_INTR_OP_XLATE_VECTOR
:
828 ispec
= ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
;
829 *result
= apic_introp_xlate(dip
, ispec
, hdlp
->ih_type
);
831 return (PSM_FAILURE
);
833 case PSM_INTR_OP_GET_PENDING
:
834 if ((irqp
= apic_find_irq(dip
, ispec
, hdlp
->ih_type
)) == NULL
)
835 return (PSM_FAILURE
);
836 *result
= apic_get_pending(irqp
, hdlp
->ih_type
);
838 case PSM_INTR_OP_CLEAR_MASK
:
/* Masking is only meaningful for fixed (I/O APIC) interrupts. */
839 if (hdlp
->ih_type
!= DDI_INTR_TYPE_FIXED
)
840 return (PSM_FAILURE
);
841 irqp
= apic_find_irq(dip
, ispec
, hdlp
->ih_type
);
843 return (PSM_FAILURE
);
844 apic_clear_mask(irqp
);
846 case PSM_INTR_OP_SET_MASK
:
847 if (hdlp
->ih_type
!= DDI_INTR_TYPE_FIXED
)
848 return (PSM_FAILURE
);
849 if ((irqp
= apic_find_irq(dip
, ispec
, hdlp
->ih_type
)) == NULL
)
850 return (PSM_FAILURE
);
853 case PSM_INTR_OP_GET_CAP
:
854 cap
= DDI_INTR_FLAG_PENDING
;
855 if (hdlp
->ih_type
== DDI_INTR_TYPE_FIXED
)
856 cap
|= DDI_INTR_FLAG_MASKABLE
;
859 case PSM_INTR_OP_GET_SHARED
:
860 if (hdlp
->ih_type
!= DDI_INTR_TYPE_FIXED
)
861 return (PSM_FAILURE
);
862 ispec
= ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
;
863 if ((irqp
= apic_find_irq(dip
, ispec
, hdlp
->ih_type
)) == NULL
)
864 return (PSM_FAILURE
);
865 *result
= (irqp
->airq_share
> 1) ? 1: 0;
867 case PSM_INTR_OP_SET_PRI
:
868 old_priority
= hdlp
->ih_pri
; /* save old value */
869 new_priority
= *(int *)result
; /* try the new value */
/* Fixed interrupts need no vector reallocation on pri change. */
871 if (hdlp
->ih_type
== DDI_INTR_TYPE_FIXED
) {
872 return (PSM_SUCCESS
);
875 /* Now allocate the vectors */
876 if (hdlp
->ih_type
== DDI_INTR_TYPE_MSI
) {
877 /* SET_PRI does not support the case of multiple MSI */
878 if (i_ddi_intr_get_current_nintrs(hdlp
->ih_dip
) > 1)
879 return (PSM_FAILURE
);
881 count_vec
= apic_alloc_msi_vectors(dip
, hdlp
->ih_inum
,
883 DDI_INTR_ALLOC_STRICT
);
885 count_vec
= apic_alloc_msix_vectors(dip
, hdlp
->ih_inum
,
887 DDI_INTR_ALLOC_STRICT
);
890 /* Did we get new vectors? */
892 return (PSM_FAILURE
);
894 /* Finally, free the previously allocated vectors */
895 apic_free_vectors(dip
, hdlp
->ih_inum
, count_vec
,
896 old_priority
, hdlp
->ih_type
);
898 case PSM_INTR_OP_SET_CPU
:
899 case PSM_INTR_OP_GRP_SET_CPU
:
901 * The interrupt handle given here has been allocated
902 * specifically for this command, and ih_private carries
905 new_cpu
= (int)(intptr_t)hdlp
->ih_private
;
906 if (!apic_cpu_in_range(new_cpu
)) {
907 DDI_INTR_IMPLDBG((CE_CONT
,
908 "[grp_]set_cpu: cpu out of range: %d\n", new_cpu
));
910 return (PSM_FAILURE
);
912 if (hdlp
->ih_vector
> APIC_MAX_VECTOR
) {
913 DDI_INTR_IMPLDBG((CE_CONT
,
914 "[grp_]set_cpu: vector out of range: %d\n",
917 return (PSM_FAILURE
);
/* Callers may pass a vector; normalize it to an irq first. */
919 if ((hdlp
->ih_flags
& PSMGI_INTRBY_FLAGS
) == PSMGI_INTRBY_VEC
)
920 hdlp
->ih_vector
= apic_vector_to_irq
[hdlp
->ih_vector
];
921 if (intr_op
== PSM_INTR_OP_SET_CPU
) {
922 if (apic_set_cpu(hdlp
->ih_vector
, new_cpu
, result
) !=
924 return (PSM_FAILURE
);
926 if (apic_grp_set_cpu(hdlp
->ih_vector
, new_cpu
,
927 result
) != PSM_SUCCESS
)
928 return (PSM_FAILURE
);
931 case PSM_INTR_OP_GET_INTR
:
933 * The interrupt handle given here has been allocated
934 * specifically for this command, and ih_private carries
935 * a pointer to a apic_get_intr_t.
937 if (apic_get_vector_intr_info(
938 hdlp
->ih_vector
, hdlp
->ih_private
) != PSM_SUCCESS
)
939 return (PSM_FAILURE
);
941 case PSM_INTR_OP_APIC_TYPE
:
942 ((apic_get_type_t
*)(hdlp
->ih_private
))->avgi_type
=
943 apic_get_apic_type();
944 ((apic_get_type_t
*)(hdlp
->ih_private
))->avgi_num_intr
=
946 ((apic_get_type_t
*)(hdlp
->ih_private
))->avgi_num_cpu
=
948 hdlp
->ih_ver
= apic_get_apic_version();
950 case PSM_INTR_OP_SET_CAP
:
/* SET_CAP is not supported by this PSM. */
952 return (PSM_FAILURE
);
954 return (PSM_SUCCESS
);