/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>	/* include prototypes */

#if defined(__i386) || defined(__amd64)
/*
 * MSI-X allocation limit.
 */
extern uint_t		ddi_msix_alloc_limit;
#endif

/*
 * Interrupt Resource Management (IRM).
 */

#define	DDI_IRM_BALANCE_DELAY	(60)	/* In seconds */

#define	DDI_IRM_HAS_CB(c)	((c) && (c->cb_flags & DDI_CB_FLAG_INTR))

#define	DDI_IRM_IS_REDUCIBLE(r)	(((r->ireq_flags & DDI_IRM_FLAG_CALLBACK) && \
				(r->ireq_type == DDI_INTR_TYPE_MSIX)) || \
				(r->ireq_flags & DDI_IRM_FLAG_NEW))

extern pri_t	minclsyspri;

/* Global policies */
int		irm_enable = 1;
boolean_t	irm_active = B_FALSE;
int		irm_default_policy = DDI_IRM_POLICY_LARGE;
uint_t		irm_balance_delay = DDI_IRM_BALANCE_DELAY;

/* Global list of interrupt pools */
kmutex_t	irm_pools_lock;
list_t		irm_pools_list;

/* Global debug tunables */
#ifdef	DEBUG
int		irm_debug_policy = 0;
uint_t		irm_debug_size = 0;
#endif	/* DEBUG */

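/*
 * The globals above act as boot-time tunables.  As an illustrative
 * sketch (the value shown is hypothetical), an administrator could
 * lower the balancing delay from /etc/system:
 *
 *	set irm_balance_delay = 10
 */
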
static void	irm_balance_thread(ddi_irm_pool_t *);
static void	i_ddi_irm_balance(ddi_irm_pool_t *);
static void	i_ddi_irm_enqueue(ddi_irm_pool_t *, boolean_t);
static void	i_ddi_irm_reduce(ddi_irm_pool_t *pool);
static int	i_ddi_irm_reduce_by_policy(ddi_irm_pool_t *, int, int);
static void	i_ddi_irm_reduce_new(ddi_irm_pool_t *, int);
static void	i_ddi_irm_insertion_sort(list_t *, ddi_irm_req_t *);
static int	i_ddi_irm_notify(ddi_irm_pool_t *, ddi_irm_req_t *);
static int	i_ddi_irm_modify_increase(ddi_irm_req_t *, int);

/*
 * OS Initialization Routines
 */

/*
 * irm_init()
 *
 *	Initialize IRM subsystem before any drivers are attached.
 */
void
irm_init(void)
{
	/* Do nothing if IRM is disabled */
	if (!irm_enable)
		return;

	/* Verify that the default balancing policy is valid */
	if (!DDI_IRM_POLICY_VALID(irm_default_policy))
		irm_default_policy = DDI_IRM_POLICY_LARGE;

	/* Initialize the global list of interrupt pools */
	mutex_init(&irm_pools_lock, NULL, MUTEX_DRIVER, NULL);
	list_create(&irm_pools_list, sizeof (ddi_irm_pool_t),
	    offsetof(ddi_irm_pool_t, ipool_link));
}

/*
 * i_ddi_irm_poststartup()
 *
 *	IRM is not activated until after the IO subsystem is initialized.
 *	When activated, per-pool balancing threads are spawned and a flag
 *	is set so that all future pools will be activated when created.
 *
 *	NOTE: the global variable 'irm_enable' disables IRM if zero.
 */
void
i_ddi_irm_poststartup(void)
{
	ddi_irm_pool_t	*pool_p;

	/* Do nothing if IRM is disabled */
	if (!irm_enable)
		return;

	/* Lock the global list */
	mutex_enter(&irm_pools_lock);

	/* Activate all defined pools */
	for (pool_p = list_head(&irm_pools_list); pool_p;
	    pool_p = list_next(&irm_pools_list, pool_p))
		pool_p->ipool_thread = thread_create(NULL, 0,
		    irm_balance_thread, pool_p, 0, &p0, TS_RUN, minclsyspri);

	/* Set future pools to be active */
	irm_active = B_TRUE;

	/* Unlock the global list */
	mutex_exit(&irm_pools_lock);
}

/*
 * NDI interfaces for creating/destroying IRM pools.
 */

/*
 * ndi_irm_create()
 *
 *	Nexus interface to create an IRM pool.  Create the new
 *	pool and add it to the global list of interrupt pools.
 */
int
ndi_irm_create(dev_info_t *dip, ddi_irm_params_t *paramsp,
    ddi_irm_pool_t **pool_retp)
{
	ddi_irm_pool_t	*pool_p;

	ASSERT(dip != NULL);
	ASSERT(paramsp != NULL);
	ASSERT(pool_retp != NULL);
	ASSERT(paramsp->iparams_total >= 1);
	ASSERT(paramsp->iparams_types != 0);

	DDI_INTR_IRMDBG((CE_CONT, "ndi_irm_create: dip %p\n", (void *)dip));

	/* Check if IRM is enabled */
	if (!irm_enable)
		return (NDI_FAILURE);

	/* Validate parameters */
	if ((dip == NULL) || (paramsp == NULL) || (pool_retp == NULL) ||
	    (paramsp->iparams_total < 1) || (paramsp->iparams_types == 0))
		return (NDI_FAILURE);

	/* Allocate and initialize the pool */
	pool_p = kmem_zalloc(sizeof (ddi_irm_pool_t), KM_SLEEP);
	pool_p->ipool_owner = dip;
	pool_p->ipool_policy = irm_default_policy;
	pool_p->ipool_types = paramsp->iparams_types;
	pool_p->ipool_totsz = paramsp->iparams_total;
	pool_p->ipool_defsz = MIN(DDI_MAX_MSIX_ALLOC, MAX(DDI_MIN_MSIX_ALLOC,
	    paramsp->iparams_total / DDI_MSIX_ALLOC_DIVIDER));
	list_create(&pool_p->ipool_req_list, sizeof (ddi_irm_req_t),
	    offsetof(ddi_irm_req_t, ireq_link));
	list_create(&pool_p->ipool_scratch_list, sizeof (ddi_irm_req_t),
	    offsetof(ddi_irm_req_t, ireq_scratch_link));
	cv_init(&pool_p->ipool_cv, NULL, CV_DRIVER, NULL);
	mutex_init(&pool_p->ipool_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pool_p->ipool_navail_lock, NULL, MUTEX_DRIVER, NULL);

	/* Add to global list of pools */
	mutex_enter(&irm_pools_lock);
	list_insert_tail(&irm_pools_list, pool_p);
	mutex_exit(&irm_pools_lock);

	/* If IRM is active, then activate the pool */
	if (irm_active)
		pool_p->ipool_thread = thread_create(NULL, 0,
		    irm_balance_thread, pool_p, 0, &p0, TS_RUN, minclsyspri);

	*pool_retp = pool_p;
	return (NDI_SUCCESS);
}

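/*
 * Example: a hypothetical nexus driver creating its IRM pool during
 * attach (illustrative sketch only; "nvectors" stands in for however
 * many vectors the hardware supplies):
 *
 *	ddi_irm_params_t	params;
 *	ddi_irm_pool_t		*pool_p;
 *
 *	params.iparams_types = DDI_INTR_TYPE_MSIX;
 *	params.iparams_total = nvectors;
 *	if (ndi_irm_create(dip, &params, &pool_p) != NDI_SUCCESS)
 *		return (DDI_FAILURE);
 */
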
/*
 * ndi_irm_resize_pool()
 *
 *	Nexus interface to resize an IRM pool.  If the pool size drops
 *	below the allocated number of vectors, then initiate a rebalance
 *	operation before resizing the pool.  If the rebalance operation
 *	fails, then return NDI_FAILURE.
 */
int
ndi_irm_resize_pool(ddi_irm_pool_t *pool_p, uint_t new_size)
{
	uint_t	prev_size;

	ASSERT(pool_p != NULL);

	DDI_INTR_IRMDBG((CE_CONT, "ndi_irm_resize_pool: pool_p %p"
	    " current-size 0x%x new-size 0x%x\n",
	    (void *)pool_p, pool_p->ipool_totsz, new_size));

	/* Validate parameters */
	if (pool_p == NULL)
		return (NDI_FAILURE);

	/* Check if IRM is enabled */
	if (!irm_enable)
		return (NDI_FAILURE);

	mutex_enter(&pool_p->ipool_lock);

	/*
	 * If we are increasing the pool size, or if the reserved
	 * number of vectors is <= the new pool size, then simply
	 * update the pool size and enqueue a rebalance operation
	 * if necessary to use the new vectors.
	 */
	if ((pool_p->ipool_totsz < new_size) ||
	    (pool_p->ipool_resno <= new_size)) {
		/* set new pool size */
		pool_p->ipool_totsz = new_size;
		/* adjust the default allocation limit */
		pool_p->ipool_defsz = MIN(DDI_MAX_MSIX_ALLOC,
		    MAX(DDI_MIN_MSIX_ALLOC, new_size / DDI_MSIX_ALLOC_DIVIDER));
		/* queue a rebalance operation to use the new vectors */
		if (pool_p->ipool_reqno > pool_p->ipool_resno)
			i_ddi_irm_enqueue(pool_p, B_FALSE);
		mutex_exit(&pool_p->ipool_lock);
		return (NDI_SUCCESS);
	}

	DDI_INTR_IRMDBG((CE_CONT, "ndi_irm_resize_pool: pool_p %p"
	    " needs a rebalance operation\n", (void *)pool_p));

	/*
	 * Shrinking the pool below its reserved vector count
	 * requires a rebalance operation.
	 */
	/* save the current pool size */
	prev_size = pool_p->ipool_totsz;
	/* set the pool size to the desired new value */
	pool_p->ipool_totsz = new_size;
	/* perform the rebalance operation */
	i_ddi_irm_enqueue(pool_p, B_TRUE);

	/*
	 * If the rebalance operation couldn't free up enough
	 * vectors, then fail the resize operation.
	 */
	if (pool_p->ipool_resno > new_size) {	/* rebalance failed */
		/* restore the pool size to the previous value */
		pool_p->ipool_totsz = prev_size;
		/* enqueue a rebalance operation for the original pool size */
		i_ddi_irm_enqueue(pool_p, B_FALSE);
		mutex_exit(&pool_p->ipool_lock);
		return (NDI_FAILURE);
	} else {	/* rebalance worked */
		/* adjust the default allocation limit */
		pool_p->ipool_defsz = MIN(DDI_MAX_MSIX_ALLOC,
		    MAX(DDI_MIN_MSIX_ALLOC, new_size / DDI_MSIX_ALLOC_DIVIDER));
		mutex_exit(&pool_p->ipool_lock);
		DDI_INTR_IRMDBG((CE_CONT, "ndi_irm_resize_pool: pool_p %p"
		    " resized from %x to %x\n",
		    (void *)pool_p, prev_size, pool_p->ipool_totsz));
		return (NDI_SUCCESS);
	}
}

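/*
 * Example: a nexus driver that loses vectors (say, after a hardware
 * reconfiguration) could shrink its pool like this (illustrative
 * sketch; "new_nvectors" is hypothetical):
 *
 *	if (ndi_irm_resize_pool(pool_p, new_nvectors) != NDI_SUCCESS)
 *		cmn_err(CE_WARN, "failed to shrink IRM pool");
 */
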
/*
 * ndi_irm_destroy()
 *
 *	Nexus interface to destroy an IRM pool.  Destroy the pool
 *	and remove it from the global list of interrupt pools.
 */
int
ndi_irm_destroy(ddi_irm_pool_t *pool_p)
{
	ASSERT(pool_p != NULL);
	ASSERT(pool_p->ipool_resno == 0);

	DDI_INTR_IRMDBG((CE_CONT, "ndi_irm_destroy: pool_p %p\n",
	    (void *)pool_p));

	/* Validate parameters */
	if (pool_p == NULL)
		return (NDI_FAILURE);

	/* Validate that pool is empty */
	if (pool_p->ipool_resno != 0)
		return (NDI_BUSY);

	/* Remove the pool from the global list */
	mutex_enter(&irm_pools_lock);
	list_remove(&irm_pools_list, pool_p);
	mutex_exit(&irm_pools_lock);

	/* Terminate the balancing thread */
	mutex_enter(&pool_p->ipool_lock);
	if (pool_p->ipool_thread &&
	    (pool_p->ipool_flags & DDI_IRM_FLAG_ACTIVE)) {
		pool_p->ipool_flags |= DDI_IRM_FLAG_EXIT;
		cv_signal(&pool_p->ipool_cv);
		mutex_exit(&pool_p->ipool_lock);
		thread_join(pool_p->ipool_thread->t_did);
	} else
		mutex_exit(&pool_p->ipool_lock);

	/* Destroy the pool */
	cv_destroy(&pool_p->ipool_cv);
	mutex_destroy(&pool_p->ipool_lock);
	mutex_destroy(&pool_p->ipool_navail_lock);
	list_destroy(&pool_p->ipool_req_list);
	list_destroy(&pool_p->ipool_scratch_list);
	kmem_free(pool_p, sizeof (ddi_irm_pool_t));

	return (NDI_SUCCESS);
}

/*
 * Insert/Modify/Remove Interrupt Requests
 */

/*
 * i_ddi_irm_insert()
 *
 *	Insert a new request into an interrupt pool, and balance the pool.
 */
int
i_ddi_irm_insert(dev_info_t *dip, int type, int count)
{
	ddi_irm_req_t	*req_p;
	devinfo_intr_t	*intr_p;
	ddi_irm_pool_t	*pool_p;
	uint_t		nreq, nmin, npartial;
	boolean_t	irm_flag = B_FALSE;

	ASSERT(dip != NULL);
	ASSERT(DDI_INTR_TYPE_FLAG_VALID(type));
	ASSERT(count > 0);

	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_insert: dip %p type %d count %d\n",
	    (void *)dip, type, count));

	/* Validate parameters */
	if ((dip == NULL) || (count < 1) || !DDI_INTR_TYPE_FLAG_VALID(type)) {
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_insert: invalid args\n"));
		return (DDI_EINVAL);
	}

	/* Check for an existing request */
	if (((intr_p = DEVI(dip)->devi_intr_p) != NULL) &&
	    (intr_p->devi_irm_req_p != NULL))
		return (DDI_SUCCESS);

	/* Check for IRM support from the system */
	if ((pool_p = i_ddi_intr_get_pool(dip, type)) == NULL) {
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_insert: not supported\n"));
		return (DDI_ENOTSUP);
	}

	/* Check for IRM support from the driver */
	if (i_ddi_irm_supported(dip, type) == DDI_SUCCESS)
		irm_flag = B_TRUE;

	/* Determine request size */
	nreq = (irm_flag) ? count :
	    MIN(count, i_ddi_intr_get_limit(dip, type, pool_p));
	nmin = (irm_flag) ? 1 : nreq;
	npartial = MIN(nreq, pool_p->ipool_defsz);

	/* Allocate and initialize the request */
	req_p = kmem_zalloc(sizeof (ddi_irm_req_t), KM_SLEEP);
	req_p->ireq_type = type;
	req_p->ireq_dip = dip;
	req_p->ireq_pool_p = pool_p;
	req_p->ireq_nreq = nreq;
	req_p->ireq_flags = DDI_IRM_FLAG_NEW;
	if (irm_flag)
		req_p->ireq_flags |= DDI_IRM_FLAG_CALLBACK;

	/* Lock the pool */
	mutex_enter(&pool_p->ipool_lock);

	/* Check for minimal fit before inserting */
	if ((pool_p->ipool_minno + nmin) > pool_p->ipool_totsz) {
		cmn_err(CE_WARN, "%s%d: interrupt pool too full.\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		mutex_exit(&pool_p->ipool_lock);
		kmem_free(req_p, sizeof (ddi_irm_req_t));
		return (DDI_EAGAIN);
	}

	/* Insert the request into the pool */
	pool_p->ipool_reqno += nreq;
	pool_p->ipool_minno += nmin;
	i_ddi_irm_insertion_sort(&pool_p->ipool_req_list, req_p);

	/*
	 * Try to fulfill the request.
	 *
	 * If all the interrupts are available, and either the request
	 * is static or the pool is active, then just take them directly.
	 *
	 * If only some of the interrupts are available, and the request
	 * can receive future callbacks, then take some now but queue the
	 * pool to be rebalanced later.
	 *
	 * Otherwise, immediately rebalance the pool and wait.
	 */
	if ((!irm_flag || (pool_p->ipool_flags & DDI_IRM_FLAG_ACTIVE)) &&
	    ((pool_p->ipool_resno + nreq) <= pool_p->ipool_totsz)) {

		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_insert: "
		    "request completely fulfilled.\n"));
		pool_p->ipool_resno += nreq;
		req_p->ireq_navail = nreq;
		req_p->ireq_flags &= ~(DDI_IRM_FLAG_NEW);

	} else if (irm_flag &&
	    ((pool_p->ipool_resno + npartial) <= pool_p->ipool_totsz)) {

		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_insert: "
		    "request partially fulfilled.\n"));
		pool_p->ipool_resno += npartial;
		req_p->ireq_navail = npartial;
		req_p->ireq_flags &= ~(DDI_IRM_FLAG_NEW);
		i_ddi_irm_enqueue(pool_p, B_FALSE);

	} else {

		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_insert: "
		    "request needs immediate rebalance.\n"));
		i_ddi_irm_enqueue(pool_p, B_TRUE);
		req_p->ireq_flags &= ~(DDI_IRM_FLAG_NEW);
	}

	/* Fail if the request cannot be fulfilled at all */
	if (req_p->ireq_navail == 0) {
		cmn_err(CE_WARN, "%s%d: interrupt pool too full.\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		pool_p->ipool_reqno -= nreq;
		pool_p->ipool_minno -= nmin;
		list_remove(&pool_p->ipool_req_list, req_p);
		mutex_exit(&pool_p->ipool_lock);
		kmem_free(req_p, sizeof (ddi_irm_req_t));
		return (DDI_EAGAIN);
	}

	/* Unlock the pool */
	mutex_exit(&pool_p->ipool_lock);

	intr_p->devi_irm_req_p = req_p;
	return (DDI_SUCCESS);
}

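/*
 * Worked example of the three fulfillment paths above (numbers are
 * illustrative): in a pool with ipool_totsz = 32, ipool_resno = 20
 * and ipool_defsz = 4, an IRM-capable request of nreq = 16 cannot
 * be taken directly (20 + 16 > 32), but npartial = MIN(16, 4) = 4
 * does fit (20 + 4 <= 32).  The request is granted 4 vectors now and
 * the pool is queued for a background rebalance, after which the
 * driver may receive more vectors through its callback.
 */
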
/*
 * i_ddi_irm_modify()
 *
 *	Modify an existing request in an interrupt pool, and balance the pool.
 */
int
i_ddi_irm_modify(dev_info_t *dip, int nreq)
{
	devinfo_intr_t	*intr_p;
	ddi_irm_req_t	*req_p;
	ddi_irm_pool_t	*pool_p;
	int		type;
	int		retval = DDI_SUCCESS;

	ASSERT(dip != NULL);
	ASSERT(nreq > 0);

	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_modify: dip %p nreq %d\n",
	    (void *)dip, nreq));

	/* Validate parameters */
	if ((dip == NULL) || (nreq < 1)) {
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_modify: invalid args\n"));
		return (DDI_EINVAL);
	}

	/* Do nothing if not mapped to an IRM pool */
	if (((intr_p = DEVI(dip)->devi_intr_p) == NULL) ||
	    ((req_p = intr_p->devi_irm_req_p) == NULL))
		return (DDI_SUCCESS);

	/* Do nothing if the new size is the same */
	if (nreq == req_p->ireq_nreq)
		return (DDI_SUCCESS);

	/* Do not allow MSI requests to be resized */
	if ((type = req_p->ireq_type) == DDI_INTR_TYPE_MSI) {
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_modify: invalid type\n"));
		return (DDI_ENOTSUP);
	}

	/* Select the pool */
	if ((pool_p = req_p->ireq_pool_p) == NULL) {
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_modify: missing pool\n"));
		return (DDI_FAILURE);
	}

	/* Validate request size is not too large */
	if (nreq > i_ddi_intr_get_limit(dip, type, pool_p)) {
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_modify: invalid args\n"));
		return (DDI_EINVAL);
	}

	/* Lock the pool */
	mutex_enter(&pool_p->ipool_lock);

	/*
	 * Process the modification.
	 *
	 *	- To increase a non-IRM request, call the implementation in
	 *	  i_ddi_irm_modify_increase().
	 *
	 *	- To decrease a non-IRM request, directly update the pool and
	 *	  request, then queue the pool for later rebalancing.
	 *
	 *	- To modify an IRM request, always queue the pool for later
	 *	  rebalancing.  IRM consumers rely upon callbacks for changes.
	 */
	if ((nreq > req_p->ireq_nreq) &&
	    (i_ddi_irm_supported(dip, type) != DDI_SUCCESS)) {
		/* Increase a non-IRM request */
		retval = i_ddi_irm_modify_increase(req_p, nreq);
	} else {

		/* Update pool and request */
		pool_p->ipool_reqno -= req_p->ireq_nreq;
		pool_p->ipool_reqno += nreq;
		if (i_ddi_irm_supported(dip, type) != DDI_SUCCESS) {
			pool_p->ipool_minno -= req_p->ireq_navail;
			pool_p->ipool_resno -= req_p->ireq_navail;
			pool_p->ipool_minno += nreq;
			pool_p->ipool_resno += nreq;
			req_p->ireq_navail = nreq;
		}
		req_p->ireq_nreq = nreq;

		/* Re-sort request into the pool */
		list_remove(&pool_p->ipool_req_list, req_p);
		i_ddi_irm_insertion_sort(&pool_p->ipool_req_list, req_p);

		/* Queue pool for asynchronous rebalance */
		i_ddi_irm_enqueue(pool_p, B_FALSE);
	}

	/* Unlock the pool */
	mutex_exit(&pool_p->ipool_lock);

	return (retval);
}

/*
 * i_ddi_irm_modify_increase()
 *
 *	Increase a non-IRM request.  The additional interrupts are
 *	directly taken from the pool when possible.  Otherwise, an
 *	immediate, synchronous rebalance is performed.  A temporary
 *	proxy request is used for any rebalance operation to ensure
 *	the request is not reduced below its current allocation.
 *
 *	NOTE: pool must already be locked.
 */
static int
i_ddi_irm_modify_increase(ddi_irm_req_t *req_p, int nreq)
{
	dev_info_t	*dip = req_p->ireq_dip;
	ddi_irm_pool_t	*pool_p = req_p->ireq_pool_p;
	ddi_irm_req_t	new_req;
	int		count, delta;

	ASSERT(MUTEX_HELD(&pool_p->ipool_lock));

	/* Compute number of additional vectors */
	count = nreq - req_p->ireq_nreq;

	/* Check for minimal fit */
	if ((pool_p->ipool_minno + count) > pool_p->ipool_totsz) {
		cmn_err(CE_WARN, "%s%d: interrupt pool too full.\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_EAGAIN);
	}

	/* Update the pool */
	pool_p->ipool_reqno += count;
	pool_p->ipool_minno += count;

	/* Attempt direct implementation */
	if ((pool_p->ipool_resno + count) <= pool_p->ipool_totsz) {
		req_p->ireq_nreq += count;
		req_p->ireq_navail += count;
		pool_p->ipool_resno += count;
		return (DDI_SUCCESS);
	}

	/* Rebalance required: fail if pool is not active */
	if ((pool_p->ipool_flags & DDI_IRM_FLAG_ACTIVE) == 0) {
		pool_p->ipool_reqno -= count;
		pool_p->ipool_minno -= count;
		return (DDI_EAGAIN);
	}

	/* Insert temporary proxy request */
	bzero(&new_req, sizeof (ddi_irm_req_t));
	new_req.ireq_dip = dip;
	new_req.ireq_nreq = count;
	new_req.ireq_pool_p = pool_p;
	new_req.ireq_type = req_p->ireq_type;
	new_req.ireq_flags = DDI_IRM_FLAG_NEW;
	i_ddi_irm_insertion_sort(&pool_p->ipool_req_list, &new_req);

	/* Synchronously rebalance */
	i_ddi_irm_enqueue(pool_p, B_TRUE);

	/* Remove proxy request, and merge into original request */
	req_p->ireq_nreq += count;
	if ((delta = (count - new_req.ireq_navail)) > 0) {
		req_p->ireq_nreq -= delta;
		pool_p->ipool_reqno -= delta;
		pool_p->ipool_minno -= delta;
	}
	req_p->ireq_navail += new_req.ireq_navail;
	list_remove(&pool_p->ipool_req_list, req_p);
	list_remove(&pool_p->ipool_req_list, &new_req);
	i_ddi_irm_insertion_sort(&pool_p->ipool_req_list, req_p);

	return (DDI_SUCCESS);
}

/*
 * i_ddi_irm_remove()
 *
 *	Remove a request from an interrupt pool, and balance the pool.
 */
int
i_ddi_irm_remove(dev_info_t *dip)
{
	devinfo_intr_t	*intr_p;
	ddi_irm_pool_t	*pool_p;
	ddi_irm_req_t	*req_p;
	uint_t		nmin;

	ASSERT(dip != NULL);

	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_remove: dip %p\n", (void *)dip));

	/* Validate parameters */
	if (dip == NULL) {
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_remove: invalid args\n"));
		return (DDI_EINVAL);
	}

	/* Check if the device has a request */
	if (!(intr_p = DEVI(dip)->devi_intr_p) ||
	    !(req_p = intr_p->devi_irm_req_p)) {
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_remove: not found\n"));
		return (DDI_EINVAL);
	}

	/* Lock the pool */
	pool_p = req_p->ireq_pool_p;
	mutex_enter(&pool_p->ipool_lock);

	/* Remove the request from the pool */
	nmin = DDI_IRM_IS_REDUCIBLE(req_p) ? 1 : req_p->ireq_nreq;
	pool_p->ipool_minno -= nmin;
	pool_p->ipool_reqno -= req_p->ireq_nreq;
	pool_p->ipool_resno -= req_p->ireq_navail;
	list_remove(&pool_p->ipool_req_list, req_p);

	/* Queue pool to be rebalanced */
	i_ddi_irm_enqueue(pool_p, B_FALSE);

	/* Unlock the pool */
	mutex_exit(&pool_p->ipool_lock);

	/* Destroy the request */
	intr_p->devi_irm_req_p = NULL;
	kmem_free(req_p, sizeof (ddi_irm_req_t));

	return (DDI_SUCCESS);
}

/*
 * i_ddi_irm_set_cb()
 *
 *	Change the callback flag for a request, in response to
 *	a change in its callback registration.  Then rebalance
 *	the interrupt pool.
 *
 *	NOTE: the request is not locked because the navail value
 *	      is not directly affected.  The balancing thread may
 *	      modify the navail value in the background after it
 *	      locks the request itself.
 */
void
i_ddi_irm_set_cb(dev_info_t *dip, boolean_t has_cb_flag)
{
	devinfo_intr_t	*intr_p;
	ddi_irm_pool_t	*pool_p;
	ddi_irm_req_t	*req_p;
	uint_t		nreq;

	ASSERT(dip != NULL);

	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_set_cb: dip %p has_cb_flag %d\n",
	    (void *)dip, (int)has_cb_flag));

	/* Validate parameters */
	if (dip == NULL)
		return;

	/* Check for association with interrupt pool */
	if (!(intr_p = DEVI(dip)->devi_intr_p) ||
	    !(req_p = intr_p->devi_irm_req_p)) {
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_set_cb: not in pool\n"));
		return;
	}

	/* Lock the pool */
	pool_p = req_p->ireq_pool_p;
	mutex_enter(&pool_p->ipool_lock);

	/*
	 * Update the request and the pool
	 */
	if (has_cb_flag) {

		/* Update pool statistics */
		if (req_p->ireq_type == DDI_INTR_TYPE_MSIX)
			pool_p->ipool_minno -= (req_p->ireq_nreq - 1);

		/* Update request */
		req_p->ireq_flags |= DDI_IRM_FLAG_CALLBACK;

		/* Rebalance in background */
		i_ddi_irm_enqueue(pool_p, B_FALSE);

	} else {

		/* Determine new request size */
		nreq = MIN(req_p->ireq_nreq, pool_p->ipool_defsz);

#if defined(__i386) || defined(__amd64)
		/* Use the default static limit for non-IRM drivers */
		if (req_p->ireq_type == DDI_INTR_TYPE_MSIX)
			nreq = MIN(nreq, ddi_msix_alloc_limit);
#endif

		/* Update pool statistics */
		pool_p->ipool_reqno -= req_p->ireq_nreq;
		pool_p->ipool_reqno += nreq;
		if (req_p->ireq_type == DDI_INTR_TYPE_MSIX) {
			pool_p->ipool_minno -= 1;
			pool_p->ipool_minno += nreq;
		} else {
			pool_p->ipool_minno -= req_p->ireq_nreq;
			pool_p->ipool_minno += nreq;
		}

		/* Update request size, and re-sort in pool */
		req_p->ireq_nreq = nreq;
		list_remove(&pool_p->ipool_req_list, req_p);
		i_ddi_irm_insertion_sort(&pool_p->ipool_req_list, req_p);

		/* Rebalance synchronously, before losing callback */
		i_ddi_irm_enqueue(pool_p, B_TRUE);

		/* Remove callback flag */
		req_p->ireq_flags &= ~(DDI_IRM_FLAG_CALLBACK);
	}

	/* Unlock the pool */
	mutex_exit(&pool_p->ipool_lock);
}

/*
 * i_ddi_irm_supported()
 *
 *	Query if IRM is supported by a driver using a specific interrupt type.
 *	Notice that IRM is limited to MSI-X users with registered callbacks.
 */
int
i_ddi_irm_supported(dev_info_t *dip, int type)
{
	ddi_cb_t	*cb_p = DEVI(dip)->devi_cb_p;

	return ((DDI_IRM_HAS_CB(cb_p) && (type == DDI_INTR_TYPE_MSIX)) ?
	    DDI_SUCCESS : DDI_ENOTSUP);
}

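/*
 * A driver opts into IRM by registering an interrupt callback via
 * ddi_cb_register(9F) with DDI_CB_FLAG_INTR before allocating its
 * MSI-X interrupts.  Illustrative sketch ("xx_cb", "xx_cb_handle"
 * and "xx_state" are hypothetical driver names):
 *
 *	ddi_cb_handle_t	xx_cb_handle;
 *
 *	if (ddi_cb_register(dip, DDI_CB_FLAG_INTR, xx_cb, xx_state,
 *	    NULL, &xx_cb_handle) != DDI_SUCCESS)
 *		cmn_err(CE_NOTE, "no IRM; using a fixed allocation");
 */
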
/*
 * Interrupt Pool Balancing
 */

/*
 * irm_balance_thread()
 *
 *	One instance of this thread operates for each defined IRM pool.
 *	It does the initial activation of the pool, as well as balancing
 *	any requests that were queued up before the pool was active.
 *	Once active, it waits forever to service balance operations.
 */
static void
irm_balance_thread(ddi_irm_pool_t *pool_p)
{
	clock_t		interval;

	DDI_INTR_IRMDBG((CE_CONT, "irm_balance_thread: pool_p %p\n",
	    (void *)pool_p));

	/* Lock the pool */
	mutex_enter(&pool_p->ipool_lock);

	/* Perform initial balance if required */
	if (pool_p->ipool_reqno > pool_p->ipool_resno)
		i_ddi_irm_balance(pool_p);

	/* Activate the pool */
	pool_p->ipool_flags |= DDI_IRM_FLAG_ACTIVE;

	/*
	 * Main loop.  Iterate once before waiting on the signal, in case
	 * a signal was sent before this thread was created.
	 */
	for (;;) {

		/* Compute the delay interval */
		interval = drv_usectohz(irm_balance_delay * 1000000);

		/* Wait one interval, or until there are waiters */
		if ((interval > 0) &&
		    !(pool_p->ipool_flags & DDI_IRM_FLAG_WAITERS) &&
		    !(pool_p->ipool_flags & DDI_IRM_FLAG_EXIT)) {
			(void) cv_reltimedwait(&pool_p->ipool_cv,
			    &pool_p->ipool_lock, interval, TR_CLOCK_TICK);
		}

		/* Check if awakened to exit */
		if (pool_p->ipool_flags & DDI_IRM_FLAG_EXIT) {
			DDI_INTR_IRMDBG((CE_CONT,
			    "irm_balance_thread: exiting...\n"));
			mutex_exit(&pool_p->ipool_lock);
			thread_exit();
		}

		/* Balance the pool */
		i_ddi_irm_balance(pool_p);

		/* Notify waiters */
		if (pool_p->ipool_flags & DDI_IRM_FLAG_WAITERS) {
			cv_broadcast(&pool_p->ipool_cv);
			pool_p->ipool_flags &= ~(DDI_IRM_FLAG_WAITERS);
		}

		/* Clear QUEUED condition */
		pool_p->ipool_flags &= ~(DDI_IRM_FLAG_QUEUED);

		/* Sleep until queued */
		cv_wait(&pool_p->ipool_cv, &pool_p->ipool_lock);

		DDI_INTR_IRMDBG((CE_CONT, "irm_balance_thread: signaled.\n"));
	}
}

/*
 * i_ddi_irm_balance()
 *
 *	Balance a pool.  The general algorithm is to first reset all
 *	requests to their maximum size, use reduction algorithms to
 *	solve any imbalance, and then notify affected drivers.
 */
static void
i_ddi_irm_balance(ddi_irm_pool_t *pool_p)
{
	ddi_irm_req_t	*req_p;

#ifdef	DEBUG
	uint_t		debug_totsz = 0;
	int		debug_policy = 0;
#endif	/* DEBUG */

	ASSERT(pool_p != NULL);
	ASSERT(MUTEX_HELD(&pool_p->ipool_lock));

	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_balance: pool_p %p\n",
	    (void *)pool_p));

#ifndef	DEBUG
	if ((pool_p->ipool_reqno == pool_p->ipool_resno)) {
#else
	if ((pool_p->ipool_reqno == pool_p->ipool_resno) && !irm_debug_size) {
#endif	/* DEBUG */
		DDI_INTR_IRMDBG((CE_CONT,
		    "i_ddi_irm_balance: pool already balanced\n"));
		return;
	}

#ifdef	DEBUG	/* Adjust size and policy settings */
	if (irm_debug_size > pool_p->ipool_minno) {
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_balance: debug size %d\n",
		    irm_debug_size));
		debug_totsz = pool_p->ipool_totsz;
		pool_p->ipool_totsz = irm_debug_size;
	}
	if (DDI_IRM_POLICY_VALID(irm_debug_policy)) {
		DDI_INTR_IRMDBG((CE_CONT,
		    "i_ddi_irm_balance: debug policy %d\n", irm_debug_policy));
		debug_policy = pool_p->ipool_policy;
		pool_p->ipool_policy = irm_debug_policy;
	}
#endif	/* DEBUG */

	/* Lock the availability lock */
	mutex_enter(&pool_p->ipool_navail_lock);

	/*
	 * Put all of the reducible requests into a scratch list.
	 * Reset each one of them to their maximum availability.
	 */
	for (req_p = list_head(&pool_p->ipool_req_list); req_p;
	    req_p = list_next(&pool_p->ipool_req_list, req_p)) {
		if (DDI_IRM_IS_REDUCIBLE(req_p)) {
			pool_p->ipool_resno -= req_p->ireq_navail;
			req_p->ireq_scratch = req_p->ireq_navail;
			req_p->ireq_navail = req_p->ireq_nreq;
			pool_p->ipool_resno += req_p->ireq_navail;
			list_insert_tail(&pool_p->ipool_scratch_list, req_p);
		}
	}

	/* Balance the requests */
	i_ddi_irm_reduce(pool_p);

	/* Unlock the availability lock */
	mutex_exit(&pool_p->ipool_navail_lock);

	/*
	 * Process REMOVE notifications.
	 *
	 * If a driver fails to release interrupts: exclude it from
	 * further processing, correct the resulting imbalance, and
	 * start over again at the head of the scratch list.
	 */
	req_p = list_head(&pool_p->ipool_scratch_list);
	while (req_p) {
		if ((req_p->ireq_navail < req_p->ireq_scratch) &&
		    (i_ddi_irm_notify(pool_p, req_p) != DDI_SUCCESS)) {
			list_remove(&pool_p->ipool_scratch_list, req_p);
			mutex_enter(&pool_p->ipool_navail_lock);
			i_ddi_irm_reduce(pool_p);
			mutex_exit(&pool_p->ipool_navail_lock);
			req_p = list_head(&pool_p->ipool_scratch_list);
		} else {
			req_p = list_next(&pool_p->ipool_scratch_list, req_p);
		}
	}

	/*
	 * Process ADD notifications.
	 *
	 * This is the last use of the scratch list, so empty it.
	 */
	while (req_p = list_remove_head(&pool_p->ipool_scratch_list)) {
		if (req_p->ireq_navail > req_p->ireq_scratch) {
			(void) i_ddi_irm_notify(pool_p, req_p);
		}
	}

#ifdef	DEBUG	/* Restore size and policy settings */
	if (debug_totsz != 0)
		pool_p->ipool_totsz = debug_totsz;
	if (debug_policy != 0)
		pool_p->ipool_policy = debug_policy;
#endif	/* DEBUG */
}

/*
 * i_ddi_irm_reduce()
 *
 *	Use reduction algorithms to correct an imbalance in a pool.
 */
static void
i_ddi_irm_reduce(ddi_irm_pool_t *pool_p)
{
	int	imbalance;

	ASSERT(pool_p != NULL);
	ASSERT(MUTEX_HELD(&pool_p->ipool_lock));
	ASSERT(DDI_IRM_POLICY_VALID(pool_p->ipool_policy));

	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_reduce: pool_p %p\n",
	    (void *)pool_p));

	/* Compute the imbalance.  Do nothing if already balanced. */
	if ((imbalance = pool_p->ipool_resno - pool_p->ipool_totsz) <= 0)
		return;

	/*
	 * Try policy based reduction first.  If it failed, then
	 * possibly reduce new requests as a last resort.
	 */
	if (i_ddi_irm_reduce_by_policy(pool_p, imbalance, pool_p->ipool_policy)
	    != DDI_SUCCESS) {

		DDI_INTR_IRMDBG((CE_CONT,
		    "i_ddi_irm_reduce: policy reductions failed.\n"));

		/* Compute remaining imbalance */
		imbalance = pool_p->ipool_resno - pool_p->ipool_totsz;

		ASSERT(imbalance > 0);

		i_ddi_irm_reduce_new(pool_p, imbalance);
	}
}

/*
 * i_ddi_irm_enqueue()
 *
 *	Queue a pool to be balanced.  Signals the balancing thread to wake
 *	up and process the pool.  If 'wait_flag' is true, then the current
 *	thread becomes a waiter and blocks until the balance is completed.
 */
static void
i_ddi_irm_enqueue(ddi_irm_pool_t *pool_p, boolean_t wait_flag)
{
	ASSERT(pool_p != NULL);
	ASSERT(MUTEX_HELD(&pool_p->ipool_lock));

	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_enqueue: pool_p %p wait_flag %d\n",
	    (void *)pool_p, (int)wait_flag));

	/* Do nothing if pool is already balanced */
#ifndef	DEBUG
	if ((pool_p->ipool_reqno == pool_p->ipool_resno)) {
#else
	if ((pool_p->ipool_reqno == pool_p->ipool_resno) && !irm_debug_size) {
#endif	/* DEBUG */
		DDI_INTR_IRMDBG((CE_CONT,
		    "i_ddi_irm_enqueue: pool already balanced\n"));
		return;
	}

	/* Avoid deadlocks when IRM is not active */
	if (!irm_active && wait_flag) {
		DDI_INTR_IRMDBG((CE_CONT,
		    "i_ddi_irm_enqueue: pool not active.\n"));
		return;
	}

	if (wait_flag)
		pool_p->ipool_flags |= DDI_IRM_FLAG_WAITERS;

	if (wait_flag || !(pool_p->ipool_flags & DDI_IRM_FLAG_QUEUED)) {
		pool_p->ipool_flags |= DDI_IRM_FLAG_QUEUED;
		cv_signal(&pool_p->ipool_cv);
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_enqueue: pool queued.\n"));
	}

	if (wait_flag) {
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_enqueue: waiting...\n"));
		cv_wait(&pool_p->ipool_cv, &pool_p->ipool_lock);
	}
}

/*
 * i_ddi_irm_reduce_by_policy()
 *
 *	Reduces requests based on reduction policies.
 *
 *	For the DDI_IRM_POLICY_LARGE reduction policy, the algorithm
 *	generally reduces larger requests first, before advancing
 *	to smaller requests.
 *	For the DDI_IRM_POLICY_EVEN reduction policy, the algorithm
 *	reduces requests evenly, without giving a specific preference
 *	to smaller or larger requests.  Each iteration reduces all
 *	reducible requests by the same amount until the imbalance is
 *	corrected.
 *
 *	The scratch list is initially sorted in descending order by current
 *	navail values, which are maximized prior to reduction.  This sorted
 *	order is preserved.  It avoids reducing requests below the threshold
 *	of the interrupt pool's default allocation size.
 *
 *	Optimizations in this algorithm include trying to reduce multiple
 *	requests together.  And the algorithm attempts to reduce in larger
 *	increments when possible to minimize the total number of iterations.
 */
static int
i_ddi_irm_reduce_by_policy(ddi_irm_pool_t *pool_p, int imbalance, int policy)
{
	ASSERT(pool_p != NULL);
	ASSERT(imbalance > 0);
	ASSERT(MUTEX_HELD(&pool_p->ipool_lock));

	while (imbalance > 0) {
		list_t		*slist_p = &pool_p->ipool_scratch_list;
		ddi_irm_req_t	*req_p = list_head(slist_p), *last_p;
		uint_t		nreduce = 0, nremain = 0, stop_navail;
		uint_t		pool_defsz = pool_p->ipool_defsz;
		uint_t		reduction, max_redu;

		/* Fail if none are reducible */
		if (!req_p || req_p->ireq_navail <= pool_defsz) {
			DDI_INTR_IRMDBG((CE_CONT,
			    "i_ddi_irm_reduce_by_policy: Failure. "
			    "All requests have downsized to low limit.\n"));
			return (DDI_FAILURE);
		}

		/* Count reducible requests */
		stop_navail = (policy == DDI_IRM_POLICY_LARGE) ?
		    req_p->ireq_navail - 1 : pool_defsz;
		for (; req_p; req_p = list_next(slist_p, req_p)) {
			if (req_p->ireq_navail <= stop_navail)
				break;
			nreduce++;
		}

		/* Compute reduction */
		last_p = req_p ? list_prev(slist_p, req_p) : list_tail(slist_p);
		if ((policy == DDI_IRM_POLICY_LARGE) && req_p &&
		    req_p->ireq_navail > pool_defsz)
			reduction = last_p->ireq_navail - req_p->ireq_navail;
		else
			reduction = last_p->ireq_navail - pool_defsz;

		if ((max_redu = reduction * nreduce) > imbalance) {
			reduction = imbalance / nreduce;
			nremain = imbalance % nreduce;
			pool_p->ipool_resno -= imbalance;
			imbalance = 0;
		} else {
			pool_p->ipool_resno -= max_redu;
			imbalance -= max_redu;
		}

		/* Reduce the counted set by the computed amount */
		for (req_p = list_head(slist_p); (reduction != 0) && nreduce--;
		    req_p = list_next(slist_p, req_p)) {
			req_p->ireq_navail -= reduction;
		}

		/* Spread any remainder from the tail of the counted set */
		for (req_p = last_p; nremain--;
		    req_p = list_prev(slist_p, req_p)) {
			req_p->ireq_navail--;
		}
	}

	return (DDI_SUCCESS);
}

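/*
 * Worked example of DDI_IRM_POLICY_LARGE (illustrative numbers):
 * with a scratch list of navail values { 8, 6, 6, 4 }, pool_defsz = 2
 * and imbalance = 5, the first pass counts nreduce = 1 (only the 8),
 * computes reduction = 8 - 6 = 2, and yields { 6, 6, 6, 4 } with
 * imbalance = 3.  The second pass counts nreduce = 3 and would reduce
 * each by 6 - 4 = 2 (max_redu = 6 > 3), so reduction becomes
 * 3 / 3 = 1 and the list settles at { 5, 5, 5, 4 } with the
 * imbalance fully resolved.
 */
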
/*
 * i_ddi_irm_reduce_new()
 *
 *	Reduces new requests.  This is only used as a last resort
 *	after another reduction algorithm failed.
 *
 *	NOTE: The pool locking in i_ddi_irm_insert() ensures
 *	      there can be only one new request at a time in a pool.
 */
static void
i_ddi_irm_reduce_new(ddi_irm_pool_t *pool_p, int imbalance)
{
	ddi_irm_req_t	*req_p;

	ASSERT(pool_p != NULL);
	ASSERT(imbalance > 0);
	ASSERT(MUTEX_HELD(&pool_p->ipool_lock));

	DDI_INTR_IRMDBG((CE_CONT,
	    "i_ddi_irm_reduce_new: pool_p %p imbalance %d\n",
	    (void *)pool_p, imbalance));

	for (req_p = list_head(&pool_p->ipool_scratch_list); req_p;
	    req_p = list_next(&pool_p->ipool_scratch_list, req_p)) {
		if (req_p->ireq_flags & DDI_IRM_FLAG_NEW) {
			ASSERT(req_p->ireq_navail >= imbalance);
			req_p->ireq_navail -= imbalance;
			pool_p->ipool_resno -= imbalance;
			return;
		}
	}

	/* should never get here */
}

/*
 * Miscellaneous Helper Functions
 */

/*
 * i_ddi_intr_get_pool()
 *
 *	Get an IRM pool that supplies interrupts of a specified type.
 *	Invokes a DDI_INTROP_GETPOOL to the bus nexus driver.  Fails
 *	if no pool exists.
 */
ddi_irm_pool_t *
i_ddi_intr_get_pool(dev_info_t *dip, int type)
{
	devinfo_intr_t		*intr_p;
	ddi_irm_pool_t		*pool_p;
	ddi_irm_req_t		*req_p;
	ddi_intr_handle_impl_t	hdl;

	ASSERT(dip != NULL);
	ASSERT(DDI_INTR_TYPE_FLAG_VALID(type));

	if (((intr_p = DEVI(dip)->devi_intr_p) != NULL) &&
	    ((req_p = intr_p->devi_irm_req_p) != NULL) &&
	    ((pool_p = req_p->ireq_pool_p) != NULL) &&
	    (pool_p->ipool_types & type)) {
		return (pool_p);
	}

	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_dip = dip;
	hdl.ih_type = type;

	if (i_ddi_intr_ops(dip, dip, DDI_INTROP_GETPOOL,
	    &hdl, (void *)&pool_p) == DDI_SUCCESS)
		return (pool_p);

	return (NULL);
}

/*
 * i_ddi_irm_insertion_sort()
 *
 *	Use the insertion sort method to insert a request into a list.
 *	The list is sorted in descending order by request size.
 */
static void
i_ddi_irm_insertion_sort(list_t *req_list, ddi_irm_req_t *req_p)
{
	ddi_irm_req_t	*next_p;

	next_p = list_head(req_list);

	while (next_p && (next_p->ireq_nreq > req_p->ireq_nreq))
		next_p = list_next(req_list, next_p);

	list_insert_before(req_list, next_p, req_p);
}

/*
 * i_ddi_irm_notify()
 *
 *	Notify a driver of changes to its interrupt request using the
 *	generic callback mechanism.  Checks for errors in processing.
 */
static int
i_ddi_irm_notify(ddi_irm_pool_t *pool_p, ddi_irm_req_t *req_p)
{
	ddi_cb_action_t	action;
	ddi_cb_t	*cb_p;
	uint_t		nintrs;
	int		ret, count;

	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_notify: pool_p %p req_p %p\n",
	    (void *)pool_p, (void *)req_p));

	/* Do not notify new or unchanged requests */
	if ((req_p->ireq_navail == req_p->ireq_scratch) ||
	    (req_p->ireq_flags & DDI_IRM_FLAG_NEW))
		return (DDI_SUCCESS);

	/* Determine action and count */
	if (req_p->ireq_navail > req_p->ireq_scratch) {
		action = DDI_CB_INTR_ADD;
		count = req_p->ireq_navail - req_p->ireq_scratch;
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_notify: adding %d\n",
		    count));
	} else {
		action = DDI_CB_INTR_REMOVE;
		count = req_p->ireq_scratch - req_p->ireq_navail;
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_notify: removing %d\n",
		    count));
	}

	/* Lookup driver callback */
	if ((cb_p = DEVI(req_p->ireq_dip)->devi_cb_p) == NULL) {
		DDI_INTR_IRMDBG((CE_WARN, "i_ddi_irm_notify: no callback!\n"));
		return (DDI_FAILURE);
	}

	/* Do callback */
	ret = cb_p->cb_func(req_p->ireq_dip, action, (void *)(uintptr_t)count,
	    cb_p->cb_arg1, cb_p->cb_arg2);

	/* Log callback errors */
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: failed callback (action=%d, ret=%d)\n",
		    ddi_driver_name(req_p->ireq_dip),
		    ddi_get_instance(req_p->ireq_dip), (int)action, ret);
	}

	/* Check if the driver exceeds its availability */
	nintrs = i_ddi_intr_get_current_nintrs(req_p->ireq_dip);
	if (nintrs > req_p->ireq_navail) {
		cmn_err(CE_WARN, "%s%d: failed to release interrupts "
		    "(nintrs=%d, navail=%d).\n",
		    ddi_driver_name(req_p->ireq_dip),
		    ddi_get_instance(req_p->ireq_dip), nintrs,
		    req_p->ireq_navail);
		pool_p->ipool_resno += (nintrs - req_p->ireq_navail);
		req_p->ireq_navail = nintrs;
		return (DDI_FAILURE);
	}

	/* Update request */
	req_p->ireq_scratch = req_p->ireq_navail;

	return (DDI_SUCCESS);
}

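/*
 * Illustrative sketch of the receiving side: a driver callback that
 * handles these notifications ("xx_cb" is a hypothetical name, and
 * the vector setup/teardown work is elided):
 *
 *	static int
 *	xx_cb(dev_info_t *dip, ddi_cb_action_t action, void *cbarg,
 *	    void *arg1, void *arg2)
 *	{
 *		int count = (int)(uintptr_t)cbarg;
 *
 *		switch (action) {
 *		case DDI_CB_INTR_ADD:
 *			... allocate and enable 'count' more vectors ...
 *			break;
 *		case DDI_CB_INTR_REMOVE:
 *			... quiesce and free 'count' vectors ...
 *			break;
 *		default:
 *			return (DDI_ENOTSUP);
 *		}
 *		return (DDI_SUCCESS);
 *	}
 */
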
#ifdef	DEBUG
/*
 * i_ddi_irm_debug_balance()
 *
 *	A debug/test only routine to force the immediate,
 *	synchronous rebalancing of an interrupt pool.
 */
void
i_ddi_irm_debug_balance(dev_info_t *dip, boolean_t wait_flag)
{
	ddi_irm_pool_t	*pool_p;
	int		type;

	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_debug_balance: dip %p wait %d\n",
	    (void *)dip, (int)wait_flag));

	if (((type = i_ddi_intr_get_current_type(dip)) != 0) &&
	    ((pool_p = i_ddi_intr_get_pool(dip, type)) != NULL)) {
		mutex_enter(&pool_p->ipool_lock);
		i_ddi_irm_enqueue(pool_p, wait_flag);
		mutex_exit(&pool_p->ipool_lock);
	}
}
#endif	/* DEBUG */