/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2013, Joyent, Inc.  All rights reserved.
 */
15 #include <sys/cmn_err.h>
16 #include <sys/ddi_periodic.h>
17 #include <sys/id_space.h>
19 #include <sys/sysmacros.h>
20 #include <sys/systm.h>
21 #include <sys/taskq.h>
22 #include <sys/taskq_impl.h>
24 #include <sys/types.h>
27 extern void sir_on(int);
/*
 * The ddi_periodic_add(9F) Implementation
 *
 * This file contains the implementation of the ddi_periodic_add(9F) interface.
 * It is a thin wrapper around the cyclic subsystem (see documentation in
 * kernel/os/cyclic.c), providing a DDI interface for registering (and
 * unregistering) callbacks for periodic invocation at arbitrary interrupt
 * levels, or in kernel context.
 *
 * Each call to ddi_periodic_add will result in a new opaque handle, as
 * allocated from an id_space, a new "periodic" object (ddi_periodic_impl_t)
 * and a registered cyclic.
 *
 * Operation
 *
 * Whenever the cyclic fires, our cyclic handler checks that the particular
 * periodic is not dispatched already (we do not support overlapping execution
 * of the consumer's handler function), and not yet cancelled.  If both of
 * these conditions hold, we mark the periodic as DPF_DISPATCHED and enqueue it
 * to either the taskq (for DDI_IPL_0) or to one of the soft interrupt queues
 * (DDI_IPL_1 to DDI_IPL_10).
 *
 * While the taskq (or soft interrupt handler) is handling a particular
 * periodic, we mark it as DPF_EXECUTING.  When complete, we reset both
 * DPF_DISPATCHED and DPF_EXECUTING.
 *
 * Cancellation
 *
 * ddi_periodic_delete(9F) historically had spectacularly loose semantics with
 * respect to cancellation concurrent with handler execution.  These semantics
 * are now tighter:
 *
 *   1.  At most one invocation of ddi_periodic_delete(9F) will actually
 *       perform the deletion, all others will return immediately.
 *   2.  The invocation that performs the deletion will _block_ until
 *       the handler is no longer running, and all resources have been
 *       released.
 *
 * We affect this model by removing the cancelling periodic from the
 * global list and marking it DPF_CANCELLED.  This will prevent further
 * execution of the handler.  We then wait on a CV until the DPF_EXECUTING
 * and DPF_DISPATCHED flags are clear, which means the periodic is removed
 * from all request queues, is no longer executing, and may be freed.  At this
 * point we return the opaque ID to the id_space and free the memory.
 *
 * NOTE:
 * The ddi_periodic_add(9F) interface is presently limited to a minimum period
 * of 10ms between firings.
 */
82 int ddi_periodic_max_id
= 1024;
83 int ddi_periodic_taskq_threadcount
= 4;
84 hrtime_t ddi_periodic_resolution
= 10000000;
89 static kmem_cache_t
*periodic_cache
;
90 static id_space_t
*periodic_id_space
;
91 static taskq_t
*periodic_taskq
;
94 * periodics_lock protects the list of all periodics (periodics), and
95 * each of the soft interrupt request queues (periodic_softint_queue).
97 * Do not hold an individual periodic's lock while obtaining periodics_lock.
98 * While in the periodic_softint_queue list, the periodic will be marked
99 * DPF_DISPATCHED, and thus safe from frees. Only the invocation of
100 * i_untimeout() that removes the periodic from the global list is allowed
103 static kmutex_t periodics_lock
;
104 static list_t periodics
;
105 static list_t periodic_softint_queue
[10]; /* for IPL1 up to IPL10 */
/*
 * Dispatch priority levels.  PERI_IPL_0 handlers run in kernel (taskq)
 * context; PERI_IPL_1 through PERI_IPL_10 select the corresponding soft
 * interrupt queue (indexed as level - 1 in periodic_softint_queue).
 */
typedef enum periodic_ipl {
	PERI_IPL_0 = 0,
	PERI_IPL_1,
	PERI_IPL_2,
	PERI_IPL_3,
	PERI_IPL_4,
	PERI_IPL_5,
	PERI_IPL_6,
	PERI_IPL_7,
	PERI_IPL_8,
	PERI_IPL_9,
	PERI_IPL_10
} periodic_ipl_t;
122 periodic_handler_symbol(ddi_periodic_impl_t
*dpr
)
126 return (kobj_getsymname((uintptr_t)dpr
->dpr_handler
, &off
));
130 * This function may be called either from a soft interrupt handler
131 * (ddi_periodic_softintr), or as a taskq worker function.
134 periodic_execute(void *arg
)
136 ddi_periodic_impl_t
*dpr
= arg
;
137 mutex_enter(&dpr
->dpr_lock
);
140 * We must be DISPATCHED, but not yet EXECUTING:
142 VERIFY((dpr
->dpr_flags
& (DPF_DISPATCHED
| DPF_EXECUTING
)) ==
144 VERIFY(dpr
->dpr_thread
== NULL
);
146 if (!(dpr
->dpr_flags
& DPF_CANCELLED
)) {
147 int level
= dpr
->dpr_level
;
148 uint64_t count
= dpr
->dpr_fire_count
;
150 * If we have not yet been cancelled, then
153 dpr
->dpr_flags
|= DPF_EXECUTING
;
154 dpr
->dpr_thread
= curthread
;
155 mutex_exit(&dpr
->dpr_lock
);
158 * Execute the handler, without holding locks:
160 DTRACE_PROBE4(ddi__periodic__execute
, void *, dpr
->dpr_handler
,
161 void *, dpr
->dpr_arg
, int, level
, uint64_t, count
);
162 (*dpr
->dpr_handler
)(dpr
->dpr_arg
);
163 DTRACE_PROBE4(ddi__periodic__done
, void *, dpr
->dpr_handler
,
164 void *, dpr
->dpr_arg
, int, level
, uint64_t, count
);
166 mutex_enter(&dpr
->dpr_lock
);
167 dpr
->dpr_thread
= NULL
;
168 dpr
->dpr_fire_count
++;
172 * We're done with this periodic for now, so release it and
173 * wake anybody that was waiting for us to be finished:
175 dpr
->dpr_flags
&= ~(DPF_DISPATCHED
| DPF_EXECUTING
);
176 cv_broadcast(&dpr
->dpr_cv
);
177 mutex_exit(&dpr
->dpr_lock
);
181 ddi_periodic_softintr(int level
)
183 ddi_periodic_impl_t
*dpr
;
184 VERIFY(level
>= PERI_IPL_1
&& level
<= PERI_IPL_10
);
186 mutex_enter(&periodics_lock
);
188 * Pull the first scheduled periodic off the queue for this priority
191 while ((dpr
= list_remove_head(&periodic_softint_queue
[level
- 1])) !=
193 mutex_exit(&periodics_lock
);
197 periodic_execute(dpr
);
198 mutex_enter(&periodics_lock
);
200 mutex_exit(&periodics_lock
);
204 ddi_periodic_init(void)
209 * Create a kmem_cache for request tracking objects, and a list
210 * to store them in so we can later delete based on opaque handles:
212 periodic_cache
= kmem_cache_create("ddi_periodic",
213 sizeof (ddi_periodic_impl_t
), 0, NULL
, NULL
, NULL
, NULL
, NULL
, 0);
214 list_create(&periodics
, sizeof (ddi_periodic_impl_t
),
215 offsetof(ddi_periodic_impl_t
, dpr_link
));
218 * Initialise the identifier space for ddi_periodic_add(9F):
220 periodic_id_space
= id_space_create("ddi_periodic", 1,
221 ddi_periodic_max_id
);
224 * Initialise the request queue for each soft interrupt level:
226 for (i
= PERI_IPL_1
; i
<= PERI_IPL_10
; i
++) {
227 list_create(&periodic_softint_queue
[i
- 1],
228 sizeof (ddi_periodic_impl_t
), offsetof(ddi_periodic_impl_t
,
233 * Create the taskq for running PERI_IPL_0 handlers. This taskq will
234 * _only_ be used with taskq_dispatch_ent(), and a taskq_ent_t
235 * pre-allocated with the ddi_periodic_impl_t.
237 periodic_taskq
= taskq_create_instance("ddi_periodic_taskq", -1,
238 ddi_periodic_taskq_threadcount
, maxclsyspri
, 0, 0, 0);
241 * Initialize the mutex lock used for the soft interrupt request
244 mutex_init(&periodics_lock
, NULL
, MUTEX_ADAPTIVE
, NULL
);
248 ddi_periodic_fini(void)
251 ddi_periodic_impl_t
*dpr
;
254 * Find all periodics that have not yet been unregistered and,
255 * on DEBUG bits, print a warning about this resource leak.
257 mutex_enter(&periodics_lock
);
258 while ((dpr
= list_head(&periodics
)) != NULL
) {
260 printf("DDI periodic handler not deleted (id=%lx, hdlr=%s)\n",
261 (unsigned long)dpr
->dpr_id
, periodic_handler_symbol(dpr
));
264 mutex_exit(&periodics_lock
);
266 * Delete the periodic ourselves:
268 i_untimeout((timeout_t
)(uintptr_t)dpr
->dpr_id
);
269 mutex_enter(&periodics_lock
);
271 mutex_exit(&periodics_lock
);
274 * At this point there are no remaining cyclics, so clean up the
275 * remaining resources:
277 taskq_destroy(periodic_taskq
);
278 periodic_taskq
= NULL
;
280 id_space_destroy(periodic_id_space
);
281 periodic_id_space
= NULL
;
283 kmem_cache_destroy(periodic_cache
);
284 periodic_cache
= NULL
;
286 list_destroy(&periodics
);
287 for (i
= PERI_IPL_1
; i
<= PERI_IPL_10
; i
++)
288 list_destroy(&periodic_softint_queue
[i
- 1]);
290 mutex_destroy(&periodics_lock
);
294 periodic_cyclic_handler(void *arg
)
296 ddi_periodic_impl_t
*dpr
= arg
;
298 mutex_enter(&dpr
->dpr_lock
);
300 * If we've been cancelled, or we're already dispatched, then exit
303 if (dpr
->dpr_flags
& (DPF_CANCELLED
| DPF_DISPATCHED
)) {
304 mutex_exit(&dpr
->dpr_lock
);
307 VERIFY(!(dpr
->dpr_flags
& DPF_EXECUTING
));
310 * This periodic is not presently dispatched, so dispatch it now:
312 dpr
->dpr_flags
|= DPF_DISPATCHED
;
313 mutex_exit(&dpr
->dpr_lock
);
315 if (dpr
->dpr_level
== PERI_IPL_0
) {
317 * DDI_IPL_0 periodics are dispatched onto the taskq:
319 taskq_dispatch_ent(periodic_taskq
, periodic_execute
,
320 dpr
, 0, &dpr
->dpr_taskq_ent
);
323 * Higher priority periodics are handled by a soft interrupt
324 * handler. Enqueue us for processing by the handler:
326 mutex_enter(&periodics_lock
);
327 list_insert_tail(&periodic_softint_queue
[dpr
->dpr_level
- 1],
329 mutex_exit(&periodics_lock
);
332 * Request the execution of the soft interrupt handler for this
333 * periodic's priority level.
335 sir_on(dpr
->dpr_level
);
340 periodic_destroy(ddi_periodic_impl_t
*dpr
)
346 * By now, we should have a periodic that is not busy, and has been
349 VERIFY(dpr
->dpr_flags
== DPF_CANCELLED
);
350 VERIFY(dpr
->dpr_thread
== NULL
);
352 id_free(periodic_id_space
, dpr
->dpr_id
);
353 cv_destroy(&dpr
->dpr_cv
);
354 mutex_destroy(&dpr
->dpr_lock
);
355 kmem_cache_free(periodic_cache
, dpr
);
358 static ddi_periodic_impl_t
*
359 periodic_create(void)
361 ddi_periodic_impl_t
*dpr
;
363 dpr
= kmem_cache_alloc(periodic_cache
, KM_SLEEP
);
364 bzero(dpr
, sizeof (*dpr
));
365 dpr
->dpr_id
= id_alloc(periodic_id_space
);
366 mutex_init(&dpr
->dpr_lock
, NULL
, MUTEX_ADAPTIVE
, NULL
);
367 cv_init(&dpr
->dpr_cv
, NULL
, CV_DEFAULT
, NULL
);
373 * This function provides the implementation for the ddi_periodic_add(9F)
374 * interface. It registers a periodic handler and returns an opaque identifier
375 * that can be unregistered via ddi_periodic_delete(9F)/i_untimeout().
377 * It may be called in user or kernel context, provided cpu_lock is not held.
380 i_timeout(void (*func
)(void *), void *arg
, hrtime_t interval
, int level
)
384 ddi_periodic_impl_t
*dpr
;
386 VERIFY(func
!= NULL
);
387 VERIFY(level
>= 0 && level
<= 10);
390 * Allocate object to track this periodic:
392 dpr
= periodic_create();
393 dpr
->dpr_level
= level
;
394 dpr
->dpr_handler
= func
;
398 * The minimum supported interval between firings of the periodic
399 * handler is 10ms; see ddi_periodic_add(9F) for more details. If a
400 * shorter interval is requested, round up.
402 if (ddi_periodic_resolution
> interval
) {
404 "The periodic timeout (handler=%s, interval=%lld) "
405 "requests a finer interval than the supported resolution. "
406 "It rounds up to %lld\n", periodic_handler_symbol(dpr
),
407 interval
, ddi_periodic_resolution
);
408 interval
= ddi_periodic_resolution
;
412 * Ensure that the interval is an even multiple of the base resolution
413 * that is at least as long as the requested interval.
415 dpr
->dpr_interval
= roundup(interval
, ddi_periodic_resolution
);
418 * Create the underlying cyclic:
420 cyh
.cyh_func
= periodic_cyclic_handler
;
422 cyh
.cyh_level
= CY_LOCK_LEVEL
;
425 cyt
.cyt_interval
= dpr
->dpr_interval
;
427 mutex_enter(&cpu_lock
);
428 dpr
->dpr_cyclic_id
= cyclic_add(&cyh
, &cyt
);
429 mutex_exit(&cpu_lock
);
432 * Make the id visible to ddi_periodic_delete(9F) before we
435 mutex_enter(&periodics_lock
);
436 list_insert_tail(&periodics
, dpr
);
437 mutex_exit(&periodics_lock
);
439 return ((timeout_t
)(uintptr_t)dpr
->dpr_id
);
443 * This function provides the implementation for the ddi_periodic_delete(9F)
444 * interface. It cancels a periodic handler previously registered through
445 * ddi_periodic_add(9F)/i_timeout().
447 * It may be called in user or kernel context, provided cpu_lock is not held.
448 * It may NOT be called from within a periodic handler.
451 i_untimeout(timeout_t id
)
453 ddi_periodic_impl_t
*dpr
;
456 * Find the periodic in the list of all periodics and remove it.
457 * If we find in (and remove it from) the global list, we have
458 * license to free it once it is no longer busy.
460 mutex_enter(&periodics_lock
);
461 for (dpr
= list_head(&periodics
); dpr
!= NULL
; dpr
=
462 list_next(&periodics
, dpr
)) {
463 if (dpr
->dpr_id
== (id_t
)(uintptr_t)id
) {
464 list_remove(&periodics
, dpr
);
468 mutex_exit(&periodics_lock
);
471 * We could not find a periodic for this id, so bail out:
476 mutex_enter(&dpr
->dpr_lock
);
478 * We should be the only one trying to cancel this periodic:
480 VERIFY(!(dpr
->dpr_flags
& DPF_CANCELLED
));
482 * Removing a periodic from within its own handler function will
483 * cause a deadlock, so panic explicitly.
485 if (dpr
->dpr_thread
== curthread
) {
486 panic("ddi_periodic_delete(%lx) called from its own handler\n",
487 (unsigned long)dpr
->dpr_id
);
490 * Mark the periodic as cancelled:
492 dpr
->dpr_flags
|= DPF_CANCELLED
;
493 mutex_exit(&dpr
->dpr_lock
);
496 * Cancel our cyclic. cyclic_remove() guarantees that the cyclic
497 * handler will not run again after it returns. Note that the cyclic
498 * handler merely _dispatches_ the periodic, so this does _not_ mean
499 * the periodic handler is also finished running.
501 mutex_enter(&cpu_lock
);
502 cyclic_remove(dpr
->dpr_cyclic_id
);
503 mutex_exit(&cpu_lock
);
506 * Wait until the periodic handler is no longer running:
508 mutex_enter(&dpr
->dpr_lock
);
509 while (dpr
->dpr_flags
& (DPF_DISPATCHED
| DPF_EXECUTING
)) {
510 cv_wait(&dpr
->dpr_cv
, &dpr
->dpr_lock
);
512 mutex_exit(&dpr
->dpr_lock
);
514 periodic_destroy(dpr
);