/*
 * Copyright (c) 2004,2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The original callout mechanism was based on the work of Adam M. Costello
 * and George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 *
 * The per-cpu augmentation was done by Matthew Dillon.  This file has
 * essentially been rewritten pretty much from scratch by Matt.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/thread.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
struct softclock_pcpu {
        struct callout_tailq *callwheel;
        struct callout * volatile next;
        intptr_t running;       /* NOTE! Bit 0 used to flag wakeup */
        int softticks;          /* softticks index */
        int curticks;           /* per-cpu ticks counter */
        int isrunning;
        struct thread *thread;
};

typedef struct softclock_pcpu *softclock_pcpu_t;
static MALLOC_DEFINE(M_CALLOUT, "callout", "callout structures");
static int cwheelsize;
static int cwheelmask;
static struct softclock_pcpu softclock_pcpu_ary[MAXCPU];

static void softclock_handler(void *arg);
static void slotimer_callback(void *arg);
static void callout_reset_ipi(void *arg);
static void callout_stop_ipi(void *arg, int issync, struct intrframe *frame);
static __inline
int
callout_setclear(struct callout *c, int sflags, int cflags)
{
        int flags;
        int nflags;

        for (;;) {
                flags = c->c_flags;
                cpu_ccfence();
                nflags = (flags | sflags) & ~cflags;
                if (atomic_cmpset_int(&c->c_flags, flags, nflags))
                        break;
        }
        return flags;
}

static void
swi_softclock_setup(void *arg)
{
        int cpu;
        int i;
        int target;

        /*
         * Figure out how large a callwheel we need.  It must be a power of 2.
         *
         * ncallout is primarily based on available memory, don't explode
         * the allocations if the system has a lot of cpus.
         */
        target = ncallout / ncpus + 16;

        cwheelsize = 1;
        while (cwheelsize < target)
                cwheelsize <<= 1;
        cwheelmask = cwheelsize - 1;

        /*
         * Initialize per-cpu data structures.
         */
        for (cpu = 0; cpu < ncpus; ++cpu) {
                softclock_pcpu_t sc;

                sc = &softclock_pcpu_ary[cpu];

                sc->callwheel = kmalloc(sizeof(*sc->callwheel) * cwheelsize,
                                        M_CALLOUT, M_WAITOK|M_ZERO);
                for (i = 0; i < cwheelsize; ++i)
                        TAILQ_INIT(&sc->callwheel[i]);

                /*
                 * Mark the softclock handler as being an interrupt thread
                 * even though it really isn't, but do not allow it to
                 * preempt other threads (do not assign td_preemptable).
                 *
                 * Kernel code now assumes that callouts do not preempt
                 * the cpu they were scheduled on.
                 */
                lwkt_create(softclock_handler, sc, &sc->thread, NULL,
                            TDF_NOSTART | TDF_INTTHREAD,
                            cpu, "softclock %d", cpu);
        }
}

/*
 * Must occur after ncpus has been initialized.
 */
SYSINIT(softclock_setup, SI_BOOT2_SOFTCLOCK, SI_ORDER_SECOND,
        swi_softclock_setup, NULL);

/*
 * This routine is called from the hardclock() (basically a FASTint/IPI) on
 * each cpu in the system.  sc->curticks is this cpu's notion of the timebase.
 * It IS NOT NECESSARILY SYNCHRONIZED WITH 'ticks'!  sc->softticks is where
 * the callwheel is currently indexed.
 *
 * WARNING!  The MP lock is not necessarily held on call, nor can it be
 * safely obtained.
 *
 * sc->softticks is adjusted by either this routine or our helper thread
 * depending on whether the helper thread is running or not.
 */
void
hardclock_softtick(globaldata_t gd)
{
        softclock_pcpu_t sc;

        sc = &softclock_pcpu_ary[gd->gd_cpuid];
        ++sc->curticks;
        if (sc->isrunning)
                return;
        if (sc->softticks == sc->curticks) {
                /*
                 * In sync, only wakeup the thread if there is something to
                 * do.
                 */
                if (TAILQ_FIRST(&sc->callwheel[sc->softticks & cwheelmask])) {
                        sc->isrunning = 1;
                        lwkt_schedule(sc->thread);
                } else {
                        ++sc->softticks;
                }
        } else {
                /*
                 * out of sync, wakeup the thread unconditionally so it can
                 * catch up.
                 */
                sc->isrunning = 1;
                lwkt_schedule(sc->thread);
        }
}

/*
 * This procedure is the main loop of our per-cpu helper thread.  The
 * sc->isrunning flag prevents us from racing hardclock_softtick() and
 * a critical section is sufficient to interlock sc->curticks and protect
 * us from remote IPI's / list removal.
 *
 * The thread starts with the MP lock released and not in a critical
 * section.  The loop itself is MP safe while individual callbacks
 * may or may not be, so we obtain or release the MP lock as appropriate.
 */
static void
softclock_handler(void *arg)
{
        softclock_pcpu_t sc;
        struct callout *c;
        struct callout_tailq *bucket;
        struct callout slotimer;
        int mpsafe = 1;
        int flags;

        /*
         * Setup pcpu slow clocks which we want to run from the callout
         * thread.
         */
        callout_init_mp(&slotimer);
        callout_reset(&slotimer, hz * 10, slotimer_callback, &slotimer);

        /*
         * Run the callout thread at the same priority as other kernel
         * threads so it can be round-robined.
         */
        /*lwkt_setpri_self(TDPRI_SOFT_NORM);*/

        /*
         * Loop critical section against ipi operations to this cpu.
         */
        sc = arg;
        crit_enter();
loop:
        while (sc->softticks != (int)(sc->curticks + 1)) {
                bucket = &sc->callwheel[sc->softticks & cwheelmask];

                for (c = TAILQ_FIRST(bucket); c; c = sc->next) {
                        void (*c_func)(void *);
                        void *c_arg;
                        struct lock *c_lk;
                        int error;

                        if (c->c_time != sc->softticks) {
                                sc->next = TAILQ_NEXT(c, c_links.tqe);
                                continue;
                        }

                        /*
                         * Synchronize with mpsafe requirements
                         */
                        flags = c->c_flags;
                        if (flags & CALLOUT_MPSAFE) {
                                if (mpsafe == 0) {
                                        mpsafe = 1;
                                        rel_mplock();
                                }
                        } else {
                                /*
                                 * The request might be removed while we
                                 * are waiting to get the MP lock.  If it
                                 * was removed sc->next will point to the
                                 * next valid request or NULL, loop up.
                                 */
                                if (mpsafe) {
                                        mpsafe = 0;
                                        sc->next = c;
                                        get_mplock();
                                        if (c != sc->next)
                                                continue;
                                }
                        }

                        /*
                         * Queue protection only exists while we hold the
                         * critical section uninterrupted.
                         *
                         * Adjust sc->next when removing (c) from the queue,
                         * note that an IPI on this cpu may make further
                         * adjustments to sc->next.
                         */
                        sc->next = TAILQ_NEXT(c, c_links.tqe);
                        TAILQ_REMOVE(bucket, c, c_links.tqe);

                        KASSERT((c->c_flags & CALLOUT_DID_INIT) &&
                                (c->c_flags & CALLOUT_PENDING) &&
                                CALLOUT_FLAGS_TO_CPU(c->c_flags) ==
                                mycpu->gd_cpuid,
                                ("callout %p: bad flags %08x", c, c->c_flags));

                        /*
                         * Once CALLOUT_PENDING is cleared only the IPI_MASK
                         * prevents the callout from being moved to another
                         * cpu.  However, callout_stop() will also check
                         * sc->running on the assigned cpu if CALLOUT_EXECUTED
                         * is set.  CALLOUT_EXECUTE implies a callback
                         * interlock is needed when cross-cpu.
                         */
                        sc->running = (intptr_t)c;
                        c_func = c->c_func;
                        c_arg = c->c_arg;
                        c_lk = c->c_lk;
                        c->c_func = NULL;

                        if ((flags & (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) ==
                            (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) {
                                error = lockmgr(c_lk, LK_EXCLUSIVE |
                                                      LK_CANCELABLE);
                                if (error == 0) {
                                        flags = callout_setclear(c,
                                                        CALLOUT_EXECUTED,
                                                        CALLOUT_PENDING |
                                                        CALLOUT_WAITING);
                                        crit_exit();
                                        c_func(c_arg);
                                        crit_enter();
                                        lockmgr(c_lk, LK_RELEASE);
                                } else {
                                        flags = callout_setclear(c,
                                                        0,
                                                        CALLOUT_PENDING);
                                }
                        } else if (flags & CALLOUT_ACTIVE) {
                                flags = callout_setclear(c,
                                                CALLOUT_EXECUTED,
                                                CALLOUT_PENDING |
                                                CALLOUT_WAITING);
                                crit_exit();
                                c_func(c_arg);
                                crit_enter();
                        } else {
                                flags = callout_setclear(c,
                                                0,
                                                CALLOUT_PENDING |
                                                CALLOUT_WAITING);
                        }

                        /*
                         * Read and clear sc->running.  If bit 0 was set,
                         * a callout_stop() is likely blocked waiting for
                         * the callback to complete.
                         *
                         * The sigclear above also cleared CALLOUT_WAITING
                         * and returns the contents of flags prior to clearing
                         * any bits.
                         *
                         * Interlock wakeup any _stop's waiting on us.  Note
                         * that once c_func() was called, the callout
                         * structure (c) pointer may no longer be valid.  It
                         * can only be used for the wakeup.
                         */
                        if ((atomic_readandclear_ptr(&sc->running) & 1) ||
                            (flags & CALLOUT_WAITING)) {
                                wakeup(c);
                        }
                        /* NOTE: list may have changed */
                }
                ++sc->softticks;
        }

        /*
         * Don't leave us holding the MP lock when we deschedule ourselves.
         */
        if (mpsafe == 0) {
                mpsafe = 1;
                rel_mplock();
        }
        sc->isrunning = 0;
        lwkt_deschedule_self(sc->thread);       /* == curthread */
        lwkt_switch();
        goto loop;
        /* NOT REACHED */
}

/*
 * A very slow system cleanup timer (10 second interval).
 */
static void
slotimer_callback(void *arg)
{
        struct callout *c = arg;

        slab_cleanup();
        callout_reset(c, hz * 10, slotimer_callback, c);
}

/*
 * Start or restart a timeout.  Installs the callout structure on the
 * callwheel of the current cpu.  Callers may legally pass any value, even
 * if 0 or negative, but since the sc->curticks index may have already
 * been processed a minimum timeout of 1 tick will be enforced.
 *
 * This function will block if the callout is currently queued to a different
 * cpu or the callback is currently running in another thread.
 */
void
callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg)
{
        softclock_pcpu_t sc;
        globaldata_t gd;

#ifdef INVARIANTS
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
                callout_init(c);
                kprintf(
                    "callout_reset(%p) from %p: callout was not initialized\n",
                    c, ((int **)&c)[-1]);
                print_backtrace(-1);
        }
#endif
        gd = mycpu;
        sc = &softclock_pcpu_ary[gd->gd_cpuid];
        crit_enter_gd(gd);

        /*
         * Our cpu must gain ownership of the callout and cancel anything
         * still running, which is complex.  The easiest way to do it is to
         * issue a callout_stop_sync().  callout_stop_sync() will also
         * handle CALLOUT_EXECUTED (dispatch waiting), and clear it.
         *
         * WARNING: callout_stop_sync()'s return state can race other
         *          callout_*() calls due to blocking, so we must re-check.
         */
        for (;;) {
                int flags;
                int nflags;

                if (c->c_flags & (CALLOUT_ARMED_MASK | CALLOUT_EXECUTED))
                        callout_stop_sync(c);
                flags = c->c_flags & ~(CALLOUT_ARMED_MASK | CALLOUT_EXECUTED);
                nflags = (flags & ~CALLOUT_CPU_MASK) |
                         CALLOUT_CPU_TO_FLAGS(gd->gd_cpuid) |
                         CALLOUT_PENDING |
                         CALLOUT_ACTIVE;
                if (atomic_cmpset_int(&c->c_flags, flags, nflags))
                        break;
                cpu_pause();
        }

        /*
         * With the critical section held and PENDING set we now 'own' the
         * callout.
         */
        if (to_ticks <= 0)
                to_ticks = 1;

        c->c_arg = arg;
        c->c_func = ftn;
        c->c_time = sc->curticks + to_ticks;

        TAILQ_INSERT_TAIL(&sc->callwheel[c->c_time & cwheelmask],
                          c, c_links.tqe);
        crit_exit_gd(gd);
}

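/*
 * Example usage (illustrative sketch only, not part of this file): a
 * typical consumer arms a periodic timer with callout_reset() and re-arms
 * it from inside its own callback.  The names mydev_softc and mydev_tick()
 * below are hypothetical.
 *
 *	struct mydev_softc {
 *		struct callout	tick_timer;
 *	};
 *
 *	static void
 *	mydev_tick(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		// periodic work goes here, then re-arm for one second later
 *		callout_reset(&sc->tick_timer, hz, mydev_tick, sc);
 *	}
 *
 *	// once, at attach time, from any cpu:
 *	callout_init_mp(&sc->tick_timer);
 *	callout_reset(&sc->tick_timer, hz, mydev_tick, sc);
 */
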
/*
 * Setup a callout to run on the specified cpu.  Should generally be used
 * to run a callout on a specific cpu which does not nominally change.  This
 * callout_reset() will be issued asynchronously via an IPI.
 */
void
callout_reset_bycpu(struct callout *c, int to_ticks, void (*ftn)(void *),
                    void *arg, int cpuid)
{
        globaldata_t gd;
        globaldata_t tgd;

#ifdef INVARIANTS
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
                callout_init(c);
                kprintf(
                    "callout_reset(%p) from %p: callout was not initialized\n",
                    c, ((int **)&c)[-1]);
                print_backtrace(-1);
        }
#endif
        gd = mycpu;
        crit_enter_gd(gd);

        tgd = globaldata_find(cpuid);

        /*
         * This code is similar to the code in callout_reset() but we assign
         * the callout to the target cpu.  We cannot set PENDING here since
         * we cannot atomically add the callout to the target cpu's queue.
         * However, incrementing the IPI count has the effect of locking
         * the cpu assignment.
         *
         * WARNING: callout_stop_sync()'s return state can race other
         *          callout_*() calls due to blocking, so we must re-check.
         */
        for (;;) {
                int flags;
                int nflags;

                if (c->c_flags & (CALLOUT_ARMED_MASK | CALLOUT_EXECUTED))
                        callout_stop_sync(c);
                flags = c->c_flags & ~(CALLOUT_ARMED_MASK | CALLOUT_EXECUTED);
                nflags = (flags & ~(CALLOUT_CPU_MASK |
                                    CALLOUT_EXECUTED)) |
                         CALLOUT_CPU_TO_FLAGS(tgd->gd_cpuid) |
                         CALLOUT_ACTIVE;
                nflags = nflags + 1;            /* bump IPI count */
                if (atomic_cmpset_int(&c->c_flags, flags, nflags))
                        break;
                cpu_pause();
        }

        /*
         * Since we control our +1 in the IPI count, the target cpu cannot
         * now change until our IPI is processed.
         */
        c->c_arg = arg;
        c->c_func = ftn;
        c->c_load = to_ticks;   /* IPI will add curticks */

        lwkt_send_ipiq(tgd, callout_reset_ipi, c);
        crit_exit_gd(gd);
}

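/*
 * Example usage (illustrative sketch only): pin a callout to a particular
 * cpu so its callback always runs on that cpu's softclock thread.  The
 * names poll_timer, poll_tick() and poll_arg are hypothetical.
 *
 *	callout_init_mp(&poll_timer);
 *	callout_reset_bycpu(&poll_timer, hz / 100, poll_tick, poll_arg, 0);
 */
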
/*
 * Remote IPI for callout_reset_bycpu().  The cpu assignment cannot be
 * ripped out from under us due to the count in IPI_MASK, but it is possible
 * that other IPIs executed so we must deal with other flags that might
 * have been set or cleared.
 */
static void
callout_reset_ipi(void *arg)
{
        struct callout *c = arg;
        globaldata_t gd = mycpu;
        softclock_pcpu_t sc;
        int flags;
        int nflags;

        sc = &softclock_pcpu_ary[gd->gd_cpuid];

        for (;;) {
                flags = c->c_flags;
                cpu_ccfence();
                KKASSERT((flags & CALLOUT_IPI_MASK) > 0 &&
                         CALLOUT_FLAGS_TO_CPU(flags) == gd->gd_cpuid);

                nflags = (flags - 1) & ~(CALLOUT_EXECUTED | CALLOUT_WAITING);
                nflags |= CALLOUT_PENDING;

                /*
                 * Put us on the queue
                 */
                if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
                        if (flags & CALLOUT_PENDING) {
                                if (sc->next == c)
                                        sc->next = TAILQ_NEXT(c, c_links.tqe);
                                TAILQ_REMOVE(
                                        &sc->callwheel[c->c_time & cwheelmask],
                                        c,
                                        c_links.tqe);
                        }
                        c->c_time = sc->curticks + c->c_load;
                        TAILQ_INSERT_TAIL(
                                &sc->callwheel[c->c_time & cwheelmask],
                                c, c_links.tqe);
                        break;
                }
                /* retry */
                cpu_pause();
        }

        /*
         * Issue wakeup if requested.
         */
        if (flags & CALLOUT_WAITING)
                wakeup(c);
}

/*
 * Stop a running timer and ensure that any running callout completes before
 * returning.  If the timer is running on another cpu this function may block
 * to interlock against the callout.  If the callout is currently executing
 * or blocked in another thread this function may also block to interlock
 * against the callout.
 *
 * The caller must be careful to avoid deadlocks, either by using
 * callout_init_lk() (which uses the lockmgr lock cancelation feature),
 * by using tokens and dealing with breaks in the serialization, or using
 * the lockmgr lock cancelation feature yourself in the callout callback
 * function.
 *
 * callout_stop() returns non-zero if the callout was pending.
 */
static int
_callout_stop(struct callout *c, int issync)
{
        globaldata_t gd = mycpu;
        globaldata_t tgd;
        softclock_pcpu_t sc;
        int flags;
        int nflags;
        int rc;
        int cpuid;

#ifdef INVARIANTS
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
                callout_init(c);
                kprintf(
                    "callout_stop(%p) from %p: callout was not initialized\n",
                    c, ((int **)&c)[-1]);
                print_backtrace(-1);
        }
#endif
        crit_enter_gd(gd);

        /*
         * Adjust flags for the required operation.  If the callout is
         * armed on another cpu we break out into the remote-cpu code which
         * will issue an IPI.  If it is not armed we are trivially done,
         * but may still need to test EXECUTED.
         */
        for (;;) {
                flags = c->c_flags;
                cpu_ccfence();

                cpuid = CALLOUT_FLAGS_TO_CPU(flags);

                /*
                 * Armed on remote cpu (break to remote-cpu code)
                 */
                if ((flags & CALLOUT_ARMED_MASK) && gd->gd_cpuid != cpuid) {
                        nflags = flags + 1;
                        if (atomic_cmpset_int(&c->c_flags, flags, nflags))
                                break;
                        cpu_pause();
                        continue;       /* retry */
                }

                /*
                 * Armed or armable on current cpu
                 */
                if (flags & CALLOUT_IPI_MASK) {
                        lwkt_process_ipiq();
                        cpu_pause();
                        continue;       /* retry */
                }

                /*
                 * If PENDING is set we can remove the callout from our
                 * queue and also use the side effect that the bit causes
                 * the callout to be locked to our cpu.
                 */
                if (flags & CALLOUT_PENDING) {
                        sc = &softclock_pcpu_ary[gd->gd_cpuid];
                        if (sc->next == c)
                                sc->next = TAILQ_NEXT(c, c_links.tqe);
                        TAILQ_REMOVE(
                                &sc->callwheel[c->c_time & cwheelmask],
                                c,
                                c_links.tqe);
                        c->c_func = NULL;

                        nflags = flags & ~(CALLOUT_ACTIVE |
                                           CALLOUT_EXECUTED |
                                           CALLOUT_WAITING |
                                           CALLOUT_PENDING);
                        if (atomic_cmpset_int(&c->c_flags,
                                              flags, nflags)) {
                                goto skip_slow;
                        }
                        cpu_pause();
                        continue;       /* retry */
                }

                /*
                 * If PENDING was not set the callout might not be locked
                 * to our cpu.
                 */
                nflags = flags & ~(CALLOUT_ACTIVE |
                                   CALLOUT_EXECUTED |
                                   CALLOUT_WAITING);
                if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
                        goto skip_slow;
                }
                cpu_pause();
                /* retry */
        }

        /*
         * Remote cpu path.  We incremented the IPI_MASK count so the callout
         * is now locked to the remote cpu and we can safely send an IPI
         * to it.
         *
         * Once sent, wait for all IPIs to be processed.
         */
        tgd = globaldata_find(cpuid);
        lwkt_send_ipiq3(tgd, callout_stop_ipi, c, issync);

        for (;;) {
                flags = c->c_flags;
                cpu_ccfence();

                if ((flags & CALLOUT_ARMED_MASK) == 0)
                        break;

                nflags = flags | CALLOUT_WAITING;
                tsleep_interlock(c, 0);
                if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
                        tsleep(c, PINTERLOCKED, "cstp1", 0);
                }
        }

skip_slow:
        /*
         * Caller expects callout_stop_sync() to clear EXECUTED and return
         * its previous status.
         */
        atomic_clear_int(&c->c_flags, CALLOUT_EXECUTED);

        if (flags & CALLOUT_WAITING)
                wakeup(c);

        /*
         * If (issync) we must also wait for any in-progress callbacks to
         * complete, unless the stop is being executed from the callback
         * itself.  The EXECUTED flag is set prior to the callback
         * being made so our existing flags status already has it.
         *
         * If auto-lock mode is being used, this is where we cancel any
         * blocked lock that is potentially preventing the target cpu
         * from completing the callback.
         */
        while (issync) {
                intptr_t *runp;
                intptr_t runco;

                sc = &softclock_pcpu_ary[cpuid];
                if (gd->gd_curthread == sc->thread)     /* stop from cb */
                        break;
                runp = &sc->running;
                runco = *runp;
                cpu_ccfence();
                if ((runco & ~(intptr_t)1) != (intptr_t)c)
                        break;
                if (c->c_flags & CALLOUT_AUTOLOCK)
                        lockmgr(c->c_lk, LK_CANCEL_BEG);
                tsleep_interlock(c, 0);
                if (atomic_cmpset_long(runp, runco, runco | 1))
                        tsleep(c, PINTERLOCKED, "cstp3", 0);
                if (c->c_flags & CALLOUT_AUTOLOCK)
                        lockmgr(c->c_lk, LK_CANCEL_END);
        }

        crit_exit_gd(gd);
        rc = (flags & CALLOUT_EXECUTED) != 0;

        return rc;
}

/*
 * IPI for stop function.  The callout is locked to the receiving cpu
 * by the IPI_MASK count.
 */
static void
callout_stop_ipi(void *arg, int issync, struct intrframe *frame)
{
        globaldata_t gd = mycpu;
        struct callout *c = arg;
        softclock_pcpu_t sc;
        int flags;
        int nflags;

        flags = c->c_flags;
        cpu_ccfence();

        KKASSERT(CALLOUT_FLAGS_TO_CPU(flags) == gd->gd_cpuid);

        /*
         * We can handle the PENDING flag immediately.
         */
        if (flags & CALLOUT_PENDING) {
                sc = &softclock_pcpu_ary[gd->gd_cpuid];
                if (sc->next == c)
                        sc->next = TAILQ_NEXT(c, c_links.tqe);
                TAILQ_REMOVE(
                        &sc->callwheel[c->c_time & cwheelmask],
                        c,
                        c_links.tqe);
                c->c_func = NULL;
        }

        /*
         * Transition to the stopped state and decrement the IPI count.
         * Leave the EXECUTED bit alone (the next callout_reset() will
         * have to deal with it).
         */
        for (;;) {
                flags = c->c_flags;
                cpu_ccfence();
                nflags = (flags - 1) & ~(CALLOUT_ACTIVE |
                                         CALLOUT_PENDING |
                                         CALLOUT_WAITING);

                if (atomic_cmpset_int(&c->c_flags, flags, nflags))
                        break;
                cpu_pause();
        }
        if (flags & CALLOUT_WAITING)
                wakeup(c);
}

int
callout_stop(struct callout *c)
{
        return _callout_stop(c, 0);
}

int
callout_stop_sync(struct callout *c)
{
        return _callout_stop(c, 1);
}

void
callout_stop_async(struct callout *c)
{
        _callout_stop(c, 0);
}

void
callout_terminate(struct callout *c)
{
        _callout_stop(c, 1);
        atomic_clear_int(&c->c_flags, CALLOUT_DID_INIT);
}

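/*
 * Example usage (illustrative sketch only): tear a callout down before
 * freeing the structure that embeds it.  The names mydev_detach() and
 * mydev_softc are hypothetical.
 *
 *	static int
 *	mydev_detach(struct mydev_softc *sc)
 *	{
 *		callout_terminate(&sc->tick_timer);
 *		// the callout can no longer fire and must be re-initialized
 *		// with callout_init*() before it may be used again
 *		kfree(sc, M_DEVBUF);
 *		return 0;
 *	}
 */
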
/*
 * Prepare a callout structure for use by callout_reset() and/or
 * callout_stop().
 *
 * The MP version of this routine requires that the callback
 * function installed by callout_reset() be MP safe.
 *
 * The LK version of this routine is also MPsafe and will automatically
 * acquire the specified lock for the duration of the function call,
 * and release it after the function returns.  In addition, when autolocking
 * is used, callout_stop() becomes synchronous if the caller owns the lock.
 * callout_reset(), callout_stop(), and callout_stop_sync() will block
 * normally instead of spinning when a cpu race occurs.  Lock cancelation
 * is used to avoid deadlocks against the callout ring dispatch.
 *
 * The init functions can be called from any cpu and do not have to be
 * called from the cpu that the timer will eventually run on.
 */
static __inline void
_callout_init(struct callout *c, int flags)
{
        bzero(c, sizeof *c);
        c->c_flags = flags;
}

void
callout_init(struct callout *c)
{
        _callout_init(c, CALLOUT_DID_INIT);
}

void
callout_init_mp(struct callout *c)
{
        _callout_init(c, CALLOUT_DID_INIT | CALLOUT_MPSAFE);
}

void
callout_init_lk(struct callout *c, struct lock *lk)
{
        _callout_init(c, CALLOUT_DID_INIT | CALLOUT_MPSAFE | CALLOUT_AUTOLOCK);
        c->c_lk = lk;
}

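/*
 * Example usage (illustrative sketch only): an auto-locked callout.  The
 * callback runs with the lock held, and callout_stop_sync() interlocks
 * against it via lock cancelation, so the timer can be stopped even while
 * the caller owns the lock.  The names mydev_softc, mydev_expire() and the
 * lock wmesg are hypothetical.
 *
 *	struct mydev_softc {
 *		struct lock	lk;
 *		struct callout	expire_timer;
 *	};
 *
 *	static void
 *	mydev_expire(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		// sc->lk is held here because of CALLOUT_AUTOLOCK
 *	}
 *
 *	lockinit(&sc->lk, "mydev", 0, 0);
 *	callout_init_lk(&sc->expire_timer, &sc->lk);
 *	callout_reset(&sc->expire_timer, 5 * hz, mydev_expire, sc);
 *
 *	lockmgr(&sc->lk, LK_EXCLUSIVE);
 *	callout_stop_sync(&sc->expire_timer);
 *	lockmgr(&sc->lk, LK_RELEASE);
 */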