/*
 * Copyright (c) 2004,2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The original callout mechanism was based on the work of Adam M. Costello
 * and George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 *
 * The per-cpu augmentation was done by Matthew Dillon.  This file has
 * essentially been rewritten pretty much from scratch by Matt.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/thread.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <vm/vm_extern.h>
struct softclock_pcpu {
	struct callout_tailq *callwheel;
	struct callout * volatile next;
	intptr_t running;	/* NOTE! Bit 0 used to flag wakeup */
	int softticks;		/* softticks index */
	int curticks;		/* per-cpu ticks counter */
	int isrunning;
	struct thread thread;
};

typedef struct softclock_pcpu *softclock_pcpu_t;

static int cwheelsize;
static int cwheelmask;
static softclock_pcpu_t softclock_pcpu_ary[MAXCPU];
static void softclock_handler(void *arg);
static void slotimer_callback(void *arg);
static void callout_reset_ipi(void *arg);
static void callout_stop_ipi(void *arg, int issync, struct intrframe *frame);
/*
 * Atomically set (sflags) and clear (cflags) flag bits in c->c_flags,
 * returning the flags as they were prior to the operation.
 */
static __inline
int
callout_setclear(struct callout *c, int sflags, int cflags)
{
	int flags;
	int nflags;

	for (;;) {
		flags = c->c_flags;
		cpu_ccfence();
		nflags = (flags | sflags) & ~cflags;
		if (atomic_cmpset_int(&c->c_flags, flags, nflags))
			break;
	}
	return flags;
}
static void
swi_softclock_setup(void *arg)
{
	int cpu;
	int i;
	int target;

	/*
	 * Figure out how large a callwheel we need.  It must be a power of 2.
	 *
	 * ncallout is primarily based on available memory, don't explode
	 * the allocations if the system has a lot of cpus.
	 */
	target = ncallout / ncpus + 16;

	cwheelsize = 1;
	while (cwheelsize < target)
		cwheelsize <<= 1;
	cwheelmask = cwheelsize - 1;

	/*
	 * Initialize per-cpu data structures.
	 */
	for (cpu = 0; cpu < ncpus; ++cpu) {
		softclock_pcpu_t sc;
		int wheel_sz;

		sc = (void *)kmem_alloc3(&kernel_map, sizeof(*sc),
					 VM_SUBSYS_GD, KM_CPU(cpu));
		memset(sc, 0, sizeof(*sc));
		softclock_pcpu_ary[cpu] = sc;

		wheel_sz = sizeof(*sc->callwheel) * cwheelsize;
		sc->callwheel = (void *)kmem_alloc3(&kernel_map, wheel_sz,
						    VM_SUBSYS_GD, KM_CPU(cpu));
		memset(sc->callwheel, 0, wheel_sz);
		for (i = 0; i < cwheelsize; ++i)
			TAILQ_INIT(&sc->callwheel[i]);

		/*
		 * Mark the softclock handler as being an interrupt thread
		 * even though it really isn't, but do not allow it to
		 * preempt other threads (do not assign td_preemptable).
		 *
		 * Kernel code now assumes that callouts do not preempt
		 * the cpu they were scheduled on.
		 */
		lwkt_create(softclock_handler, sc, NULL, &sc->thread,
			    TDF_NOSTART | TDF_INTTHREAD,
			    cpu, "softclock %d", cpu);
	}
}
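/*
 * Worked example of the sizing above (hypothetical numbers, for
 * illustration only): with ncallout = 16384 and ncpus = 8,
 *
 *	target     = 16384 / 8 + 16 = 2064
 *	cwheelsize = 4096	(smallest power of 2 >= target)
 *	cwheelmask = 4095	(0xFFF)
 *
 * A callout expiring at c_time is then hashed into bucket
 * callwheel[c_time & cwheelmask] on its owning cpu.
 */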
/*
 * Must occur after ncpus has been initialized.
 */
SYSINIT(softclock_setup, SI_BOOT2_SOFTCLOCK, SI_ORDER_SECOND,
	swi_softclock_setup, NULL);
/*
 * This routine is called from the hardclock() (basically a FASTint/IPI) on
 * each cpu in the system.  sc->curticks is this cpu's notion of the timebase.
 * It IS NOT NECESSARILY SYNCHRONIZED WITH 'ticks'!  sc->softticks is where
 * the callwheel is currently indexed.
 *
 * WARNING!  The MP lock is not necessarily held on call, nor can it be
 * safely obtained.
 *
 * sc->softticks is adjusted by either this routine or our helper thread
 * depending on whether the helper thread is running or not.
 */
void
hardclock_softtick(globaldata_t gd)
{
	softclock_pcpu_t sc;

	sc = softclock_pcpu_ary[gd->gd_cpuid];
	++sc->curticks;
	if (sc->isrunning)
		return;
	if (sc->softticks == sc->curticks) {
		/*
		 * In sync, only wakeup the thread if there is something to
		 * do.
		 */
		if (TAILQ_FIRST(&sc->callwheel[sc->softticks & cwheelmask])) {
			sc->isrunning = 1;
			lwkt_schedule(&sc->thread);
		} else {
			++sc->softticks;
		}
	} else {
		/*
		 * out of sync, wakeup the thread unconditionally so it can
		 * catch up.
		 */
		sc->isrunning = 1;
		lwkt_schedule(&sc->thread);
	}
}
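/*
 * Example of the bookkeeping above (hypothetical tick values): if this
 * cpu's curticks has advanced to 1005 while softticks is still 1002 the
 * cpu is out of sync, so the helper thread is scheduled unconditionally
 * and will sweep buckets (1002 & cwheelmask) through (1005 & cwheelmask)
 * before sleeping again.  When the two are in sync the thread is only
 * woken if the current bucket is non-empty.
 */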
/*
 * This procedure is the main loop of our per-cpu helper thread.  The
 * sc->isrunning flag prevents us from racing hardclock_softtick() and
 * a critical section is sufficient to interlock sc->curticks and protect
 * us from remote IPI's / list removal.
 *
 * The thread starts with the MP lock released and not in a critical
 * section.  The loop itself is MP safe while individual callbacks
 * may or may not be, so we obtain or release the MP lock as appropriate.
 */
static void
softclock_handler(void *arg)
{
	softclock_pcpu_t sc;
	struct callout *c;
	struct callout_tailq *bucket;
	struct callout slotimer;
	int mpsafe = 1;
	int flags;

	/*
	 * Setup pcpu slow clocks which we want to run from the callout
	 * thread.
	 */
	callout_init_mp(&slotimer);
	callout_reset(&slotimer, hz * 10, slotimer_callback, &slotimer);

	/*
	 * Run the callout thread at the same priority as other kernel
	 * threads so it can be round-robined.
	 */
	/*lwkt_setpri_self(TDPRI_SOFT_NORM);*/
	/*
	 * Loop critical section against ipi operations to this cpu.
	 */
	sc = arg;
	crit_enter();
loop:
	while (sc->softticks != (int)(sc->curticks + 1)) {
		bucket = &sc->callwheel[sc->softticks & cwheelmask];

		for (c = TAILQ_FIRST(bucket); c; c = sc->next) {
			void (*c_func)(void *);
			void *c_arg;
			struct lock *c_lk;
			int error;

			if (c->c_time != sc->softticks) {
				sc->next = TAILQ_NEXT(c, c_links.tqe);
				continue;
			}
			/*
			 * Synchronize with mpsafe requirements
			 */
			flags = c->c_flags;
			if (flags & CALLOUT_MPSAFE) {
				if (mpsafe == 0) {
					mpsafe = 1;
					rel_mplock();
				}
			} else {
				/*
				 * The request might be removed while we
				 * are waiting to get the MP lock.  If it
				 * was removed sc->next will point to the
				 * next valid request or NULL, loop up.
				 */
				if (mpsafe) {
					mpsafe = 0;
					sc->next = c;
					get_mplock();
					if (c != sc->next)
						continue;
				}
			}

			/*
			 * Queue protection only exists while we hold the
			 * critical section uninterrupted.
			 *
			 * Adjust sc->next when removing (c) from the queue,
			 * note that an IPI on this cpu may make further
			 * adjustments to sc->next.
			 */
			sc->next = TAILQ_NEXT(c, c_links.tqe);
			TAILQ_REMOVE(bucket, c, c_links.tqe);

			KASSERT((c->c_flags & CALLOUT_DID_INIT) &&
				(c->c_flags & CALLOUT_PENDING) &&
				CALLOUT_FLAGS_TO_CPU(c->c_flags) ==
				mycpu->gd_cpuid,
				("callout %p: bad flags %08x", c, c->c_flags));

			/*
			 * Once CALLOUT_PENDING is cleared only the IPI_MASK
			 * prevents the callout from being moved to another
			 * cpu.  However, callout_stop() will also check
			 * sc->running on the assigned cpu if CALLOUT_EXECUTED
			 * is set.  CALLOUT_EXECUTE implies a callback
			 * interlock is needed when cross-cpu.
			 */
			sc->running = (intptr_t)c;
			c_func = c->c_func;
			c_arg = c->c_arg;
			c_lk = c->c_lk;
			c->c_func = NULL;
			if ((flags & (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) ==
			    (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) {
				error = lockmgr(c_lk, LK_EXCLUSIVE |
						      LK_CANCELABLE);
				if (error == 0) {
					flags = callout_setclear(c,
							CALLOUT_EXECUTED,
							CALLOUT_PENDING |
							CALLOUT_WAITING);
					crit_exit();
					c_func(c_arg);
					crit_enter();
					lockmgr(c_lk, LK_RELEASE);
				} else {
					flags = callout_setclear(c,
							0,
							CALLOUT_PENDING);
				}
			} else if (flags & CALLOUT_ACTIVE) {
				flags = callout_setclear(c,
						CALLOUT_EXECUTED,
						CALLOUT_PENDING |
						CALLOUT_WAITING);
				crit_exit();
				c_func(c_arg);
				crit_enter();
			} else {
				flags = callout_setclear(c,
						0,
						CALLOUT_PENDING |
						CALLOUT_WAITING);
			}
			/*
			 * Read and clear sc->running.  If bit 0 was set,
			 * a callout_stop() is likely blocked waiting for
			 * the callback to complete.
			 *
			 * The setclear above also cleared CALLOUT_WAITING
			 * and returns the contents of flags prior to clearing
			 * any bits.
			 *
			 * Interlock wakeup any _stop's waiting on us.  Note
			 * that once c_func() was called, the callout
			 * structure (c) pointer may no longer be valid.  It
			 * can only be used for the wakeup.
			 */
			if ((atomic_readandclear_ptr(&sc->running) & 1) ||
			    (flags & CALLOUT_WAITING)) {
				wakeup(c);
			}
			/* NOTE: list may have changed */
		}
		++sc->softticks;
	}

	/*
	 * Don't leave us holding the MP lock when we deschedule ourselves.
	 */
	if (mpsafe == 0) {
		mpsafe = 1;
		rel_mplock();
	}
	sc->isrunning = 0;
	lwkt_deschedule_self(&sc->thread);	/* == curthread */
	lwkt_switch();
	goto loop;
	/* NOT REACHED */
}
/*
 * A very slow system cleanup timer (10 second interval),
 * per-cpu.
 */
static void
slotimer_callback(void *arg)
{
	struct callout *c = arg;

	slab_cleanup();
	callout_reset(c, hz * 10, slotimer_callback, c);
}
/*
 * Start or restart a timeout.  Installs the callout structure on the
 * callwheel of the current cpu.  Callers may legally pass any value, even
 * if 0 or negative, but since the sc->curticks index may have already
 * been processed a minimum timeout of 1 tick will be enforced.
 *
 * This function will block if the callout is currently queued to a different
 * cpu or the callback is currently running in another thread.
 */
void
callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg)
{
	softclock_pcpu_t sc;
	globaldata_t gd;

	if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_reset(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
	gd = mycpu;
	sc = softclock_pcpu_ary[gd->gd_cpuid];
	crit_enter_gd(gd);

	/*
	 * Our cpu must gain ownership of the callout and cancel anything
	 * still running, which is complex.  The easiest way to do it is to
	 * issue a callout_stop_sync().  callout_stop_sync() will also
	 * handle CALLOUT_EXECUTED (dispatch waiting), and clear it.
	 *
	 * WARNING: callout_stop_sync()'s return state can race other
	 *	    callout_*() calls due to blocking, so we must re-check.
	 */
	for (;;) {
		int flags;
		int nflags;

		if (c->c_flags & (CALLOUT_ARMED_MASK | CALLOUT_EXECUTED))
			callout_stop_sync(c);
		flags = c->c_flags & ~(CALLOUT_ARMED_MASK | CALLOUT_EXECUTED);
		nflags = (flags & ~CALLOUT_CPU_MASK) |
			 CALLOUT_CPU_TO_FLAGS(gd->gd_cpuid) |
			 CALLOUT_PENDING |
			 CALLOUT_ACTIVE;
		if (atomic_cmpset_int(&c->c_flags, flags, nflags))
			break;
		cpu_pause();
	}

	/*
	 * With the critical section held and PENDING set we now 'own' the
	 * callout.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;
	c->c_arg = arg;
	c->c_func = ftn;
	c->c_time = sc->curticks + to_ticks;

	TAILQ_INSERT_TAIL(&sc->callwheel[c->c_time & cwheelmask],
			  c, c_links.tqe);
	crit_exit_gd(gd);
}
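/*
 * Typical usage sketch (hypothetical driver code, names are examples
 * only): arm a one-second periodic timer by re-arming from the callback.
 *
 *	static struct callout foo_timer;
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		foo_do_work(arg);
 *		callout_reset(&foo_timer, hz, foo_tick, arg);
 *	}
 *
 *	callout_init_mp(&foo_timer);
 *	callout_reset(&foo_timer, hz, foo_tick, NULL);
 *
 * A to_ticks of 0 or a negative value is legal but is bumped to 1 tick
 * as described above.
 */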
/*
 * Setup a callout to run on the specified cpu.  Should generally be used
 * to run a callout on a specific cpu which does not nominally change.  This
 * callout_reset() will be issued asynchronously via an IPI.
 */
void
callout_reset_bycpu(struct callout *c, int to_ticks, void (*ftn)(void *),
		    void *arg, int cpuid)
{
	globaldata_t gd;
	globaldata_t tgd;

	if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_reset(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
	gd = mycpu;
	crit_enter_gd(gd);

	tgd = globaldata_find(cpuid);

	/*
	 * This code is similar to the code in callout_reset() but we assign
	 * the callout to the target cpu.  We cannot set PENDING here since
	 * we cannot atomically add the callout to the target cpu's queue.
	 * However, incrementing the IPI count has the effect of locking
	 * the cpu assignment.
	 *
	 * WARNING: callout_stop_sync()'s return state can race other
	 *	    callout_*() calls due to blocking, so we must re-check.
	 */
	for (;;) {
		int flags;
		int nflags;

		if (c->c_flags & (CALLOUT_ARMED_MASK | CALLOUT_EXECUTED))
			callout_stop_sync(c);
		flags = c->c_flags & ~(CALLOUT_ARMED_MASK | CALLOUT_EXECUTED);
		nflags = (flags & ~(CALLOUT_CPU_MASK |
				    CALLOUT_EXECUTED)) |
			 CALLOUT_CPU_TO_FLAGS(tgd->gd_cpuid) |
			 CALLOUT_ACTIVE;
		nflags = nflags + 1;		/* bump IPI count */
		if (atomic_cmpset_int(&c->c_flags, flags, nflags))
			break;
		cpu_pause();
	}

	/*
	 * Since we control our +1 in the IPI count, the target cpu cannot
	 * now change until our IPI is processed.
	 */
	c->c_arg = arg;
	c->c_func = ftn;
	c->c_load = to_ticks;	/* IPI will add curticks */

	lwkt_send_ipiq(tgd, callout_reset_ipi, c);
	crit_exit_gd(gd);
}
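/*
 * Usage sketch (hypothetical, for illustration only): pin a periodic
 * statistics callout to cpu 0 regardless of which cpu arms it.
 *
 *	callout_init_mp(&stat_timer);
 *	callout_reset_bycpu(&stat_timer, hz, stat_collect, NULL, 0);
 *
 * Because the queueing is performed on the target cpu by
 * callout_reset_ipi(), this call can return before the callout is
 * actually on that cpu's callwheel.
 */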
/*
 * Remote IPI for callout_reset_bycpu().  The cpu assignment cannot be
 * ripped out from under us due to the count in IPI_MASK, but it is possible
 * that other IPIs executed so we must deal with other flags that might
 * have been set or cleared.
 */
static void
callout_reset_ipi(void *arg)
{
	struct callout *c = arg;
	globaldata_t gd = mycpu;
	softclock_pcpu_t sc;
	int flags;
	int nflags;

	sc = softclock_pcpu_ary[gd->gd_cpuid];

	for (;;) {
		flags = c->c_flags;
		cpu_ccfence();
		KKASSERT((flags & CALLOUT_IPI_MASK) > 0 &&
			 CALLOUT_FLAGS_TO_CPU(flags) == gd->gd_cpuid);

		nflags = (flags - 1) & ~(CALLOUT_EXECUTED | CALLOUT_WAITING);
		nflags |= CALLOUT_PENDING;

		/*
		 * Put us on the queue
		 */
		if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
			if (flags & CALLOUT_PENDING) {
				if (sc->next == c)
					sc->next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(
					&sc->callwheel[c->c_time & cwheelmask],
					c,
					c_links.tqe);
			}
			c->c_time = sc->curticks + c->c_load;
			TAILQ_INSERT_TAIL(
				&sc->callwheel[c->c_time & cwheelmask],
				c, c_links.tqe);
			break;
		}
		/* retry */
		cpu_pause();
	}

	/*
	 * Issue wakeup if requested.
	 */
	if (flags & CALLOUT_WAITING)
		wakeup(c);
}
/*
 * Stop a running timer and ensure that any running callout completes before
 * returning.  If the timer is running on another cpu this function may block
 * to interlock against the callout.  If the callout is currently executing
 * or blocked in another thread this function may also block to interlock
 * against the callout.
 *
 * The caller must be careful to avoid deadlocks, either by using
 * callout_init_lk() (which uses the lockmgr lock cancelation feature),
 * by using tokens and dealing with breaks in the serialization, or using
 * the lockmgr lock cancelation feature yourself in the callout callback
 * function.
 *
 * callout_stop() returns non-zero if the callout was pending.
 */
static int
_callout_stop(struct callout *c, int issync)
{
	globaldata_t gd = mycpu;
	globaldata_t tgd;
	softclock_pcpu_t sc;
	int flags;
	int nflags;
	int rc;
	int cpuid;

	if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_stop(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
	crit_enter_gd(gd);

retry:
	/*
	 * Adjust flags for the required operation.  If the callout is
	 * armed on another cpu we break out into the remote-cpu code which
	 * will issue an IPI.  If it is not armed we are trivially done,
	 * but may still need to test EXECUTED.
	 */
	for (;;) {
		flags = c->c_flags;
		cpu_ccfence();

		cpuid = CALLOUT_FLAGS_TO_CPU(flags);

		/*
		 * Armed on remote cpu (break to remote-cpu code)
		 */
		if ((flags & CALLOUT_ARMED_MASK) && gd->gd_cpuid != cpuid) {
			nflags = flags + 1;
			if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
				/*
				 * BREAK TO REMOTE-CPU CODE HERE
				 */
				break;
			}
			cpu_pause();
			continue;
		}

		/*
		 * Armed or armable on current cpu
		 */
		if (flags & CALLOUT_IPI_MASK) {
			lwkt_process_ipiq();
			cpu_pause();
			continue;	/* retry */
		}

		/*
		 * If PENDING is set we can remove the callout from our
		 * queue and also use the side effect that the bit causes
		 * the callout to be locked to our cpu.
		 */
		if (flags & CALLOUT_PENDING) {
			sc = softclock_pcpu_ary[gd->gd_cpuid];
			if (sc->next == c)
				sc->next = TAILQ_NEXT(c, c_links.tqe);
			TAILQ_REMOVE(
				&sc->callwheel[c->c_time & cwheelmask],
				c,
				c_links.tqe);
			c->c_func = NULL;

			nflags = flags & ~(CALLOUT_ACTIVE |
					   CALLOUT_EXECUTED |
					   CALLOUT_WAITING |
					   CALLOUT_PENDING);
			if (atomic_cmpset_int(&c->c_flags,
					      flags, nflags)) {
				goto skip_slow;
			}
			cpu_pause();
			continue;	/* retry */
		}

		/*
		 * If PENDING was not set the callout might not be locked
		 * to this cpu.
		 */
		nflags = flags & ~(CALLOUT_ACTIVE |
				   CALLOUT_EXECUTED |
				   CALLOUT_WAITING |
				   CALLOUT_PENDING);
		if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
			goto skip_slow;
		}
		cpu_pause();
		/* retry */
	}

	/*
	 * Remote cpu path.  We incremented the IPI_MASK count so the callout
	 * is now locked to the remote cpu and we can safely send an IPI
	 * to it.
	 *
	 * Once sent, wait for all IPIs to be processed.  If PENDING remains
	 * set after all IPIs have processed we raced a callout or
	 * callout_reset and must retry.  Callers expect the callout to
	 * be completely stopped upon return, so make sure it is.
	 */
	tgd = globaldata_find(cpuid);
	lwkt_send_ipiq3(tgd, callout_stop_ipi, c, issync);

	for (;;) {
		flags = c->c_flags;
		cpu_ccfence();

		if ((flags & CALLOUT_IPI_MASK) == 0)
			break;
		nflags = flags | CALLOUT_WAITING;
		tsleep_interlock(c, 0);
		if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
			tsleep(c, PINTERLOCKED, "cstp1", 0);
		}
	}
	if (flags & CALLOUT_PENDING)
		goto retry;

	/*
	 * Caller expects callout_stop_sync() to clear EXECUTED and return
	 * its previous status.
	 */
	atomic_clear_int(&c->c_flags, CALLOUT_EXECUTED);

skip_slow:
	if (flags & CALLOUT_WAITING)
		wakeup(c);

	/*
	 * If (issync) we must also wait for any in-progress callbacks to
	 * complete, unless the stop is being executed from the callback
	 * itself.  The EXECUTED flag is set prior to the callback
	 * being made so our existing flags status already has it.
	 *
	 * If auto-lock mode is being used, this is where we cancel any
	 * blocked lock that is potentially preventing the target cpu
	 * from completing the callback.
	 */
	while (issync) {
		intptr_t *runp;
		intptr_t runco;

		sc = softclock_pcpu_ary[cpuid];
		if (gd->gd_curthread == &sc->thread)	/* stop from cb */
			break;
		runp = &sc->running;
		runco = *runp;
		cpu_ccfence();
		if ((runco & ~(intptr_t)1) != (intptr_t)c)
			break;
		if (c->c_flags & CALLOUT_AUTOLOCK)
			lockmgr(c->c_lk, LK_CANCEL_BEG);
		tsleep_interlock(c, 0);
		if (atomic_cmpset_long(runp, runco, runco | 1))
			tsleep(c, PINTERLOCKED, "cstp3", 0);
		if (c->c_flags & CALLOUT_AUTOLOCK)
			lockmgr(c->c_lk, LK_CANCEL_END);
	}

	crit_exit_gd(gd);
	rc = (flags & CALLOUT_EXECUTED) != 0;

	return rc;
}
/*
 * IPI for stop function.  The callout is locked to the receiving cpu
 * by the IPI_MASK count.
 */
static void
callout_stop_ipi(void *arg, int issync, struct intrframe *frame)
{
	globaldata_t gd = mycpu;
	struct callout *c = arg;
	softclock_pcpu_t sc;
	int flags;
	int nflags;

	flags = c->c_flags;
	cpu_ccfence();

	KKASSERT(CALLOUT_FLAGS_TO_CPU(flags) == gd->gd_cpuid);

	/*
	 * We can handle the PENDING flag immediately.
	 */
	if (flags & CALLOUT_PENDING) {
		sc = softclock_pcpu_ary[gd->gd_cpuid];
		if (sc->next == c)
			sc->next = TAILQ_NEXT(c, c_links.tqe);
		TAILQ_REMOVE(
			&sc->callwheel[c->c_time & cwheelmask],
			c,
			c_links.tqe);
		c->c_func = NULL;
	}

	/*
	 * Transition to the stopped state and decrement the IPI count.
	 * Leave the EXECUTED bit alone (the next callout_reset() will
	 * have to deal with it).
	 */
	for (;;) {
		flags = c->c_flags;
		cpu_ccfence();
		nflags = (flags - 1) & ~(CALLOUT_ACTIVE |
					 CALLOUT_PENDING |
					 CALLOUT_WAITING);
		if (atomic_cmpset_int(&c->c_flags, flags, nflags))
			break;
		cpu_pause();
	}
	if (flags & CALLOUT_WAITING)
		wakeup(c);
}
int
callout_stop(struct callout *c)
{
	return _callout_stop(c, 0);
}

int
callout_stop_sync(struct callout *c)
{
	return _callout_stop(c, 1);
}

void
callout_stop_async(struct callout *c)
{
	_callout_stop(c, 0);
}

void
callout_terminate(struct callout *c)
{
	_callout_stop(c, 1);
	atomic_clear_int(&c->c_flags, CALLOUT_DID_INIT);
}
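/*
 * Teardown sketch (hypothetical, for illustration only): a detach path
 * that must guarantee the callback is neither pending nor still running
 * before freeing its argument would use the synchronous form.
 *
 *	callout_stop_sync(&foo_timer);
 *	kfree(foo_arg, M_DEVBUF);
 *
 * callout_stop() and callout_stop_async() do not wait for an in-progress
 * callback (issync == 0); use callout_stop_sync() or callout_terminate()
 * when the callback interlock is required.
 */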
/*
 * Prepare a callout structure for use by callout_reset() and/or
 * callout_stop().
 *
 * The MP version of this routine requires that the callback
 * function installed by callout_reset() be MP safe.
 *
 * The LK version of this routine is also MPsafe and will automatically
 * acquire the specified lock for the duration of the function call,
 * and release it after the function returns.  In addition, when autolocking
 * is used, callout_stop() becomes synchronous if the caller owns the lock.
 * callout_reset(), callout_stop(), and callout_stop_sync() will block
 * normally instead of spinning when a cpu race occurs.  Lock cancelation
 * is used to avoid deadlocks against the callout ring dispatch.
 *
 * The init functions can be called from any cpu and do not have to be
 * called from the cpu that the timer will eventually run on.
 */
static __inline void
_callout_init(struct callout *c, int flags)
{
	bzero(c, sizeof *c);
	c->c_flags = flags;
}

void
callout_init(struct callout *c)
{
	_callout_init(c, CALLOUT_DID_INIT);
}

void
callout_init_mp(struct callout *c)
{
	_callout_init(c, CALLOUT_DID_INIT | CALLOUT_MPSAFE);
}

void
callout_init_lk(struct callout *c, struct lock *lk)
{
	_callout_init(c, CALLOUT_DID_INIT | CALLOUT_MPSAFE | CALLOUT_AUTOLOCK);
	c->c_lk = lk;
}
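/*
 * Autolock usage sketch (hypothetical, for illustration only), assuming
 * foo_lk has already been initialized with lockinit():
 *
 *	callout_init_lk(&foo_timer, &foo_lk);
 *	callout_reset(&foo_timer, hz, foo_tick, NULL);
 *
 * foo_tick() then runs with foo_lk held exclusively, and a callout_stop()
 * issued while the caller owns foo_lk behaves synchronously, relying on
 * lock cancelation to avoid deadlocking against the dispatch code above.
 */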