/*
 * Copyright (c) 2004,2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The original callout mechanism was based on the work of Adam M. Costello
 * and George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 *
 * The per-cpu augmentation was done by Matthew Dillon.  This file has
 * essentially been rewritten pretty much from scratch by Matt.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/thread.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
struct softclock_pcpu {
        struct callout_tailq *callwheel;
        struct callout * volatile next;
        intptr_t running;       /* NOTE! Bit 0 used to flag wakeup */
        int softticks;          /* softticks index */
        int curticks;           /* per-cpu ticks counter */
        int isrunning;
        struct thread *thread;
};

typedef struct softclock_pcpu *softclock_pcpu_t;
static MALLOC_DEFINE(M_CALLOUT, "callout", "callout structures");
static int cwheelsize;
static int cwheelmask;
static struct softclock_pcpu softclock_pcpu_ary[MAXCPU];

static void softclock_handler(void *arg);
static void slotimer_callback(void *arg);
static void callout_reset_ipi(void *arg);
static void callout_stop_ipi(void *arg, int issync, struct intrframe *frame);
static __inline
int
callout_setclear(struct callout *c, int sflags, int cflags)
{
        int flags;
        int nflags;

        for (;;) {
                flags = c->c_flags;
                cpu_ccfence();
                nflags = (flags | sflags) & ~cflags;
                if (atomic_cmpset_int(&c->c_flags, flags, nflags))
                        break;
        }
        return flags;
}
static void
swi_softclock_setup(void *arg)
{
        int cpu;
        int i;
        int target;

        /*
         * Figure out how large a callwheel we need.  It must be a power of 2.
         *
         * ncallout is primarily based on available memory, don't explode
         * the allocations if the system has a lot of cpus.
         */
        target = ncallout / ncpus + 16;

        cwheelsize = 1;
        while (cwheelsize < target)
                cwheelsize <<= 1;
        cwheelmask = cwheelsize - 1;

        /*
         * Initialize per-cpu data structures.
         */
        for (cpu = 0; cpu < ncpus; ++cpu) {
                softclock_pcpu_t sc;

                sc = &softclock_pcpu_ary[cpu];

                sc->callwheel = kmalloc(sizeof(*sc->callwheel) * cwheelsize,
                                        M_CALLOUT, M_WAITOK|M_ZERO);
                for (i = 0; i < cwheelsize; ++i)
                        TAILQ_INIT(&sc->callwheel[i]);

                /*
                 * Mark the softclock handler as being an interrupt thread
                 * even though it really isn't, but do not allow it to
                 * preempt other threads (do not assign td_preemptable).
                 *
                 * Kernel code now assumes that callouts do not preempt
                 * the cpu they were scheduled on.
                 */
                lwkt_create(softclock_handler, sc, &sc->thread, NULL,
                            TDF_NOSTART | TDF_INTTHREAD,
                            cpu, "softclock %d", cpu);
        }
}

/*
 * Must occur after ncpus has been initialized.
 */
SYSINIT(softclock_setup, SI_BOOT2_SOFTCLOCK, SI_ORDER_SECOND,
        swi_softclock_setup, NULL);
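
/*
 * Sizing sketch (hypothetical numbers, for illustration only): with
 * ncallout = 16384 and ncpus = 8, target = 16384 / 8 + 16 = 2064, which
 * rounds up to cwheelsize = 4096 and cwheelmask = 4095.  A callout whose
 * expiry tick is c_time then lives in bucket
 * sc->callwheel[c_time & cwheelmask], so the wheel wraps every cwheelsize
 * ticks and a bucket may hold callouts whose c_time values differ by a
 * multiple of cwheelsize, which is why the dispatch loop below re-checks
 * c->c_time against sc->softticks before running anything.
 */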
/*
 * This routine is called from the hardclock() (basically a FASTint/IPI) on
 * each cpu in the system.  sc->curticks is this cpu's notion of the timebase.
 * It IS NOT NECESSARILY SYNCHRONIZED WITH 'ticks'!  sc->softticks is where
 * the callwheel is currently indexed.
 *
 * WARNING!  The MP lock is not necessarily held on call, nor can it be
 * safely obtained.
 *
 * sc->softticks is adjusted by either this routine or our helper thread
 * depending on whether the helper thread is running or not.
 */
void
hardclock_softtick(globaldata_t gd)
{
        softclock_pcpu_t sc;

        sc = &softclock_pcpu_ary[gd->gd_cpuid];
        ++sc->curticks;
        if (sc->isrunning)
                return;
        if (sc->softticks == sc->curticks) {
                /*
                 * In sync, only wakeup the thread if there is something to
                 * do.
                 */
                if (TAILQ_FIRST(&sc->callwheel[sc->softticks & cwheelmask])) {
                        sc->isrunning = 1;
                        lwkt_schedule(sc->thread);
                } else {
                        ++sc->softticks;
                }
        } else {
                /*
                 * out of sync, wakeup the thread unconditionally so it can
                 * catch up.
                 */
                sc->isrunning = 1;
                lwkt_schedule(sc->thread);
        }
}
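
/*
 * Illustration (hypothetical tick values): if the increment above leaves
 * sc->curticks == sc->softticks == 100 and bucket (100 & cwheelmask) is
 * empty, softticks is simply advanced to 101 in place and the helper
 * thread stays asleep.  If the helper has fallen behind, e.g. curticks
 * has reached 105 while softticks is still 101, the thread is woken
 * unconditionally and its loop dispatches buckets 101 through 105 before
 * it sleeps again with softticks == 106.
 */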
/*
 * This procedure is the main loop of our per-cpu helper thread.  The
 * sc->isrunning flag prevents us from racing hardclock_softtick() and
 * a critical section is sufficient to interlock sc->curticks and protect
 * us from remote IPI's / list removal.
 *
 * The thread starts with the MP lock released and not in a critical
 * section.  The loop itself is MP safe while individual callbacks
 * may or may not be, so we obtain or release the MP lock as appropriate.
 */
static void
softclock_handler(void *arg)
{
        softclock_pcpu_t sc;
        struct callout *c;
        struct callout_tailq *bucket;
        struct callout slotimer;
        int mpsafe = 1;
        int flags;

        /*
         * Setup pcpu slow clocks which we want to run from the callout
         * thread.
         */
        callout_init_mp(&slotimer);
        callout_reset(&slotimer, hz * 10, slotimer_callback, &slotimer);

        /*
         * Run the callout thread at the same priority as other kernel
         * threads so it can be round-robined.
         */
        /*lwkt_setpri_self(TDPRI_SOFT_NORM);*/

        /*
         * Loop critical section against ipi operations to this cpu.
         */
        sc = arg;
        crit_enter();
loop:
        while (sc->softticks != (int)(sc->curticks + 1)) {
                bucket = &sc->callwheel[sc->softticks & cwheelmask];

                for (c = TAILQ_FIRST(bucket); c; c = sc->next) {
                        void (*c_func)(void *);
                        void *c_arg;
                        struct lock *c_lk;
                        int error;

                        if (c->c_time != sc->softticks) {
                                sc->next = TAILQ_NEXT(c, c_links.tqe);
                                continue;
                        }

                        /*
                         * Synchronize with mpsafe requirements
                         */
                        flags = c->c_flags;
                        if (flags & CALLOUT_MPSAFE) {
                                if (mpsafe == 0) {
                                        mpsafe = 1;
                                        rel_mplock();
                                }
                        } else {
                                /*
                                 * The request might be removed while we
                                 * are waiting to get the MP lock.  If it
                                 * was removed sc->next will point to the
                                 * next valid request or NULL, loop up.
                                 */
                                if (mpsafe) {
                                        mpsafe = 0;
                                        sc->next = c;
                                        get_mplock();
                                        if (c != sc->next)
                                                continue;
                                }
                        }

                        /*
                         * Queue protection only exists while we hold the
                         * critical section uninterrupted.
                         *
                         * Adjust sc->next when removing (c) from the queue,
                         * note that an IPI on this cpu may make further
                         * adjustments to sc->next.
                         */
                        sc->next = TAILQ_NEXT(c, c_links.tqe);
                        TAILQ_REMOVE(bucket, c, c_links.tqe);

                        KASSERT((c->c_flags & CALLOUT_DID_INIT) &&
                                (c->c_flags & CALLOUT_PENDING) &&
                                CALLOUT_FLAGS_TO_CPU(c->c_flags) ==
                                mycpu->gd_cpuid,
                                ("callout %p: bad flags %08x", c, c->c_flags));

                        /*
                         * Once CALLOUT_PENDING is cleared only the IPI_MASK
                         * prevents the callout from being moved to another
                         * cpu.  However, callout_stop() will also check
                         * sc->running on the assigned cpu if CALLOUT_EXECUTED
                         * is set.  CALLOUT_EXECUTED implies a callback
                         * interlock is needed when cross-cpu.
                         */
                        sc->running = (intptr_t)c;
                        c_func = c->c_func;
                        c_arg = c->c_arg;
                        c_lk = c->c_lk;
                        c->c_func = NULL;

                        if ((flags & (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) ==
                            (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) {
                                error = lockmgr(c_lk, LK_EXCLUSIVE |
                                                      LK_CANCELABLE);
                                if (error == 0) {
                                        flags = callout_setclear(c,
                                                        CALLOUT_EXECUTED,
                                                        CALLOUT_PENDING |
                                                        CALLOUT_WAITING);
                                        crit_exit();
                                        c_func(c_arg);
                                        crit_enter();
                                        lockmgr(c_lk, LK_RELEASE);
                                } else {
                                        flags = callout_setclear(c,
                                                        0,
                                                        CALLOUT_PENDING);
                                }
                        } else if (flags & CALLOUT_ACTIVE) {
                                flags = callout_setclear(c,
                                                CALLOUT_EXECUTED,
                                                CALLOUT_PENDING |
                                                CALLOUT_WAITING);
                                crit_exit();
                                c_func(c_arg);
                                crit_enter();
                        } else {
                                flags = callout_setclear(c,
                                                0,
                                                CALLOUT_PENDING |
                                                CALLOUT_WAITING);
                        }

                        /*
                         * Read and clear sc->running.  If bit 0 was set,
                         * a callout_stop() is likely blocked waiting for
                         * the callback to complete.
                         *
                         * The setclear above also cleared CALLOUT_WAITING
                         * and returns the contents of flags prior to clearing
                         * any bits.
                         *
                         * Interlock wakeup any _stop's waiting on us.  Note
                         * that once c_func() was called, the callout
                         * structure (c) pointer may no longer be valid.  It
                         * can only be used for the wakeup.
                         */
                        if ((atomic_readandclear_ptr(&sc->running) & 1) ||
                            (flags & CALLOUT_WAITING)) {
                                wakeup(c);
                        }

                        /* NOTE: list may have changed */
                }
                ++sc->softticks;
        }

        /*
         * Don't leave us holding the MP lock when we deschedule ourselves.
         */
        if (mpsafe == 0) {
                mpsafe = 1;
                rel_mplock();
        }
        sc->isrunning = 0;
        lwkt_deschedule_self(sc->thread);       /* == curthread */
        lwkt_switch();
        goto loop;
        /* NOT REACHED */
}
/*
 * A very slow system cleanup timer (10 second interval),
 * per-cpu.
 */
void
slotimer_callback(void *arg)
{
        struct callout *c = arg;

        slab_cleanup();
        callout_reset(c, hz * 10, slotimer_callback, c);
}
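
/*
 * The slotimer above is the canonical pattern for a self-rearming,
 * periodic callout: initialize once, arm with callout_reset(), and
 * re-arm from inside the callback.  A minimal sketch of the same pattern
 * for a hypothetical subsystem (names here are illustrative only and not
 * part of this file):
 *
 *	static struct callout my_timer;
 *
 *	static void
 *	my_timer_callback(void *arg)
 *	{
 *		// periodic work goes here
 *		callout_reset(&my_timer, hz, my_timer_callback, &my_timer);
 *	}
 *
 *	callout_init_mp(&my_timer);
 *	callout_reset(&my_timer, hz, my_timer_callback, &my_timer);
 */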
/*
 * Start or restart a timeout.  Installs the callout structure on the
 * callwheel of the current cpu.  Callers may legally pass any value, even
 * if 0 or negative, but since the sc->curticks index may have already
 * been processed a minimum timeout of 1 tick will be enforced.
 *
 * This function will block if the callout is currently queued to a different
 * cpu or the callback is currently running in another thread.
 */
void
callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg)
{
        softclock_pcpu_t sc;
        globaldata_t gd;

#ifdef INVARIANTS
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
                callout_init(c);
                kprintf(
                    "callout_reset(%p) from %p: callout was not initialized\n",
                    c, ((int **)&c)[-1]);
                print_backtrace(-1);
        }
#endif
        gd = mycpu;
        sc = &softclock_pcpu_ary[gd->gd_cpuid];
        crit_enter_gd(gd);

        /*
         * Our cpu must gain ownership of the callout and cancel anything
         * still running, which is complex.  The easiest way to do it is to
         * issue a callout_stop_sync().  callout_stop_sync() will also
         * handle CALLOUT_EXECUTED (dispatch waiting), and clear it.
         *
         * WARNING: callout_stop_sync()'s return state can race other
         * callout_*() calls due to blocking, so we must re-check.
         */
        for (;;) {
                int flags;
                int nflags;

                if (c->c_flags & (CALLOUT_ARMED_MASK | CALLOUT_EXECUTED))
                        callout_stop_sync(c);
                flags = c->c_flags & ~(CALLOUT_ARMED_MASK | CALLOUT_EXECUTED);
                nflags = (flags & ~CALLOUT_CPU_MASK) |
                         CALLOUT_CPU_TO_FLAGS(gd->gd_cpuid) |
                         CALLOUT_PENDING |
                         CALLOUT_ACTIVE;
                if (atomic_cmpset_int(&c->c_flags, flags, nflags))
                        break;
                cpu_pause();
        }

        /*
         * With the critical section held and PENDING set we now 'own' the
         * callout.
         */
        if (to_ticks <= 0)
                to_ticks = 1;

        c->c_arg = arg;
        c->c_func = ftn;
        c->c_time = sc->curticks + to_ticks;

        TAILQ_INSERT_TAIL(&sc->callwheel[c->c_time & cwheelmask],
                          c, c_links.tqe);
        crit_exit_gd(gd);
}
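
/*
 * Usage sketch (hypothetical names, for illustration): arming a one-shot
 * timeout and then re-arming it before it fires.  Because callout_reset()
 * takes ownership on the calling cpu and issues callout_stop_sync()
 * internally when the callout is still armed or was executed, the second
 * reset below simply replaces the first.
 *
 *	callout_init_mp(&xp->timeout);
 *	callout_reset(&xp->timeout, 5 * hz, xp_timeout_func, xp);
 *	...
 *	callout_reset(&xp->timeout, 10 * hz, xp_timeout_func, xp);
 */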
/*
 * Setup a callout to run on the specified cpu.  Should generally be used
 * to run a callout on a specific cpu which does not nominally change.  This
 * callout_reset() will be issued asynchronously via an IPI.
 */
void
callout_reset_bycpu(struct callout *c, int to_ticks, void (*ftn)(void *),
                    void *arg, int cpuid)
{
        globaldata_t gd;
        globaldata_t tgd;

#ifdef INVARIANTS
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
                callout_init(c);
                kprintf(
                    "callout_reset(%p) from %p: callout was not initialized\n",
                    c, ((int **)&c)[-1]);
                print_backtrace(-1);
        }
#endif
        gd = mycpu;
        crit_enter_gd(gd);

        tgd = globaldata_find(cpuid);

        /*
         * This code is similar to the code in callout_reset() but we assign
         * the callout to the target cpu.  We cannot set PENDING here since
         * we cannot atomically add the callout to the target cpu's queue.
         * However, incrementing the IPI count has the effect of locking
         * the cpu assignment.
         *
         * WARNING: callout_stop_sync()'s return state can race other
         * callout_*() calls due to blocking, so we must re-check.
         */
        for (;;) {
                int flags;
                int nflags;

                if (c->c_flags & (CALLOUT_ARMED_MASK | CALLOUT_EXECUTED))
                        callout_stop_sync(c);
                flags = c->c_flags & ~(CALLOUT_ARMED_MASK | CALLOUT_EXECUTED);
                nflags = (flags & ~(CALLOUT_CPU_MASK |
                                    CALLOUT_EXECUTED)) |
                         CALLOUT_CPU_TO_FLAGS(tgd->gd_cpuid) |
                         CALLOUT_ACTIVE;
                nflags = nflags + 1;            /* bump IPI count */
                if (atomic_cmpset_int(&c->c_flags, flags, nflags))
                        break;
                cpu_pause();
        }

        /*
         * Since we control our +1 in the IPI count, the target cpu cannot
         * now change until our IPI is processed.
         */
        if (to_ticks <= 0)
                to_ticks = 1;

        c->c_arg = arg;
        c->c_func = ftn;
        c->c_load = to_ticks;   /* IPI will add curticks */

        lwkt_send_ipiq(tgd, callout_reset_ipi, c);
        crit_exit_gd(gd);
}
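
/*
 * Sketch of cpu-targeted arming (illustrative names): a subsystem that
 * keeps one callout per cpu can arm each one on its home cpu from a
 * single thread; the actual queueing is completed on the target cpu by
 * callout_reset_ipi() below.
 *
 *	for (cpu = 0; cpu < ncpus; ++cpu) {
 *		callout_init_mp(&pcpu_data[cpu].timer);
 *		callout_reset_bycpu(&pcpu_data[cpu].timer, hz,
 *				    pcpu_timer_func, &pcpu_data[cpu], cpu);
 *	}
 */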
/*
 * Remote IPI for callout_reset_bycpu().  The cpu assignment cannot be
 * ripped out from under us due to the count in IPI_MASK, but it is possible
 * that other IPIs executed so we must deal with other flags that might
 * have been set or cleared.
 */
static void
callout_reset_ipi(void *arg)
{
        struct callout *c = arg;
        globaldata_t gd = mycpu;
        softclock_pcpu_t sc;
        int flags;
        int nflags;

        sc = &softclock_pcpu_ary[gd->gd_cpuid];

        for (;;) {
                flags = c->c_flags;
                cpu_ccfence();
                KKASSERT((flags & CALLOUT_IPI_MASK) > 0 &&
                         CALLOUT_FLAGS_TO_CPU(flags) == gd->gd_cpuid);

                nflags = (flags - 1) & ~(CALLOUT_EXECUTED | CALLOUT_WAITING);
                nflags |= CALLOUT_PENDING;

                /*
                 * Put us on the queue
                 */
                if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
                        if (flags & CALLOUT_PENDING) {
                                if (sc->next == c)
                                        sc->next = TAILQ_NEXT(c, c_links.tqe);
                                TAILQ_REMOVE(
                                        &sc->callwheel[c->c_time & cwheelmask],
                                        c,
                                        c_links.tqe);
                        }
                        c->c_time = sc->curticks + c->c_load;
                        TAILQ_INSERT_TAIL(
                                &sc->callwheel[c->c_time & cwheelmask],
                                c, c_links.tqe);
                        break;
                }
                /* retry */
                cpu_pause();
        }

        /*
         * Issue wakeup if requested.
         */
        if (flags & CALLOUT_WAITING)
                wakeup(c);
}
/*
 * Stop a running timer and ensure that any running callout completes before
 * returning.  If the timer is running on another cpu this function may block
 * to interlock against the callout.  If the callout is currently executing
 * or blocked in another thread this function may also block to interlock
 * against the callout.
 *
 * The caller must be careful to avoid deadlocks, either by using
 * callout_init_lk() (which uses the lockmgr lock cancelation feature),
 * by using tokens and dealing with breaks in the serialization, or by
 * using the lockmgr lock cancelation feature yourself in the callout
 * callback function.
 *
 * callout_stop() returns non-zero if the callout was pending.
 */
static int
_callout_stop(struct callout *c, int issync)
{
        globaldata_t gd = mycpu;
        globaldata_t tgd;
        softclock_pcpu_t sc;
        int flags;
        int nflags;
        int rc;
        int cpuid;

#ifdef INVARIANTS
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
                callout_init(c);
                kprintf(
                    "callout_stop(%p) from %p: callout was not initialized\n",
                    c, ((int **)&c)[-1]);
                print_backtrace(-1);
        }
#endif
        crit_enter_gd(gd);

        /*
         * Adjust flags for the required operation.  If the callout is
         * armed on another cpu we break out into the remote-cpu code which
         * will issue an IPI.  If it is not armed we are trivially done,
         * but may still need to test EXECUTED.
         */
        for (;;) {
                flags = c->c_flags;
                cpu_ccfence();

                cpuid = CALLOUT_FLAGS_TO_CPU(flags);

                /*
                 * Armed on remote cpu (break to remote-cpu code)
                 */
                if ((flags & CALLOUT_ARMED_MASK) && gd->gd_cpuid != cpuid) {
                        nflags = flags + 1;
                        if (atomic_cmpset_int(&c->c_flags, flags, nflags))
                                break;
                        cpu_pause();
                        continue;
                }

                /*
                 * Armed or armable on current cpu
                 */
                if (flags & CALLOUT_IPI_MASK) {
                        lwkt_process_ipiq();
                        cpu_pause();
                        continue;       /* retry */
                }

                /*
                 * If PENDING is set we can remove the callout from our
                 * queue and also use the side effect that the bit causes
                 * the callout to be locked to our cpu.
                 */
                if (flags & CALLOUT_PENDING) {
                        sc = &softclock_pcpu_ary[gd->gd_cpuid];
                        if (sc->next == c)
                                sc->next = TAILQ_NEXT(c, c_links.tqe);
                        TAILQ_REMOVE(
                                &sc->callwheel[c->c_time & cwheelmask],
                                c,
                                c_links.tqe);
                        c->c_func = NULL;

                        for (;;) {
                                flags = c->c_flags;
                                cpu_ccfence();
                                nflags = flags & ~(CALLOUT_ACTIVE |
                                                   CALLOUT_EXECUTED |
                                                   CALLOUT_WAITING |
                                                   CALLOUT_PENDING);
                                if (atomic_cmpset_int(&c->c_flags,
                                                      flags, nflags)) {
                                        goto skip_slow;
                                }
                                cpu_pause();
                        }
                        /* NOT REACHED */
                }

                /*
                 * If PENDING was not set the callout might not be locked
                 * to this cpu.
                 */
                nflags = flags & ~(CALLOUT_ACTIVE |
                                   CALLOUT_EXECUTED |
                                   CALLOUT_WAITING |
                                   CALLOUT_PENDING);
                if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
                        goto skip_slow;
                }
                cpu_pause();
                /* retry */
        }

        /*
         * Remote cpu path.  We incremented the IPI_MASK count so the callout
         * is now locked to the remote cpu and we can safely send an IPI
         * to it.
         *
         * Once sent, wait for all IPIs to be processed.
         */
        tgd = globaldata_find(cpuid);
        lwkt_send_ipiq3(tgd, callout_stop_ipi, c, issync);

        for (;;) {
                flags = c->c_flags;
                cpu_ccfence();

                if ((flags & CALLOUT_ARMED_MASK) == 0)
                        break;

                nflags = flags | CALLOUT_WAITING;
                tsleep_interlock(c, 0);
                if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
                        tsleep(c, PINTERLOCKED, "cstp1", 0);
                }
        }

        /*
         * Caller expects callout_stop_sync() to clear EXECUTED and return
         * its previous status.
         */
        atomic_clear_int(&c->c_flags, CALLOUT_EXECUTED);

skip_slow:
        if (flags & CALLOUT_WAITING)
                wakeup(c);

        /*
         * If (issync) we must also wait for any in-progress callbacks to
         * complete, unless the stop is being executed from the callback
         * itself.  The EXECUTED flag is set prior to the callback
         * being made so our existing flags status already has it.
         *
         * If auto-lock mode is being used, this is where we cancel any
         * blocked lock that is potentially preventing the target cpu
         * from completing the callback.
         */
        while (issync) {
                intptr_t *runp;
                intptr_t runco;

                sc = &softclock_pcpu_ary[cpuid];
                if (gd->gd_curthread == sc->thread)     /* stop from cb */
                        break;
                runp = &sc->running;
                runco = *runp;
                cpu_ccfence();
                if ((runco & ~(intptr_t)1) != (intptr_t)c)
                        break;
                if (c->c_flags & CALLOUT_AUTOLOCK)
                        lockmgr(c->c_lk, LK_CANCEL_BEG);
                tsleep_interlock(c, 0);
                if (atomic_cmpset_long(runp, runco, runco | 1))
                        tsleep(c, PINTERLOCKED, "cstp3", 0);
                if (c->c_flags & CALLOUT_AUTOLOCK)
                        lockmgr(c->c_lk, LK_CANCEL_END);
        }

        crit_exit_gd(gd);
        rc = (flags & CALLOUT_EXECUTED) != 0;

        return rc;
}
/*
 * IPI for stop function.  The callout is locked to the receiving cpu
 * by the IPI_MASK count.
 */
static
void
callout_stop_ipi(void *arg, int issync, struct intrframe *frame)
{
        globaldata_t gd = mycpu;
        struct callout *c = arg;
        softclock_pcpu_t sc;
        int flags;
        int nflags;

        flags = c->c_flags;
        cpu_ccfence();

        KKASSERT(CALLOUT_FLAGS_TO_CPU(flags) == gd->gd_cpuid);

        /*
         * We can handle the PENDING flag immediately.
         */
        if (flags & CALLOUT_PENDING) {
                sc = &softclock_pcpu_ary[gd->gd_cpuid];
                if (sc->next == c)
                        sc->next = TAILQ_NEXT(c, c_links.tqe);
                TAILQ_REMOVE(
                        &sc->callwheel[c->c_time & cwheelmask],
                        c,
                        c_links.tqe);
                c->c_func = NULL;
        }

        /*
         * Transition to the stopped state and decrement the IPI count.
         * Leave the EXECUTED bit alone (the next callout_reset() will
         * have to deal with it).
         */
        for (;;) {
                flags = c->c_flags;
                cpu_ccfence();
                nflags = (flags - 1) & ~(CALLOUT_ACTIVE |
                                         CALLOUT_PENDING |
                                         CALLOUT_WAITING);

                if (atomic_cmpset_int(&c->c_flags, flags, nflags))
                        break;
                cpu_pause();
        }
        if (flags & CALLOUT_WAITING)
                wakeup(c);
}

int
callout_stop(struct callout *c)
{
        return _callout_stop(c, 0);
}

int
callout_stop_sync(struct callout *c)
{
        return _callout_stop(c, 1);
}

void
callout_stop_async(struct callout *c)
{
        _callout_stop(c, 0);
}

void
callout_terminate(struct callout *c)
{
        _callout_stop(c, 1);
        atomic_clear_int(&c->c_flags, CALLOUT_DID_INIT);
}
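
/*
 * Teardown sketch (hypothetical names): when destroying an object that
 * owns a callout, callout_stop_sync() guarantees that no callback is
 * still running on any cpu before the memory is released.
 * callout_terminate() additionally clears CALLOUT_DID_INIT, so after it
 * returns the structure must go through callout_init*() again before any
 * further use.
 *
 *	callout_stop_sync(&xp->timeout);	// wait out any callback
 *	kfree(xp, M_MYSUBSYS);			// now safe to free
 */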
/*
 * Prepare a callout structure for use by callout_reset() and/or
 * callout_stop().
 *
 * The MP version of this routine requires that the callback
 * function installed by callout_reset() be MP safe.
 *
 * The LK version of this routine is also MPsafe and will automatically
 * acquire the specified lock for the duration of the function call,
 * and release it after the function returns.  In addition, when autolocking
 * is used, callout_stop() becomes synchronous if the caller owns the lock.
 * callout_reset(), callout_stop(), and callout_stop_sync() will block
 * normally instead of spinning when a cpu race occurs.  Lock cancelation
 * is used to avoid deadlocks against the callout ring dispatch.
 *
 * The init functions can be called from any cpu and do not have to be
 * called from the cpu that the timer will eventually run on.
 */
static __inline
void
_callout_init(struct callout *c, int flags)
{
        bzero(c, sizeof *c);
        c->c_flags = flags;
}

void
callout_init(struct callout *c)
{
        _callout_init(c, CALLOUT_DID_INIT);
}

void
callout_init_mp(struct callout *c)
{
        _callout_init(c, CALLOUT_DID_INIT | CALLOUT_MPSAFE);
}

void
callout_init_lk(struct callout *c, struct lock *lk)
{
        _callout_init(c, CALLOUT_DID_INIT | CALLOUT_MPSAFE | CALLOUT_AUTOLOCK);
        c->c_lk = lk;
}
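
/*
 * Autolock sketch (illustrative names): with callout_init_lk() the
 * dispatcher acquires the supplied lockmgr lock around the callback and
 * the stop functions use lock cancelation, so a holder of the lock can
 * stop the callout without deadlocking against a callback blocked on that
 * same lock.
 *
 *	struct mydev {
 *		struct lock	lk;
 *		struct callout	watchdog;
 *	};
 *
 *	lockinit(&dev->lk, "mydev", 0, 0);
 *	callout_init_lk(&dev->watchdog, &dev->lk);
 *	callout_reset(&dev->watchdog, hz, mydev_watchdog, dev);
 *
 *	// mydev_watchdog() runs with dev->lk held exclusively
 *
 *	lockmgr(&dev->lk, LK_EXCLUSIVE);
 *	callout_stop(&dev->watchdog);	// synchronous while the lock is owned
 *	lockmgr(&dev->lk, LK_RELEASE);
 */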