usched: Allow process to change self cpu affinity
[dragonfly.git] / sys / sys / callout.h
blobdb06bebd1e88c790678686aef9fd00cb7a79c108
1 /*
2 * Copyright (c) 2014 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
35 * Copyright (c) 1990, 1993
36 * The Regents of the University of California. All rights reserved.
37 * (c) UNIX System Laboratories, Inc.
39 * All or some portions of this file are derived from material licensed
40 * to the University of California by American Telephone and Telegraph
41 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
42 * the permission of UNIX System Laboratories, Inc.
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 * 3. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
69 #ifndef _SYS_CALLOUT_H_
70 #define _SYS_CALLOUT_H_
72 #ifndef _SYS_QUEUE_H_
73 #include <sys/queue.h>
74 #endif
75 #ifndef _SYS_LOCK_H_
76 #include <sys/lock.h>
77 #endif
78 #ifndef _CPU_ATOMIC_H_
79 #include <machine/atomic.h>
80 #endif
/*
 * Type-specialized list heads: struct callout_list is a singly-linked
 * list of struct callout, struct callout_tailq is a tail queue of them
 * (see <sys/queue.h>).
 */
SLIST_HEAD(callout_list, callout);
TAILQ_HEAD(callout_tailq, callout);
/*
 * Callwheel linkages are only adjusted on the target cpu.  All other
 * actions are handled with atomic ops on any cpu.  callout_reset() and
 * callout_stop() are always synchronous and will interlock against a
 * running callout.  The caller might block, and a deadlock is possible
 * if the caller does not use callout_init_lk() or is not careful with
 * locks acquired in the callout function.
 *
 * Programmers should note that our lockmgr locks have a cancellation
 * feature which can be used to avoid deadlocks.  callout_init_lk() also
 * uses this feature.
 *
 * callout_deactivate() is asynchronous and will not interlock against a
 * callout which is already running.
 */
/*
 * A single callout (timer) event.  The callwheel linkage (c_links) is
 * only manipulated on the callout's target cpu; the other fields are
 * manipulated with atomic ops and may be inspected from any cpu.
 *
 * NOTE: the closing brace was missing from the garbled source text and
 * has been restored here.
 */
struct callout {
	union {
		SLIST_ENTRY(callout) sle;	/* free/pending slist linkage */
		TAILQ_ENTRY(callout) tqe;	/* callwheel tailq linkage */
	} c_links;
	int	c_time;			/* match tick on event */
	int	c_load;			/* load value for reset ipi */
	void	*c_arg;			/* function argument */
	void	(*c_func) (void *);	/* function to call */
	int	c_flags;		/* state of this entry (CALLOUT_*) */
	int	c_unused02;		/* unused; preserves struct layout */
	struct lock *c_lk;		/* auto-lock set by callout_init_lk() */
};
/*
 * c_flags bit assignments.  The high byte holds state/interlock bits,
 * bits 12..23 hold the owning cpu id, and the low 12 bits count
 * in-flight IPIs.
 */
#define CALLOUT_ACTIVE		0x80000000 /* quick [de]activation flag */
#define CALLOUT_PENDING		0x40000000 /* callout is on callwheel */
#define CALLOUT_MPSAFE		0x20000000 /* callout does not need the BGL */
#define CALLOUT_DID_INIT	0x10000000 /* safety check */
#define CALLOUT_AUTOLOCK	0x08000000 /* auto locking / cancel feature */
#define CALLOUT_WAITING		0x04000000 /* interlocked waiter */
#define CALLOUT_EXECUTED	0x02000000 /* (generates stop status) */
#define CALLOUT_ARMED		0x01000000 /* callout is assigned to cpu */
#define CALLOUT_IPI_MASK	0x00000FFF /* ipi in-flight count mask */
#define CALLOUT_CPU_MASK	0x00FFF000 /* owning cpu id mask */

/*
 * Extract / insert the cpu id stored in c_flags.  The cpu id occupies
 * the 12 bits covered by CALLOUT_CPU_MASK.
 */
#define CALLOUT_FLAGS_TO_CPU(flags)	(((flags) & CALLOUT_CPU_MASK) >> 12)
#define CALLOUT_CPU_TO_FLAGS(cpuid)	((cpuid) << 12)
/*
 * WARNING! The caller is responsible for stabilizing the callout state,
 *	    our suggestion is to either manage the callout on the same cpu
 *	    or to use the callout_init_lk() feature and hold the lock while
 *	    making callout_*() calls.  The lock will be held automatically
 *	    by the callout wheel for any call-back and the callout wheel
 *	    will handle any callout_stop() deadlocks properly.
 *
 * active  -	Indicates that the callout is armed.  The callout can be in
 *		any state other than a stopped state.  That is, the callout
 *		reset could still be in-flight to the target cpu and not yet
 *		pending on the target cpu's callwheel, could be pending on
 *		the callwheel, may have already executed (but not have been
 *		stopped), or might be executing concurrently.
 *
 * deactivate - Disarm the callout, preventing it from being executed if it
 *		is queued or the queueing operation is in-flight.  Has no
 *		effect if the callout has already been dispatched.  Does not
 *		dequeue the callout.  Does not affect the status returned
 *		by callout_stop().
 *
 *		Not serialized, caller must be careful when racing a new
 *		callout_reset() that might be issued by the callback, which
 *		will re-arm the callout.
 *
 * pending -	Only useful for same-cpu callouts, indicates that the callout
 *		is pending on the callwheel or that a callout_reset() ipi
 *		is in-flight.
 */
/* Non-atomic test: is the callout armed (see "active" above)? */
#define	callout_active(c)	((c)->c_flags & CALLOUT_ACTIVE)

/* Atomically disarm the callout; does not dequeue or interlock it. */
#define	callout_deactivate(c)	atomic_clear_int(&(c)->c_flags, CALLOUT_ACTIVE)

/*
 * Non-atomic test: pending on the callwheel, or a reset ipi in-flight.
 * Only meaningful for same-cpu callouts (see "pending" above).
 */
#define	callout_pending(c)	((c)->c_flags & (CALLOUT_PENDING |	\
						 CALLOUT_IPI_MASK))
#ifdef _KERNEL

/* Global callout count tunable — defined in the callout implementation
 * (presumably kern/kern_timeout.c; not visible from this header). */
extern int ncallout;

struct globaldata;

/* Per-tick soft-timer driver hook. */
void	hardclock_softtick(struct globaldata *);

/* Initializers: plain, MP-safe, and auto-lock (lockmgr) variants. */
void	callout_init (struct callout *);
void	callout_init_mp (struct callout *);
void	callout_init_lk (struct callout *, struct lock *);

/* Arm/rearm the callout; _bycpu targets an explicit cpu id. */
void	callout_reset (struct callout *, int, void (*)(void *), void *);
void	callout_reset_bycpu (struct callout *, int, void (*)(void *), void *,
			int);

/* Stop variants: synchronous, fire-and-forget, and fully-synchronized. */
int	callout_stop (struct callout *);
void	callout_stop_async (struct callout *);
int	callout_stop_sync (struct callout *);
void	callout_terminate (struct callout *);

/* FreeBSD-compatible alias for a fully synchronous stop. */
#define	callout_drain(x) callout_stop_sync(x)

#endif

#endif	/* _SYS_CALLOUT_H_ */