/*
 * sys/sys/thread2.h (DragonFly BSD)
 * NOTE(review): the lines below were extraction metadata from a git web
 * view (commit subject / path / blob hash), preserved here for provenance:
 *   usched: Allow process to change self cpu affinity
 *   dragonfly.git: sys/sys/thread2.h
 *   blob ccd93a057207a483994fe039aa676b89b67c907e
 */
/*
 * SYS/THREAD2.H
 *
 * Implements inline procedure support for the LWKT subsystem.
 *
 * Generally speaking these routines only operate on threads associated
 * with the current cpu.  For example, a higher priority thread pending
 * on a different cpu will not be immediately scheduled by a yield() on
 * this cpu.
 */
12 #ifndef _SYS_THREAD2_H_
13 #define _SYS_THREAD2_H_
15 #ifndef _KERNEL
17 #error "This file should not be included by userland programs."
19 #else
22 * Userland will have its own globaldata which it includes prior to this.
24 #ifndef _SYS_SYSTM_H_
25 #include <sys/systm.h>
26 #endif
27 #ifndef _SYS_GLOBALDATA_H_
28 #include <sys/globaldata.h>
29 #endif
30 #include <machine/cpufunc.h>
31 #include <machine/cpumask.h>
34 * Is a token held either by the specified thread or held shared?
36 * We can't inexpensively validate the thread for a shared token
37 * without iterating td->td_toks, so this isn't a perfect test.
39 static __inline int
40 _lwkt_token_held_any(lwkt_token_t tok, thread_t td)
42 long count = tok->t_count;
44 cpu_ccfence();
45 if (tok->t_ref >= &td->td_toks_base && tok->t_ref < td->td_toks_stop)
46 return TRUE;
47 if ((count & TOK_EXCLUSIVE) == 0 &&
48 (count & ~(TOK_EXCLUSIVE|TOK_EXCLREQ))) {
49 return TRUE;
51 return FALSE;
55 * Is a token held by the specified thread?
57 static __inline int
58 _lwkt_token_held_excl(lwkt_token_t tok, thread_t td)
60 return ((tok->t_ref >= &td->td_toks_base &&
61 tok->t_ref < td->td_toks_stop));
/*
 * Critical section debugging.
 *
 * When DEBUG_CRIT_SECTIONS is defined, each crit_enter/crit_exit records
 * an identifier (__func__ or an explicit id) so mismatched pairs can be
 * reported; otherwise the id plumbing compiles away to nothing.
 */
#ifdef DEBUG_CRIT_SECTIONS

#define __DEBUG_CRIT_ARG__		const char *id
#define __DEBUG_CRIT_ADD_ARG__		, const char *id
#define __DEBUG_CRIT_PASS_ARG__		, id
#define __DEBUG_CRIT_ENTER(td)		_debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)		_debug_crit_exit((td), id)
#define crit_enter()			_crit_enter(mycpu, __func__)
#define crit_enter_id(id)		_crit_enter(mycpu, id)
#define crit_enter_gd(curgd)		_crit_enter((curgd), __func__)
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd), __func__)
#define crit_enter_hard()		_crit_enter_hard(mycpu, __func__)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd), __func__)
#define crit_exit()			_crit_exit(mycpu, __func__)
#define crit_exit_id(id)		_crit_exit(mycpu, id)
#define crit_exit_gd(curgd)		_crit_exit((curgd), __func__)
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd), __func__)
#define crit_exit_hard()		_crit_exit_hard(mycpu, __func__)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd), __func__)
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd),__func__)

#else

#define __DEBUG_CRIT_ARG__		void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()			_crit_enter(mycpu)
#define crit_enter_id(id)		_crit_enter(mycpu)
#define crit_enter_gd(curgd)		_crit_enter((curgd))
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd))
#define crit_enter_hard()		_crit_enter_hard(mycpu)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd))
/* non-debug crit_exit() calls a real function to limit inline bloat */
#define crit_exit()			crit_exit_wrapper()
#define crit_exit_id(id)		_crit_exit(mycpu)
#define crit_exit_gd(curgd)		_crit_exit((curgd))
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd))
#define crit_exit_hard()		_crit_exit_hard(mycpu)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd))
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd))

#endif

extern void crit_exit_wrapper(__DEBUG_CRIT_ARG__);
/*
 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
 */
#ifdef DEBUG_CRIT_SECTIONS

/*
 * Record the entering id in the thread's circular debug array.
 */
static __inline void
_debug_crit_enter(thread_t td, const char *id)
{
	int wi = td->td_crit_debug_index;

	td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
	++td->td_crit_debug_index;
}

/*
 * Pop the most recent entering id and complain if it does not match
 * the exiting id.  td_in_crit_report guards against kprintf() itself
 * recursing into a mismatched crit section report.
 */
static __inline void
_debug_crit_exit(thread_t td, const char *id)
{
	const char *gid;
	int wi;

	wi = td->td_crit_debug_index - 1;
	if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
		if (td->td_in_crit_report == 0) {
			td->td_in_crit_report = 1;
			kprintf("crit_exit(%s) expected id %s\n", id, gid);
			td->td_in_crit_report = 0;
		}
	}
	--td->td_crit_debug_index;
}

#endif
/*
 * Critical sections prevent preemption, but allow explicit blocking
 * and thread switching.  Any interrupt occurring while in a critical
 * section is made pending and returns immediately.  Interrupts are not
 * physically disabled.
 *
 * Hard critical sections prevent preemption and disallow any blocking
 * or thread switching, and in addition will assert on any blockable
 * operation (acquire token not already held, lockmgr, mutex ops, or
 * splz).  Spinlocks can still be used in hard sections.
 *
 * All critical section routines only operate on the current thread.
 * Passed gd or td arguments are simply optimizations when mycpu or
 * curthread is already available to the caller.
 */
159 * crit_enter
161 static __inline void
162 _crit_enter_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
164 ++td->td_critcount;
165 __DEBUG_CRIT_ENTER(td);
166 cpu_ccfence();
169 static __inline void
170 _crit_enter(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
172 _crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
175 static __inline void
176 _crit_enter_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
178 _crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
179 ++gd->gd_intr_nesting_level;
184 * crit_exit*()
186 * NOTE: Conditionalizing just gd_reqflags, a case which is virtually
187 * never true regardless of crit_count, should result in 100%
188 * optimal code execution. We don't check crit_count because
189 * it just bloats the inline and does not improve performance.
191 * NOTE: This can produce a considerable amount of code despite the
192 * relatively few lines of code so the non-debug case typically
193 * just wraps it in a real function, crit_exit_wrapper().
195 static __inline void
196 _crit_exit_noyield(thread_t td __DEBUG_CRIT_ADD_ARG__)
198 __DEBUG_CRIT_EXIT(td);
199 --td->td_critcount;
200 #ifdef INVARIANTS
201 if (__predict_false(td->td_critcount < 0))
202 crit_panic();
203 #endif
204 cpu_ccfence(); /* prevent compiler reordering */
207 static __inline void
208 _crit_exit_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
210 _crit_exit_noyield(td __DEBUG_CRIT_PASS_ARG__);
211 if (__predict_false(td->td_gd->gd_reqflags & RQF_IDLECHECK_MASK))
212 lwkt_maybe_splz(td);
215 static __inline void
216 _crit_exit(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
218 _crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
221 static __inline void
222 _crit_exit_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
224 --gd->gd_intr_nesting_level;
225 _crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
228 static __inline int
229 crit_test(thread_t td)
231 return(td->td_critcount);
235 * Return whether any threads are runnable.
237 static __inline int
238 lwkt_runnable(void)
240 return (TAILQ_FIRST(&mycpu->gd_tdrunq) != NULL);
243 static __inline int
244 lwkt_getpri(thread_t td)
246 return(td->td_pri);
249 static __inline int
250 lwkt_getpri_self(void)
252 return(lwkt_getpri(curthread));
256 * Reduce our priority in preparation for a return to userland. If
257 * our passive release function was still in place, our priority was
258 * never raised and does not need to be reduced.
260 * See also lwkt_passive_release() and platform/blah/trap.c
262 static __inline void
263 lwkt_passive_recover(thread_t td)
265 #ifndef NO_LWKT_SPLIT_USERPRI
266 if (td->td_release == NULL)
267 lwkt_setpri_self(TDPRI_USER_NORM);
268 td->td_release = NULL;
269 #endif
273 * cpusync support
275 static __inline void
276 lwkt_cpusync_init(lwkt_cpusync_t cs, cpumask_t mask,
277 cpusync_func_t func, void *data)
279 cs->cs_mask = mask;
280 /* cs->cs_mack = 0; handled by _interlock */
281 cs->cs_func = func;
282 cs->cs_data = data;
286 * IPIQ messaging wrappers. IPIQ remote functions are passed three arguments:
287 * a void * pointer, an integer, and a pointer to the trap frame (or NULL if
288 * the trap frame is not known). However, we wish to provide opaque
289 * interfaces for simpler callbacks... the basic IPI messaging function as
290 * used by the kernel takes a single argument.
292 static __inline int
293 lwkt_send_ipiq(globaldata_t target, ipifunc1_t func, void *arg)
295 return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg, 0));
298 static __inline int
299 lwkt_send_ipiq2(globaldata_t target, ipifunc2_t func, void *arg1, int arg2)
301 return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg1, arg2));
304 static __inline int
305 lwkt_send_ipiq_mask(cpumask_t mask, ipifunc1_t func, void *arg)
307 return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg, 0));
310 static __inline int
311 lwkt_send_ipiq2_mask(cpumask_t mask, ipifunc2_t func, void *arg1, int arg2)
313 return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg1, arg2));
316 static __inline int
317 lwkt_send_ipiq_passive(globaldata_t target, ipifunc1_t func, void *arg)
319 return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg, 0));
322 static __inline int
323 lwkt_send_ipiq2_passive(globaldata_t target, ipifunc2_t func,
324 void *arg1, int arg2)
326 return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg1, arg2));
329 static __inline int
330 lwkt_send_ipiq_bycpu(int dcpu, ipifunc1_t func, void *arg)
332 return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg, 0));
335 static __inline int
336 lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2)
338 return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2));
341 static __inline int
342 lwkt_need_ipiq_process(globaldata_t gd)
344 lwkt_ipiq_t ipiq;
346 if (CPUMASK_TESTNZERO(gd->gd_ipimask))
347 return 1;
349 ipiq = &gd->gd_cpusyncq;
350 return (ipiq->ip_rindex != ipiq->ip_windex);
353 #endif /* _KERNEL */
354 #endif /* _SYS_THREAD2_H_ */