/*
 * SYS/THREAD2.H
 *
 * Implements inline procedure support for the LWKT subsystem.
 *
 * Generally speaking these routines only operate on threads associated
 * with the current cpu.  For example, a higher priority thread pending
 * on a different cpu will not be immediately scheduled by a yield() on
 * this cpu.
 */
#ifndef _SYS_THREAD2_H_
#define _SYS_THREAD2_H_

#ifndef _KERNEL
#error "This file should not be included by userland programs."
#else

/*
 * Userland will have its own globaldata which it includes prior to this.
 */
#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/cpufunc.h>
#include <machine/cpumask.h>
/*
 * Don't let GCC reorder critical section count adjustments, because it
 * will BLOW US UP if it does.
 */
static __inline void
crit_enter_raw(thread_t td)
{
	cpu_ccfence();
	++td->td_critcount;
	cpu_ccfence();
}

static __inline void
crit_exit_raw(thread_t td)
{
	cpu_ccfence();
	--td->td_critcount;
	cpu_ccfence();
}
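
/*
 * Illustrative sketch (editorial addition, not part of the kernel API):
 * without the cpu_ccfence() compiler barriers above, the compiler would
 * be free to move the protected memory accesses across the td_critcount
 * adjustment, so an interrupt taken at that point could observe an
 * apparently-zero critical section count.  A hypothetical caller:
 *
 *	crit_enter_raw(td);
 *	protected_counter++;	(must stay between the two fenced updates)
 *	crit_exit_raw(td);
 *
 * "protected_counter" stands in for whatever per-cpu state the caller is
 * protecting; it is not defined in this header.
 */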
/*
 * Is a token held either by the specified thread or held shared?
 *
 * We can't inexpensively validate the thread for a shared token
 * without iterating td->td_toks, so this isn't a perfect test.
 */
static __inline int
_lwkt_token_held_any(lwkt_token_t tok, thread_t td)
{
	long count = tok->t_count;

	cpu_ccfence();
	if (tok->t_ref >= &td->td_toks_base && tok->t_ref < td->td_toks_stop)
		return TRUE;
	if ((count & TOK_EXCLUSIVE) == 0 &&
	    (count & ~(TOK_EXCLUSIVE|TOK_EXCLREQ))) {
		return TRUE;
	}
	return FALSE;
}

/*
 * Is a token held by the specified thread?
 */
static __inline int
_lwkt_token_held_excl(lwkt_token_t tok, thread_t td)
{
	return ((tok->t_ref >= &td->td_toks_base &&
		 tok->t_ref < td->td_toks_stop));
}
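
/*
 * Illustrative note (editorial addition): these helpers are normally
 * reached through wrapper macros rather than called directly.  Assuming
 * the LWKT_TOKEN_HELD_ANY()/LWKT_TOKEN_HELD_EXCL() wrappers declared in
 * <sys/thread.h>, a typical assertion would look like:
 *
 *	KKASSERT(LWKT_TOKEN_HELD_ANY(&vm_token));
 *
 * The wrapper names and vm_token are assumptions used only for
 * illustration; this header defines only _lwkt_token_held_any() and
 * _lwkt_token_held_excl().
 */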
/*
 * Critical section debugging
 */
#ifdef DEBUG_CRIT_SECTIONS
#define __DEBUG_CRIT_ARG__		const char *id
#define __DEBUG_CRIT_ADD_ARG__		, const char *id
#define __DEBUG_CRIT_PASS_ARG__		, id
#define __DEBUG_CRIT_ENTER(td)		_debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)		_debug_crit_exit((td), id)
#define crit_enter()			_crit_enter(mycpu, __func__)
#define crit_enter_id(id)		_crit_enter(mycpu, id)
#define crit_enter_gd(curgd)		_crit_enter((curgd), __func__)
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd), __func__)
#define crit_enter_hard()		_crit_enter_hard(mycpu, __func__)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd), __func__)
#define crit_exit()			_crit_exit(mycpu, __func__)
#define crit_exit_id(id)		_crit_exit(mycpu, id)
#define crit_exit_gd(curgd)		_crit_exit((curgd), __func__)
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd), __func__)
#define crit_exit_hard()		_crit_exit_hard(mycpu, __func__)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd), __func__)
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd), __func__)
#else
#define __DEBUG_CRIT_ARG__		void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()			_crit_enter(mycpu)
#define crit_enter_id(id)		_crit_enter(mycpu)
#define crit_enter_gd(curgd)		_crit_enter((curgd))
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd))
#define crit_enter_hard()		_crit_enter_hard(mycpu)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd))
#define crit_exit()			crit_exit_wrapper()
#define crit_exit_id(id)		_crit_exit(mycpu)
#define crit_exit_gd(curgd)		_crit_exit((curgd))
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd))
#define crit_exit_hard()		_crit_exit_hard(mycpu)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd))
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd))
#endif

extern void crit_exit_wrapper(__DEBUG_CRIT_ARG__);
/*
 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
 */
#ifdef DEBUG_CRIT_SECTIONS

static __inline void
_debug_crit_enter(thread_t td, const char *id)
{
	int wi = td->td_crit_debug_index;

	td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
	++td->td_crit_debug_index;
}

static __inline void
_debug_crit_exit(thread_t td, const char *id)
{
	const char *gid;
	int wi;

	wi = td->td_crit_debug_index - 1;
	if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
		if (td->td_in_crit_report == 0) {
			td->td_in_crit_report = 1;
			kprintf("crit_exit(%s) expected id %s\n", id, gid);
			td->td_in_crit_report = 0;
		}
	}
	--td->td_crit_debug_index;
}

#endif
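
/*
 * Illustrative sketch (editorial addition): with DEBUG_CRIT_SECTIONS
 * defined, crit_enter() records __func__ (or an explicit id) and the
 * matching crit_exit() checks it, so a hypothetical mismatched pairing
 * such as
 *
 *	crit_enter_id("softclock");
 *	...
 *	crit_exit_id("hardclock");
 *
 * produces a "crit_exit(hardclock) expected id softclock" warning.  The
 * id strings above are arbitrary examples.
 */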
/*
 * Critical sections prevent preemption, but allow explicit blocking
 * and thread switching.  Any interrupt occurring while in a critical
 * section is made pending and returns immediately.  Interrupts are not
 * physically disabled.
 *
 * Hard critical sections prevent preemption and disallow any blocking
 * or thread switching, and in addition will assert on any blockable
 * operation (acquire token not already held, lockmgr, mutex ops, or
 * splz).  Spinlocks can still be used in hard sections.
 *
 * All critical section routines only operate on the current thread.
 * Passed gd or td arguments are simply optimizations when mycpu or
 * curthread is already available to the caller.
 */
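
/*
 * Illustrative sketch (editorial addition): the usual pattern is a
 * balanced pair bracketing code that must not be preempted on this cpu.
 * Per the description above, the body of a normal critical section may
 * still block or switch threads:
 *
 *	crit_enter();
 *	... touch per-cpu or interrupt-shared state ...
 *	crit_exit();
 *
 * crit_enter_hard()/crit_exit_hard() follow the same pattern, but the
 * body must not perform any blockable operation.
 */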
/*
 * crit_enter
 */
static __inline void
_crit_enter_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	crit_enter_raw(td);
	__DEBUG_CRIT_ENTER(td);
}

static __inline void
_crit_enter(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_enter_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
	++gd->gd_intr_nesting_level;
	cpu_ccfence();
}
/*
 * crit_exit*()
 *
 * NOTE: Conditionalizing just gd_reqflags, a case which is virtually
 *	 never true regardless of crit_count, should result in 100%
 *	 optimal code execution.  We don't check crit_count because
 *	 it just bloats the inline and does not improve performance.
 *
 * NOTE: This can produce a considerable amount of code despite the
 *	 relatively few lines of code so the non-debug case typically
 *	 just wraps it in a real function, crit_exit_wrapper().
 */
static __inline void
_crit_exit_noyield(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	__DEBUG_CRIT_EXIT(td);
	crit_exit_raw(td);
#ifdef INVARIANTS
	if (__predict_false(td->td_critcount < 0))
		crit_panic();
#endif
}

static __inline void
_crit_exit_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	_crit_exit_noyield(td __DEBUG_CRIT_PASS_ARG__);
	if (__predict_false(td->td_gd->gd_reqflags & RQF_IDLECHECK_MASK))
		lwkt_maybe_splz(td);
}

static __inline void
_crit_exit(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_exit_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	cpu_ccfence();
	--gd->gd_intr_nesting_level;
	_crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline int
crit_test(thread_t td)
{
	return(td->td_critcount);
}
/*
 * Return whether any threads are runnable.
 */
static __inline int
lwkt_runnable(void)
{
	return (TAILQ_FIRST(&mycpu->gd_tdrunq) != NULL);
}

static __inline int
lwkt_getpri(thread_t td)
{
	return(td->td_pri);
}

static __inline int
lwkt_getpri_self(void)
{
	return(lwkt_getpri(curthread));
}
/*
 * Reduce our priority in preparation for a return to userland.  If
 * our passive release function was still in place, our priority was
 * never raised and does not need to be reduced.
 *
 * See also lwkt_passive_release() and platform/blah/trap.c
 */
static __inline void
lwkt_passive_recover(thread_t td)
{
#ifndef NO_LWKT_SPLIT_USERPRI
	if (td->td_release == NULL)
		lwkt_setpri_self(TDPRI_USER_NORM);
	td->td_release = NULL;
#endif
}
/*
 * cpusync support
 */
static __inline void
lwkt_cpusync_init(lwkt_cpusync_t cs, cpumask_t mask,
		  cpusync_func_t func, void *data)
{
	cs->cs_mask = mask;
	/* cs->cs_mack = 0; handled by _interlock */
	cs->cs_func = func;
	cs->cs_data = data;
}
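
/*
 * Illustrative sketch (editorial addition, names assumed): a cpusync is
 * typically initialized here and then driven with the interlock and
 * deinterlock routines declared in <sys/thread.h>, which run the callback
 * on the cpus in the mask:
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, smp_active_mask, my_sync_func, my_arg);
 *	lwkt_cpusync_interlock(&cs);
 *	... perform the update the other cpus must observe ...
 *	lwkt_cpusync_deinterlock(&cs);
 *
 * lwkt_cpusync_interlock()/lwkt_cpusync_deinterlock(), smp_active_mask,
 * my_sync_func and my_arg are assumptions used only to show the call
 * sequence; only lwkt_cpusync_init() is defined in this file.
 */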
/*
 * IPIQ messaging wrappers.  IPIQ remote functions are passed three arguments:
 * a void * pointer, an integer, and a pointer to the trap frame (or NULL if
 * the trap frame is not known).  However, we wish to provide opaque
 * interfaces for simpler callbacks... the basic IPI messaging function as
 * used by the kernel takes a single argument.
 */
static __inline int
lwkt_send_ipiq(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2(globaldata_t target, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_mask(cpumask_t mask, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_mask(cpumask_t mask, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_passive(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_passive(globaldata_t target, ipifunc2_t func,
			void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_bycpu(int dcpu, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2));
}
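
/*
 * Illustrative sketch (editorial addition): the single-argument form takes
 * an ipifunc1_t, i.e. a function of one void * argument, and queues it for
 * execution on the target cpu:
 *
 *	static void
 *	my_remote_func(void *arg)
 *	{
 *		... runs on the target cpu ...
 *	}
 *
 *	lwkt_send_ipiq(globaldata_find(n), my_remote_func, arg);
 *
 * my_remote_func, n and arg are hypothetical; globaldata_find() is assumed
 * to be the usual way to map a cpu number to its globaldata.
 */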
static __inline int
lwkt_need_ipiq_process(globaldata_t gd)
{
	lwkt_ipiq_t ipiq;

	if (CPUMASK_TESTNZERO(gd->gd_ipimask))
		return 1;

	ipiq = &gd->gd_cpusyncq;
	return (ipiq->ip_rindex != ipiq->ip_windex);
}

#endif	/* _KERNEL */
#endif	/* _SYS_THREAD2_H_ */