4 * Implements inline procedure support for the LWKT subsystem.
6 * Generally speaking these routines only operate on threads associated
7 * with the current cpu. For example, a higher priority thread pending
8 * on a different cpu will not be immediately scheduled by a yield() on
12 #ifndef _SYS_THREAD2_H_
13 #define _SYS_THREAD2_H_
17 #error "This file should not be included by userland programs."
22 * Userland will have its own globaldata which it includes prior to this.
25 #include <sys/systm.h>
27 #ifndef _SYS_GLOBALDATA_H_
28 #include <sys/globaldata.h>
30 #include <machine/cpufunc.h>
31 #include <machine/cpumask.h>
/*
 * Don't let GCC reorder critical section count adjustments, because it
 * will BLOW US UP if it does.
 */
/*
 * NOTE(review): the bodies of crit_enter_raw() and crit_exit_raw() are
 * not visible in this chunk -- only their signatures survive.
 * Presumably they adjust td->td_critcount between compiler fences (per
 * the reordering warning above) -- confirm against the full source
 * before reconstructing.
 */
crit_enter_raw(thread_t td)
crit_exit_raw(thread_t td)
/*
 * Is a token held either by the specified thread or held shared?
 *
 * We can't inexpensively validate the thread for a shared token
 * without iterating td->td_toks, so this isn't a perfect test.
 */
/*
 * NOTE(review): the return-type line, braces, and the return
 * statements taken when either test below matches are missing from
 * this chunk.  The surviving fragments are kept verbatim.
 */
_lwkt_token_held_any(lwkt_token_t tok, thread_t td)
	/* Snapshot t_count once; it may change under us. */
	long count = tok->t_count;
	/* Exclusive hold: t_ref points into td's own token reference array. */
	if (tok->t_ref >= &td->td_toks_base && tok->t_ref < td->td_toks_stop)
	/*
	 * Shared hold: TOK_EXCLUSIVE clear but count bits other than
	 * TOK_EXCLUSIVE|TOK_EXCLREQ are set.
	 */
	if ((count & TOK_EXCLUSIVE) == 0 &&
	    (count & ~(TOK_EXCLUSIVE|TOK_EXCLREQ))) {
75 * Is a token held by the specified thread?
78 _lwkt_token_held_excl(lwkt_token_t tok
, thread_t td
)
80 return ((tok
->t_ref
>= &td
->td_toks_base
&&
81 tok
->t_ref
< td
->td_toks_stop
));
/*
 * Critical section debugging: when DEBUG_CRIT_SECTIONS is defined the
 * crit_*() entry points pass a caller-identifying string down to the
 * _crit_*() inlines; otherwise the id plumbing compiles away entirely.
 *
 * This chunk had lost the #else/#endif separating the two variants,
 * which left every macro defined twice; the conditional structure is
 * restored here.
 */
#ifdef DEBUG_CRIT_SECTIONS
#define __DEBUG_CRIT_ARG__		const char *id
#define __DEBUG_CRIT_ADD_ARG__		, const char *id
#define __DEBUG_CRIT_PASS_ARG__		, id
#define __DEBUG_CRIT_ENTER(td)		_debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)		_debug_crit_exit((td), id)
#define crit_enter()			_crit_enter(mycpu, __func__)
#define crit_enter_id(id)		_crit_enter(mycpu, id)
#define crit_enter_gd(curgd)		_crit_enter((curgd), __func__)
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd), __func__)
#define crit_enter_hard()		_crit_enter_hard(mycpu, __func__)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd), __func__)
#define crit_exit()			_crit_exit(mycpu, __func__)
#define crit_exit_id(id)		_crit_exit(mycpu, id)
#define crit_exit_gd(curgd)		_crit_exit((curgd), __func__)
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd), __func__)
#define crit_exit_hard()		_crit_exit_hard(mycpu, __func__)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd), __func__)
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd),__func__)
#else
#define __DEBUG_CRIT_ARG__		void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()			_crit_enter(mycpu)
#define crit_enter_id(id)		_crit_enter(mycpu)
#define crit_enter_gd(curgd)		_crit_enter((curgd))
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd))
#define crit_enter_hard()		_crit_enter_hard(mycpu)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd))
/* non-debug crit_exit() goes through a real function to limit inline bloat */
#define crit_exit()			crit_exit_wrapper()
#define crit_exit_id(id)		_crit_exit(mycpu)
#define crit_exit_gd(curgd)		_crit_exit((curgd))
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd))
#define crit_exit_hard()		_crit_exit_hard(mycpu)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd))
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd))
#endif
/*
 * Out-of-line implementation of the non-debug crit_exit(); exists as a
 * real function to limit inline code bloat (see the NOTE above
 * _crit_exit_noyield).
 */
extern void crit_exit_wrapper(__DEBUG_CRIT_ARG__);
/*
 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
 *
 * This chunk had lost the functions' storage-class lines, braces,
 * local declarations, and closing #endif; they are restored here
 * (declaration types inferred from use: gid holds an element of
 * td_crit_debug_array, wi is an index like the one in
 * _debug_crit_enter).
 */
#ifdef DEBUG_CRIT_SECTIONS

/*
 * Record the caller id of a crit_enter() in the thread's debug ring.
 */
static __inline void
_debug_crit_enter(thread_t td, const char *id)
{
	int wi = td->td_crit_debug_index;

	td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
	++td->td_crit_debug_index;
}

/*
 * Pop the debug ring on crit_exit() and complain when the exiting id
 * does not match the most recent crit_enter() id.
 */
static __inline void
_debug_crit_exit(thread_t td, const char *id)
{
	const char *gid;
	int wi;

	wi = td->td_crit_debug_index - 1;
	if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
		if (td->td_in_crit_report == 0) {
			/* td_in_crit_report guards against recursive reports */
			td->td_in_crit_report = 1;
			kprintf("crit_exit(%s) expected id %s\n", id, gid);
			td->td_in_crit_report = 0;
		}
	}
	--td->td_crit_debug_index;
}

#endif
/*
 * Critical sections prevent preemption, while still allowing explicit
 * blocking and thread switching.  Any interrupt occurring while in a
 * critical section is made pending and returns immediately.  Interrupts
 * are not physically disabled.
 *
 * Hard critical sections prevent preemption and disallow any blocking
 * or thread switching, and in addition will assert on any blockable
 * operation (acquire token not already held, lockmgr, mutex ops, or
 * splz).  Spinlocks can still be used in hard sections.
 *
 * All critical section routines only operate on the current thread.
 * Passed gd or td arguments are simply optimizations when mycpu or
 * curthread is already available to the caller.
 */
/*
 * Enter a critical section on the given thread (per the section
 * comment, td must be the current thread).
 * NOTE(review): the lines between the signature and the debug hook --
 * presumably the td_critcount bump via crit_enter_raw() -- are missing
 * from this chunk.
 */
_crit_enter_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
	__DEBUG_CRIT_ENTER(td);
189 _crit_enter(globaldata_t gd __DEBUG_CRIT_ADD_ARG__
)
191 _crit_enter_quick(gd
->gd_curthread __DEBUG_CRIT_PASS_ARG__
);
195 _crit_enter_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__
)
197 _crit_enter_quick(gd
->gd_curthread __DEBUG_CRIT_PASS_ARG__
);
198 ++gd
->gd_intr_nesting_level
;
/*
 * NOTE: Conditionalizing just gd_reqflags, a case which is virtually
 *	 never true regardless of crit_count, should result in 100%
 *	 optimal code execution.  We don't check crit_count because
 *	 it just bloats the inline and does not improve performance.
 *
 * NOTE: This can produce a considerable amount of code despite the
 *	 relatively few lines of code, so the non-debug case typically
 *	 just wraps it in a real function, crit_exit_wrapper().
 */
/*
 * Exit a critical section without yielding.
 * NOTE(review): the lines that actually drop td_critcount and the
 * consequence of the negative-count check below (presumably a panic)
 * are missing from this chunk.
 */
_crit_exit_noyield(thread_t td __DEBUG_CRIT_ADD_ARG__)
	__DEBUG_CRIT_EXIT(td);
	/* underflow check: the critical section count must never go negative */
	if (__predict_false(td->td_critcount < 0))
/*
 * Exit a critical section and respond to pending reschedule requests.
 * NOTE(review): the action taken when gd_reqflags has RQF_IDLECHECK_MASK
 * bits set (the line after the if) is missing from this chunk.
 */
_crit_exit_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
	_crit_exit_noyield(td __DEBUG_CRIT_PASS_ARG__);
	if (__predict_false(td->td_gd->gd_reqflags & RQF_IDLECHECK_MASK))
235 _crit_exit(globaldata_t gd __DEBUG_CRIT_ADD_ARG__
)
237 _crit_exit_quick(gd
->gd_curthread __DEBUG_CRIT_PASS_ARG__
);
241 _crit_exit_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__
)
244 --gd
->gd_intr_nesting_level
;
245 _crit_exit_quick(gd
->gd_curthread __DEBUG_CRIT_PASS_ARG__
);
249 crit_test(thread_t td
)
251 return(td
->td_critcount
);
/*
 * Return whether any threads are runnable.
 */
/*
 * NOTE(review): the enclosing function's storage class, signature, and
 * opening brace are missing from this chunk; only the return expression
 * (a non-empty check of this cpu's run queue) survives.
 */
	return (TAILQ_FIRST(&mycpu->gd_tdrunq) != NULL);
/*
 * NOTE(review): only the signature of lwkt_getpri() survives in this
 * chunk; its body (presumably returning the thread's LWKT priority) is
 * missing -- confirm against the full source.
 */
lwkt_getpri(thread_t td)
270 lwkt_getpri_self(void)
272 return(lwkt_getpri(curthread
));
/*
 * Reduce our priority in preparation for a return to userland.  If
 * our passive release function was still in place, our priority was
 * never raised and does not need to be reduced.
 *
 * See also lwkt_passive_release() and platform/blah/trap.c
 */
283 lwkt_passive_recover(thread_t td
)
285 #ifndef NO_LWKT_SPLIT_USERPRI
286 if (td
->td_release
== NULL
)
287 lwkt_setpri_self(TDPRI_USER_NORM
);
288 td
->td_release
= NULL
;
/*
 * Initialize a cpusync request structure with the target cpu mask,
 * callback function, and callback data.
 * NOTE(review): the member assignments are missing from this chunk;
 * only the signature and one original comment survive.
 */
lwkt_cpusync_init(lwkt_cpusync_t cs, cpumask_t mask,
		  cpusync_func_t func, void *data)
	/* cs->cs_mack = 0; handled by _interlock */
/*
 * IPIQ messaging wrappers.  IPIQ remote functions are passed three
 * arguments: a void * pointer, an integer, and a pointer to the trap
 * frame (or NULL if the trap frame is not known).  However, we wish to
 * provide opaque interfaces for simpler callbacks... the basic IPI
 * messaging function as used by the kernel takes a single argument.
 */
313 lwkt_send_ipiq(globaldata_t target
, ipifunc1_t func
, void *arg
)
315 return(lwkt_send_ipiq3(target
, (ipifunc3_t
)func
, arg
, 0));
319 lwkt_send_ipiq2(globaldata_t target
, ipifunc2_t func
, void *arg1
, int arg2
)
321 return(lwkt_send_ipiq3(target
, (ipifunc3_t
)func
, arg1
, arg2
));
325 lwkt_send_ipiq_mask(cpumask_t mask
, ipifunc1_t func
, void *arg
)
327 return(lwkt_send_ipiq3_mask(mask
, (ipifunc3_t
)func
, arg
, 0));
331 lwkt_send_ipiq2_mask(cpumask_t mask
, ipifunc2_t func
, void *arg1
, int arg2
)
333 return(lwkt_send_ipiq3_mask(mask
, (ipifunc3_t
)func
, arg1
, arg2
));
337 lwkt_send_ipiq_passive(globaldata_t target
, ipifunc1_t func
, void *arg
)
339 return(lwkt_send_ipiq3_passive(target
, (ipifunc3_t
)func
, arg
, 0));
343 lwkt_send_ipiq2_passive(globaldata_t target
, ipifunc2_t func
,
344 void *arg1
, int arg2
)
346 return(lwkt_send_ipiq3_passive(target
, (ipifunc3_t
)func
, arg1
, arg2
));
350 lwkt_send_ipiq_bycpu(int dcpu
, ipifunc1_t func
, void *arg
)
352 return(lwkt_send_ipiq3_bycpu(dcpu
, (ipifunc3_t
)func
, arg
, 0));
356 lwkt_send_ipiq2_bycpu(int dcpu
, ipifunc2_t func
, void *arg1
, int arg2
)
358 return(lwkt_send_ipiq3_bycpu(dcpu
, (ipifunc3_t
)func
, arg1
, arg2
));
/*
 * Report whether this cpu has pending IPI or cpusync queue work to
 * process.
 * NOTE(review): the declaration of 'ipiq' and the statement taken when
 * gd_ipimask tests non-zero are missing from this chunk; the surviving
 * fragments are kept verbatim.
 */
lwkt_need_ipiq_process(globaldata_t gd)
	if (CPUMASK_TESTNZERO(gd->gd_ipimask))
	/* also check the cpu synchronization queue for unconsumed entries */
	ipiq = &gd->gd_cpusyncq;
	return (ipiq->ip_rindex != ipiq->ip_windex);
374 #endif /* _SYS_THREAD2_H_ */