 * Implements inline procedure support for the LWKT subsystem.
 *
 * Generally speaking these routines only operate on threads associated
 * with the current cpu.  For example, a higher priority thread pending
 * on a different cpu will not be immediately scheduled by a yield() on
 * this cpu.
 */

#ifndef _SYS_THREAD2_H_
#define _SYS_THREAD2_H_

#ifndef _KERNEL
#error "This file should not be included by userland programs."
#endif

/*
 * Userland will have its own globaldata which it includes prior to this.
 */
#include <sys/systm.h>
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/cpufunc.h>

/*
 * Is a token held either by the specified thread or held shared?
 *
 * We can't inexpensively validate the thread for a shared token
 * without iterating td->td_toks, so this isn't a perfect test.
 */
static __inline int
_lwkt_token_held_any(lwkt_token_t tok, thread_t td)
{
    long count = tok->t_count;

    cpu_ccfence();

    /* Held (shared or exclusive) directly by td? */
    if (tok->t_ref >= &td->td_toks_base && tok->t_ref < td->td_toks_stop)
        return TRUE;

    /* Held shared by somebody (shared counts present, not exclusive)? */
    if ((count & TOK_EXCLUSIVE) == 0 &&
        (count & ~(TOK_EXCLUSIVE|TOK_EXCLREQ))) {
        return TRUE;
    }
    return FALSE;
}

/*
 * Is a token held by the specified thread?
 */
static __inline int
_lwkt_token_held_excl(lwkt_token_t tok, thread_t td)
{
    return ((tok->t_ref >= &td->td_toks_base &&
             tok->t_ref < td->td_toks_stop));
}
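
/*
 * Illustrative sketch (not part of the original header): checks like these
 * typically back token assertions, e.g. something along the lines of
 *
 *      KKASSERT(_lwkt_token_held_any(&my_token, curthread));
 *
 * before touching a structure the token is expected to protect.  The name
 * my_token is hypothetical and used only for illustration.
 */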

/*
 * Critical section debugging
 */
#ifdef DEBUG_CRIT_SECTIONS
#define __DEBUG_CRIT_ARG__              const char *id
#define __DEBUG_CRIT_ADD_ARG__          , const char *id
#define __DEBUG_CRIT_PASS_ARG__         , id
#define __DEBUG_CRIT_ENTER(td)          _debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)           _debug_crit_exit((td), id)
#define crit_enter()                    _crit_enter(mycpu, __func__)
#define crit_enter_id(id)               _crit_enter(mycpu, id)
#define crit_enter_gd(curgd)            _crit_enter((curgd), __func__)
#define crit_enter_quick(curtd)         _crit_enter_quick((curtd), __func__)
#define crit_enter_hard()               _crit_enter_hard(mycpu, __func__)
#define crit_enter_hard_gd(curgd)       _crit_enter_hard((curgd), __func__)
#define crit_exit()                     _crit_exit(mycpu, __func__)
#define crit_exit_id(id)                _crit_exit(mycpu, id)
#define crit_exit_gd(curgd)             _crit_exit((curgd), __func__)
#define crit_exit_quick(curtd)          _crit_exit_quick((curtd), __func__)
#define crit_exit_hard()                _crit_exit_hard(mycpu, __func__)
#define crit_exit_hard_gd(curgd)        _crit_exit_hard((curgd), __func__)
#define crit_exit_noyield(curtd)        _crit_exit_noyield((curtd), __func__)

#else

#define __DEBUG_CRIT_ARG__              void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()                    _crit_enter(mycpu)
#define crit_enter_id(id)               _crit_enter(mycpu)
#define crit_enter_gd(curgd)            _crit_enter((curgd))
#define crit_enter_quick(curtd)         _crit_enter_quick((curtd))
#define crit_enter_hard()               _crit_enter_hard(mycpu)
#define crit_enter_hard_gd(curgd)       _crit_enter_hard((curgd))
#define crit_exit()                     crit_exit_wrapper()
#define crit_exit_id(id)                _crit_exit(mycpu)
#define crit_exit_gd(curgd)             _crit_exit((curgd))
#define crit_exit_quick(curtd)          _crit_exit_quick((curtd))
#define crit_exit_hard()                _crit_exit_hard(mycpu)
#define crit_exit_hard_gd(curgd)        _crit_exit_hard((curgd))
#define crit_exit_noyield(curtd)        _crit_exit_noyield((curtd))

#endif

extern void crit_exit_wrapper(__DEBUG_CRIT_ARG__);

/*
 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
 */
#ifdef DEBUG_CRIT_SECTIONS

static __inline void
_debug_crit_enter(thread_t td, const char *id)
{
    int wi = td->td_crit_debug_index;

    /* record the enter id in the circular debug array */
    td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
    ++td->td_crit_debug_index;
}

static __inline void
_debug_crit_exit(thread_t td, const char *id)
{
    const char *gid;
    int wi;

    wi = td->td_crit_debug_index - 1;
    if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
        if (td->td_in_crit_report == 0) {
            td->td_in_crit_report = 1;
            kprintf("crit_exit(%s) expected id %s\n", id, gid);
            td->td_in_crit_report = 0;
        }
    }
    --td->td_crit_debug_index;
}

#endif
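
/*
 * Illustrative sketch (not part of the original header): with
 * DEBUG_CRIT_SECTIONS defined each enter/exit records an identifier
 * (__func__ by default), so a hypothetical mismatched pairing such as
 *
 *      crit_enter_id("foo");
 *      ...
 *      crit_exit_id("bar");
 *
 * is reported via kprintf() because the exit id does not match the most
 * recently recorded enter id.
 */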

/*
 * Critical sections prevent preemption but still allow explicit blocking
 * and thread switching.  Any interrupt occurring while in a critical
 * section is made pending and returns immediately.  Interrupts are not
 * physically disabled.
 *
 * Hard critical sections prevent preemption and disallow any blocking
 * or thread switching, and in addition will assert on any blockable
 * operation (acquiring a token not already held, lockmgr, mutex ops, or
 * splz).  Spinlocks can still be used in hard sections.
 *
 * All critical section routines only operate on the current thread.
 * Passed gd or td arguments are simply optimizations when mycpu or
 * curthread is already available to the caller.
 */
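
/*
 * Illustrative usage sketch (not part of the original header): a typical
 * pairing protecting a short per-cpu operation from preemption; the body
 * shown is hypothetical.
 *
 *      crit_enter();
 *      ... manipulate per-cpu data structures ...
 *      crit_exit();
 *
 * Sections nest (td_critcount is a count), so the thread remains
 * non-preemptable until the outermost crit_exit().
 */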

static __inline void
_crit_enter_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    ++td->td_critcount;
    __DEBUG_CRIT_ENTER(td);
    cpu_ccfence();
}

static __inline void
_crit_enter(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_enter_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
    ++gd->gd_intr_nesting_level;
    cpu_ccfence();
}

/*
 * NOTE: Conditionalizing just gd_reqflags, a case which is virtually
 *       never true regardless of crit_count, should result in 100%
 *       optimal code execution.  We don't check crit_count because
 *       it just bloats the inline and does not improve performance.
 *
 * NOTE: This can produce a considerable amount of code despite the
 *       relatively few lines of code so the non-debug case typically
 *       just wraps it in a real function, crit_exit_wrapper().
 */
static __inline void
_crit_exit_noyield(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    __DEBUG_CRIT_EXIT(td);
    --td->td_critcount;
#ifdef INVARIANTS
    if (__predict_false(td->td_critcount < 0))
        crit_panic();
#endif
    cpu_ccfence();      /* prevent compiler reordering */
}

static __inline void
_crit_exit_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    _crit_exit_noyield(td __DEBUG_CRIT_PASS_ARG__);
    if (__predict_false(td->td_gd->gd_reqflags & RQF_IDLECHECK_MASK))
        lwkt_maybe_splz(td);
}

static __inline void
_crit_exit(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_exit_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    --gd->gd_intr_nesting_level;
    _crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline int
crit_test(thread_t td)
{
    return(td->td_critcount);
}

/*
 * Return whether any threads are runnable.
 */
static __inline int
lwkt_runnable(void)
{
    return (TAILQ_FIRST(&mycpu->gd_tdrunq) != NULL);
}

static __inline int
lwkt_getpri(thread_t td)
{
    return(td->td_pri);
}

static __inline int
lwkt_getpri_self(void)
{
    return(lwkt_getpri(curthread));
}

/*
 * Reduce our priority in preparation for a return to userland.  If
 * our passive release function was still in place, our priority was
 * never raised and does not need to be reduced.
 *
 * See also lwkt_passive_release() and platform/blah/trap.c
 */
static __inline void
lwkt_passive_recover(thread_t td)
{
#ifndef NO_LWKT_SPLIT_USERPRI
    if (td->td_release == NULL)
        lwkt_setpri_self(TDPRI_USER_NORM);
    td->td_release = NULL;
#endif
}

static __inline void
lwkt_cpusync_init(lwkt_cpusync_t cs, cpumask_t mask,
                  cpusync_func_t func, void *data)
{
    cs->cs_mask = mask;
    /* cs->cs_mack = 0; handled by _interlock */
    cs->cs_func = func;
    cs->cs_data = data;
}
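
/*
 * Illustrative sketch (not part of the original header): a cpusync is
 * typically initialized and then driven through the interlock/deinterlock
 * pair, e.g. something along the lines of
 *
 *      lwkt_cpusync_init(&cs, smp_active_mask, example_sync_func, NULL);
 *      lwkt_cpusync_interlock(&cs);
 *      ...
 *      lwkt_cpusync_deinterlock(&cs);
 *
 * where cs and example_sync_func are hypothetical names used only for
 * illustration.
 */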

/*
 * IPIQ messaging wrappers.  IPIQ remote functions are passed three arguments:
 * a void * pointer, an integer, and a pointer to the trap frame (or NULL if
 * the trap frame is not known).  However, we wish to provide opaque
 * interfaces for simpler callbacks... the basic IPI messaging function as
 * used by the kernel takes a single argument.
 */
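
/*
 * Illustrative sketch (not part of the original header): a one-argument
 * callback dispatched through lwkt_send_ipiq().  The names
 * example_ipi_callback, remote_gd, and my_data are hypothetical and are
 * shown only to demonstrate the opaque single-argument interface
 * described above; the callback runs on the target cpu.
 *
 *      static void
 *      example_ipi_callback(void *arg)
 *      {
 *              ...
 *      }
 *
 *      lwkt_send_ipiq(remote_gd, example_ipi_callback, my_data);
 */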

static __inline int
lwkt_send_ipiq(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2(globaldata_t target, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_mask(cpumask_t mask, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_mask(cpumask_t mask, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_passive(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_passive(globaldata_t target, ipifunc2_t func,
                        void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_bycpu(int dcpu, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_need_ipiq_process(globaldata_t gd)
{
    lwkt_ipiq_t ipiq;

    if (CPUMASK_TESTNZERO(gd->gd_ipimask))
        return 1;

    ipiq = &gd->gd_cpusyncq;
    return (ipiq->ip_rindex != ipiq->ip_windex);
}

#endif  /* _SYS_THREAD2_H_ */