/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_token.c,v 1.31 2008/05/18 20:57:56 nth Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>

#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS	1024	/* power of 2 */
#endif
#define LWKT_MASK_POOL_TOKENS	(LWKT_NUM_POOL_TOKENS - 1)

#ifdef INVARIANTS
static int token_debug = 0;
#endif

static lwkt_token pool_tokens[LWKT_NUM_POOL_TOKENS];

#define TOKEN_STRING		"REF=%p TOK=%p TD=%p"
#define CONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention started)"
#define UNCONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention stopped)"
#if !defined(KTR_TOKENS)
#define KTR_TOKENS	KTR_ALL
#endif

KTR_INFO_MASTER(tokens);
KTR_INFO(KTR_TOKENS, tokens, try, 0, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, get, 1, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, sizeof(void *) * 3);
#if 0
KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqremote, 4, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqfail, 5, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, drain, 6, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_start, 7, CONTENDED_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_stop, 7, UNCONTENDED_STRING, sizeof(void *) * 3);
#endif

#define logtoken(name, ref)						\
	KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)

#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, token_debug, CTLFLAG_RW, &token_debug, 0, "");
#endif

/*
 * Obtain all the tokens required by the specified thread on the current
 * cpu, return 0 on failure and non-zero on success.
 *
 * The preemption code will not allow a target thread holding spinlocks to
 * preempt the current thread so we do not have to implement this for UP.
 * The only reason why we implement this for UP is that we want to detect
 * stale tokens (lwkt_token_is_stale).
 *
 * lwkt_getalltokens is called by the LWKT scheduler to acquire all
 * tokens that the thread had acquired prior to going to sleep.
 *
 * Called from a critical section.
 */
int
lwkt_getalltokens(thread_t td)
{
    lwkt_tokref_t refs;
#ifdef SMP
    lwkt_tokref_t undo;
#endif
    lwkt_token_t tok;

    for (refs = td->td_toks; refs; refs = refs->tr_next) {
	KKASSERT(refs->tr_state == 0);
	tok = refs->tr_tok;
	if (tok->t_owner != td) {
#ifdef SMP
	    if (spin_trylock_wr(&tok->t_spinlock) == 0) {
		/*
		 * Release the partial list of tokens obtained and return
		 * failure.
		 */
		for (undo = td->td_toks; undo != refs; undo = undo->tr_next) {
		    tok = undo->tr_tok;
		    undo->tr_state = 0;
		    if (--tok->t_count == 0) {
			tok->t_owner = NULL;
			spin_unlock_wr(&tok->t_spinlock);
		    }
		}
		return (FALSE);
	    }
#endif
	    KKASSERT(tok->t_owner == NULL && tok->t_count == 0);
	    tok->t_owner = td;

	    /*
	     * Detect the situation where the token was acquired by
	     * another thread while the token was released from the
	     * current thread due to a blocking condition.
	     * In this case we set t_lastowner to NULL to mark the
	     * token as stale from the point of view of BOTH threads.
	     * See lwkt_token_is_stale().
	     */
	    if (tok->t_lastowner != tok->t_owner)
		tok->t_lastowner = NULL;
	}
	++tok->t_count;
	refs->tr_state = 1;
    }
    return (TRUE);
}

/*
 * Release all tokens owned by the specified thread on the current cpu.
 *
 * Called from a critical section.
 */
void
lwkt_relalltokens(thread_t td)
{
    lwkt_tokref_t refs;
    lwkt_token_t tok;

    for (refs = td->td_toks; refs; refs = refs->tr_next) {
	if (refs->tr_state) {
	    refs->tr_state = 0;
	    tok = refs->tr_tok;
	    KKASSERT(tok->t_owner == td && tok->t_count > 0);
	    if (--tok->t_count == 0) {
		tok->t_owner = NULL;
#ifdef SMP
		spin_unlock_wr(&tok->t_spinlock);
#endif
	    }
	}
    }
}
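
/*
 * Illustrative sketch (not part of this file): how the two routines above
 * fit together at thread-switch time, in simplified pseudo-code rather
 * than the actual lwkt_switch() logic.  A thread's tokens are released
 * when it is switched away, and a candidate thread is only resumed once
 * every token on its td_toks list can be reacquired:
 *
 *	crit_enter();
 *	if (otd->td_toks)
 *		lwkt_relalltokens(otd);		(switching otd out)
 *	...
 *	if (ntd->td_toks && lwkt_getalltokens(ntd) == FALSE) {
 *		(ntd cannot run yet; pick another candidate thread)
 *	}
 *	crit_exit();
 */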

/*
 * Token acquisition helper function.  Note that get/trytokenref do not
 * reset t_lastowner if the token is already held.  Only lwkt_token_is_stale()
 * is allowed to do that.
 *
 * NOTE: On failure, this function doesn't remove the token from the
 * thread's token list, so the caller has to perform that removal itself:
 *
 *	td->td_toks = ref->tr_next;
 */
static __inline
int
_lwkt_trytokref2(lwkt_tokref_t ref, thread_t td)
{
#ifndef SMP
    lwkt_tokref_t scan;
    thread_t itd;
#endif
    lwkt_token_t tok;

    KKASSERT(mycpu->gd_intr_nesting_level == 0);
    KKASSERT(ref->tr_state == 0);
    tok = ref->tr_tok;

    /*
     * Link the tokref to the thread's list
     */
    ref->tr_next = td->td_toks;
    cpu_ccfence();

    /*
     * Once td_toks is set to a non NULL value, we can't preempt
     * another thread anymore (the scheduler takes care that this
     * won't happen).  Additionally, we can't get preempted by
     * another thread that wants to access the same token (tok).
     */
    td->td_toks = ref;

    if (tok->t_owner != td) {
#ifdef SMP
	/*
	 * Gain ownership of the token's spinlock, SMP version.
	 */
	if (spin_trylock_wr(&tok->t_spinlock) == 0) {
	    return (FALSE);
	}
#else
	/*
	 * Gain ownership of the token, UP version.  All we have to do
	 * is check whether we are preempting someone who owns the
	 * same token, in which case we fail to acquire the token.
	 */
	itd = td;
	while ((itd = itd->td_preempted) != NULL) {
	    for (scan = itd->td_toks; scan; scan = scan->tr_next) {
		if (scan->tr_tok == tok) {
		    return (FALSE);
		}
	    }
	}
#endif
	KKASSERT(tok->t_owner == NULL && tok->t_count == 0);
	tok->t_owner = td;
	tok->t_lastowner = td;
    }
    ++tok->t_count;
    ref->tr_state = 1;

    return (TRUE);
}

static __inline
int
_lwkt_trytokref(lwkt_tokref_t ref)
{
    thread_t td = curthread;

    if (_lwkt_trytokref2(ref, td) == FALSE) {
	/*
	 * Cleanup.  Remove the token from the thread's list.
	 */
	td->td_toks = ref->tr_next;
	return (FALSE);
    }

    return (TRUE);
}

/*
 * Acquire a serializing token.  This routine can block.
 *
 * We track ownership and a per-owner counter.  Tokens are
 * released when a thread switches out and reacquired when a thread
 * switches back in.
 */
static __inline
void
_lwkt_gettokref(lwkt_tokref_t ref)
{
    if (_lwkt_trytokref2(ref, curthread) == FALSE) {
	/*
	 * Give up running if we can't acquire the token right now.  But as we
	 * have linked the tokref into the thread's list (_lwkt_trytokref2),
	 * the scheduler now takes care to acquire the token (by calling
	 * lwkt_getalltokens) before resuming execution.  As such, when we
	 * return from lwkt_yield(), the token is acquired.
	 */
	lwkt_yield();
    }
}

void
lwkt_gettoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
    lwkt_tokref_init(ref, tok);
    logtoken(get, ref);
    _lwkt_gettokref(ref);
}

void
lwkt_gettokref(lwkt_tokref_t ref)
{
    logtoken(get, ref);
    _lwkt_gettokref(ref);
}

int
lwkt_trytoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
    lwkt_tokref_init(ref, tok);
    logtoken(try, ref);
    return(_lwkt_trytokref(ref));
}

int
lwkt_trytokref(lwkt_tokref_t ref)
{
    logtoken(try, ref);
    return(_lwkt_trytokref(ref));
}

/*
 * Release a serializing token
 */
void
lwkt_reltoken(lwkt_tokref_t ref)
{
    struct lwkt_tokref **scanp;
    lwkt_token_t tok;
    thread_t td;

    td = curthread;
    tok = ref->tr_tok;

    KKASSERT(tok->t_owner == td && ref->tr_state == 1 && tok->t_count > 0);

    ref->tr_state = 0;

    /*
     * Fix up the count now to avoid racing a preemption which may occur
     * after the token has been removed from td_toks.
     */
    if (--tok->t_count == 0) {
	tok->t_owner = NULL;
	tok->t_lastowner = NULL;
#ifdef SMP
	spin_unlock_wr(&tok->t_spinlock);
#endif
    }

    /*
     * Remove ref from thread's token list.
     *
     * After removing the token from the thread's list, it's unsafe
     * on a UP machine to modify the token, because we might get
     * preempted by another thread that wants to acquire the same token.
     * This thread now thinks that it can acquire the token, because it's
     * no longer in our thread's list.  Bang!
     *
     * SMP: Do not modify token after spin_unlock_wr.
     */
    for (scanp = &td->td_toks; *scanp != ref; scanp = &((*scanp)->tr_next))
	;
    *scanp = ref->tr_next;

    logtoken(release, ref);
}
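
/*
 * Illustrative usage sketch (not part of this file; "struct my_obj" and
 * its fields are hypothetical).  The caller supplies a tokref on its own
 * stack, acquires the token around the serialized section and releases it
 * afterwards.  If the thread blocks while holding the token, the token is
 * released on switch-out and reacquired by the scheduler before the
 * thread resumes:
 *
 *	struct my_obj {
 *		struct lwkt_token o_token;
 *		int		  o_data;
 *	};
 *
 *	static void
 *	my_obj_update(struct my_obj *obj, int v)
 *	{
 *		struct lwkt_tokref olock;
 *
 *		lwkt_gettoken(&olock, &obj->o_token);
 *		obj->o_data = v;	(serialized against other holders)
 *		lwkt_reltoken(&olock);
 *	}
 */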

/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we set up the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
    int i;

    for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
	lwkt_token_init(&pool_tokens[i]);
}

lwkt_token_t
lwkt_token_pool_get(void *ptraddr)
{
    int i;

    i = ((int)(intptr_t)ptraddr >> 2) ^ ((int)(intptr_t)ptraddr >> 12);
    return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}
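
/*
 * Illustrative sketch of the pool-token pattern (the "obj" pointer below
 * is hypothetical, not from this file).  Because the token is chosen
 * purely by hashing the object's address, callers can serialize access to
 * a data structure without embedding a token in it, and the token remains
 * valid even if the object itself is freed:
 *
 *	struct lwkt_tokref ilock;
 *	lwkt_token_t tok;
 *
 *	tok = lwkt_token_pool_get(obj);
 *	lwkt_gettoken(&ilock, tok);
 *	... examine or modify *obj ...
 *	lwkt_reltoken(&ilock);
 *
 * Unrelated objects may hash to the same pool token; that is safe, but it
 * can introduce false contention.
 */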

/*
 * Initialize the owner and release-to cpu to the current cpu
 * and reset the generation count.
 */
void
lwkt_token_init(lwkt_token_t tok)
{
#ifdef SMP
    spin_init(&tok->t_spinlock);
#endif
    tok->t_owner = NULL;
    tok->t_lastowner = NULL;
    tok->t_count = 0;
}

void
lwkt_token_uninit(lwkt_token_t tok)
{
    /* empty */
}

int
lwkt_token_is_stale(lwkt_tokref_t ref)
{
    lwkt_token_t tok = ref->tr_tok;

    KKASSERT(tok->t_owner == curthread && ref->tr_state == 1 &&
	     tok->t_count > 0);

    /* Token is not stale */
    if (tok->t_lastowner == tok->t_owner)
	return (FALSE);

    /*
     * The token is stale.  Reset to not stale so that the next call to
     * lwkt_token_is_stale will return "not stale" unless the token
     * was acquired in-between by another thread.
     */
    tok->t_lastowner = tok->t_owner;
    return (TRUE);
}
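
/*
 * Illustrative sketch of a staleness check (the cache names below are
 * hypothetical, not from this file).  After an operation that may have
 * blocked, a token holder can ask whether another thread held the token
 * in the interim and, if so, revalidate whatever cached state the token
 * protects:
 *
 *	lwkt_gettoken(&ilock, &my_cache_token);
 *	error = operation_that_may_block();
 *	if (lwkt_token_is_stale(&ilock))
 *		cached_hint = NULL;	(re-lookup on next use)
 *	lwkt_reltoken(&ilock);
 */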