/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * lwkt_token - Implement soft token locks.
 *
 * Tokens are locks which serialize a thread only while the thread is
 * running.  If the thread blocks all tokens are released, then reacquired
 * when the thread resumes.
 *
 * This implementation requires no critical sections or spin locks, but
 * does use atomic_cmpset_ptr().
 *
 * Tokens may be recursively acquired by the same thread.  However the
 * caller must be sure to release such tokens in reverse order.
 */
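/*
 * Typical usage (illustrative sketch only; the token here is one of the
 * global tokens declared below):
 *
 *	lwkt_gettoken(&vm_token);	(may block until the token is free)
 *	... operate on structures serialized by vm_token ...
 *	lwkt_reltoken(&vm_token);
 *
 * Because all tokens are implicitly released if the thread blocks and
 * reacquired when it resumes, callers must re-validate any cached state
 * after calls that can block while a token is held.
 */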
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS	1024	/* power of 2 */
#endif
#define LWKT_MASK_POOL_TOKENS	(LWKT_NUM_POOL_TOKENS - 1)

static lwkt_token	pool_tokens[LWKT_NUM_POOL_TOKENS];
#define TOKEN_STRING		"REF=%p TOK=%p TD=%p"
#define CONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention started)"
#define UNCONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention stopped)"
#if !defined(KTR_TOKENS)
#define	KTR_TOKENS	KTR_ALL
#endif
KTR_INFO_MASTER(tokens);
KTR_INFO(KTR_TOKENS, tokens, fail, 0, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, succ, 1, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqremote, 4, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqfail, 5, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, drain, 6, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_start, 7, CONTENDED_STRING,
	 sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_stop, 7, UNCONTENDED_STRING,
	 sizeof(void *) * 3);
#define logtoken(name, ref)						\
	KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)
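/*
 * For example, logtoken(fail, ref) expands to
 * KTR_LOG(tokens_fail, ref, ref->tr_tok, curthread), recording the
 * tokref, the token, and the current thread in the KTR trace buffer.
 */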
/*
 * Global tokens.  These replace the MP lock for major subsystem locking.
 * These tokens are initially used to lockup both global and individual
 * operations.
 *
 * Once individual structures get their own locks these tokens are used
 * only to protect global lists & other variables and to interlock
 * allocations and teardowns and such.
 *
 * The UP initializer causes token acquisition to also acquire the MP lock
 * for maximum compatibility.  The feature may be enabled and disabled at
 * any time, the MP state is copied to the tokref when the token is acquired
 * and will not race against sysctl changes.
 */
struct lwkt_token mp_token = LWKT_TOKEN_INITIALIZER(mp_token);
struct lwkt_token pmap_token = LWKT_TOKEN_INITIALIZER(pmap_token);
struct lwkt_token dev_token = LWKT_TOKEN_INITIALIZER(dev_token);
struct lwkt_token vm_token = LWKT_TOKEN_INITIALIZER(vm_token);
struct lwkt_token vmspace_token = LWKT_TOKEN_INITIALIZER(vmspace_token);
struct lwkt_token kvm_token = LWKT_TOKEN_INITIALIZER(kvm_token);
struct lwkt_token proc_token = LWKT_TOKEN_INITIALIZER(proc_token);
struct lwkt_token tty_token = LWKT_TOKEN_INITIALIZER(tty_token);
struct lwkt_token vnode_token = LWKT_TOKEN_INITIALIZER(vnode_token);
struct lwkt_token vmobj_token = LWKT_TOKEN_INITIALIZER(vmobj_token);
static int lwkt_token_ipi_dispatch = 4;
SYSCTL_INT(_lwkt, OID_AUTO, token_ipi_dispatch, CTLFLAG_RW,
	   &lwkt_token_ipi_dispatch, 0,
	   "Number of IPIs to dispatch on token release");
/*
 * The collision count is bumped every time the LWKT scheduler fails
 * to acquire needed tokens in addition to a normal lwkt_gettoken()
 * stall.
 */
SYSCTL_LONG(_lwkt, OID_AUTO, mp_collisions, CTLFLAG_RW,
	    &mp_token.t_collisions, 0, "Collision counter of mp_token");
SYSCTL_LONG(_lwkt, OID_AUTO, pmap_collisions, CTLFLAG_RW,
	    &pmap_token.t_collisions, 0, "Collision counter of pmap_token");
SYSCTL_LONG(_lwkt, OID_AUTO, dev_collisions, CTLFLAG_RW,
	    &dev_token.t_collisions, 0, "Collision counter of dev_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vm_collisions, CTLFLAG_RW,
	    &vm_token.t_collisions, 0, "Collision counter of vm_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vmspace_collisions, CTLFLAG_RW,
	    &vmspace_token.t_collisions, 0,
	    "Collision counter of vmspace_token");
SYSCTL_LONG(_lwkt, OID_AUTO, kvm_collisions, CTLFLAG_RW,
	    &kvm_token.t_collisions, 0, "Collision counter of kvm_token");
SYSCTL_LONG(_lwkt, OID_AUTO, proc_collisions, CTLFLAG_RW,
	    &proc_token.t_collisions, 0, "Collision counter of proc_token");
SYSCTL_LONG(_lwkt, OID_AUTO, tty_collisions, CTLFLAG_RW,
	    &tty_token.t_collisions, 0, "Collision counter of tty_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vnode_collisions, CTLFLAG_RW,
	    &vnode_token.t_collisions, 0, "Collision counter of vnode_token");
/*
 * Acquire the initial mplock
 *
 * (low level boot only)
 */
void
cpu_get_initial_mplock(void)
{
	KKASSERT(mp_token.t_ref == NULL);
	if (lwkt_trytoken(&mp_token) == FALSE)
		panic("cpu_get_initial_mplock");
}
/*
 * Return a pool token given an address
 */
static __inline
lwkt_token_t
_lwkt_token_pool_lookup(void *ptr)
{
	int i;

	i = ((int)(intptr_t)ptr >> 2) ^ ((int)(intptr_t)ptr >> 12);
	return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}
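/*
 * Worked example (illustrative): the hash xors two shifted copies of the
 * address so nearby structures spread across the pool.  For ptr == 0x1000,
 * i = (0x1000 >> 2) ^ (0x1000 >> 12) = 0x400 ^ 0x1 = 0x401, selecting
 * pool_tokens[0x401 & 0x3ff] = pool_tokens[1].  For ptr == 0x2000 the
 * same arithmetic selects pool_tokens[2].
 */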
/*
 * Initialize a tokref_t prior to making it visible in the thread's
 * token array.
 *
 * As an optimization we set the MPSAFE flag if the thread is already
 * holding the mp_token.  This bypasses unnecessary calls to get_mplock()
 * and rel_mplock() on tokens which are not normally MPSAFE when the
 * thread is already holding the MP lock.
 */
static __inline
int
_lwkt_tok_flags(lwkt_token_t tok, thread_t td)
{
	return(tok->t_flags);
}

static __inline
void
_lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok, thread_t td,
		  int flags)
{
	ref->tr_tok = tok;
	ref->tr_owner = td;
	ref->tr_flags = flags;
}
/*
 * Force a LWKT reschedule on the target cpu when a requested token
 * becomes available.
 */
static
void
lwkt_reltoken_mask_remote(void *arg, int arg2, struct intrframe *frame)
{
	need_lwkt_resched();
}
/*
 * This bit of code sends a LWKT reschedule request to whatever other cpus
 * had contended on the token being released.  We could wake up all the cpus
 * but generally speaking if there is a lot of contention we really only want
 * to wake up a subset of cpus to avoid aggregating O(N^2) IPIs.  The current
 * cpuid is used as a basis to select which other cpus to wake up.
 *
 * For the selected cpus we can avoid issuing the actual IPI if the target
 * cpu's RQF_WAKEUP is already set.  In this case simply setting the
 * reschedule flag RQF_AST_LWKT_RESCHED will be sufficient.
 *
 * lwkt.token_ipi_dispatch specifies the maximum number of IPIs to dispatch
 * on a token release.
 */
static __inline
void
_lwkt_reltoken_mask(lwkt_token_t tok)
{
	globaldata_t ngd;
	cpumask_t mask;
	cpumask_t tmpmask;
	cpumask_t wumask;	/* wakeup mask */
	cpumask_t remask;	/* clear mask */
	int wucount;		/* wakeup count */
	int cpuid;
	int reqflags;

	/*
	 * Mask of contending cpus we want to wake up.
	 */
	mask = tok->t_collmask;
	cpu_ccfence();
	if (mask == 0)
		return;

	/*
	 * Degenerate case - IPI to all contending cpus
	 */
	wucount = lwkt_token_ipi_dispatch;
	if (wucount <= 0 || wucount >= ncpus) {
		wucount = 0;
		wumask = mask;
		remask = mask;
	} else {
		/*
		 * Calculate which cpus to IPI.  These cpus are potentially in a
		 * HLT state waiting for token contention to go away.
		 *
		 * Ask the cpu LWKT scheduler to reschedule by setting
		 * RQF_AST_LWKT_RESCHEDULE.  Signal the cpu if RQF_WAKEUP is not
		 * set (otherwise it has already been signalled or will check the
		 * flag very soon anyway).  Both bits must be adjusted atomically
		 * all in one go to avoid races.
		 *
		 * The collision mask is cleared for all cpus we set the resched
		 * flag for, but we only IPI the ones that need signalling.
		 */
		wumask = 0;
		remask = 0;
		while (wucount && mask) {
			tmpmask = mask & ~(CPUMASK(mycpu->gd_cpuid) - 1);
			if (tmpmask)
				cpuid = BSFCPUMASK(tmpmask);
			else
				cpuid = BSFCPUMASK(mask);
			ngd = globaldata_find(cpuid);
			for (;;) {
				reqflags = ngd->gd_reqflags;
				if (atomic_cmpset_int(&ngd->gd_reqflags,
						      reqflags,
						      reqflags |
						      (RQF_WAKEUP |
						       RQF_AST_LWKT_RESCHED))) {
					break;
				}
			}
			if ((reqflags & RQF_WAKEUP) == 0) {
				wumask |= CPUMASK(cpuid);
				--wucount;
			}
			remask |= CPUMASK(cpuid);
			mask &= ~CPUMASK(cpuid);
		}
	}
	if (remask) {
		atomic_clear_cpumask(&tok->t_collmask, remask);
		lwkt_send_ipiq3_mask(wumask, lwkt_reltoken_mask_remote,
				     NULL, 0);
	}
}
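/*
 * Worked example (illustrative): with ncpus = 8, lwkt.token_ipi_dispatch = 2,
 * the releasing thread on cpu 3, and t_collmask covering cpus {1, 5, 7}:
 * the first pass masks off cpus below the current cpuid and selects cpu 5,
 * the second selects cpu 7, exhausting the dispatch budget.  Cpus 5 and 7
 * get their resched flags set and are IPId if RQF_WAKEUP was not already
 * pending; cpu 1 stays in t_collmask and is handled on a later release.
 */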
/*
 * Obtain all the tokens required by the specified thread on the current
 * cpu, return 0 on failure and non-zero on success.  If a failure occurs
 * any partially acquired tokens will be released prior to return.
 *
 * lwkt_getalltokens is called by the LWKT scheduler to acquire all
 * tokens that the thread had acquired prior to going to sleep.
 *
 * We always clear the collision mask on token acquisition.
 *
 * Called from a critical section.
 */
int
lwkt_getalltokens(thread_t td)
{
	lwkt_tokref_t scan;
	lwkt_tokref_t ref;
	lwkt_token_t tok;

	/*
	 * Acquire tokens in forward order, assign or validate tok->t_ref.
	 */
	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		for (;;) {
			/*
			 * Try to acquire the token if we do not already have
			 * it.
			 *
			 * NOTE: If atomic_cmpset_ptr() fails we have to
			 *	 loop and try again.  It just means we
			 *	 lost a cpu race.
			 */
			ref = tok->t_ref;
			if (ref == NULL) {
				if (atomic_cmpset_ptr(&tok->t_ref, NULL,
						      scan)) {
					if (tok->t_collmask &
					    td->td_gd->gd_cpumask) {
						atomic_clear_cpumask(
						    &tok->t_collmask,
						    td->td_gd->gd_cpumask);
					}
					break;
				}
				continue;
			}

			/*
			 * Someone holds the token.
			 *
			 * Test if ref is already recursively held by this
			 * thread.  We cannot safely dereference tok->t_ref
			 * (it might belong to another thread and is thus
			 * unstable), but we don't have to.  We can simply
			 * range-check it.
			 */
			if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
				break;

			/*
			 * Otherwise we failed to acquire all the tokens.
			 * Undo and return.  We have to try once more after
			 * setting cpumask to cover possible races against
			 * the checking of t_collmask.
			 */
			atomic_set_cpumask(&tok->t_collmask,
					   td->td_gd->gd_cpumask);
			if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan)) {
				if (tok->t_collmask & td->td_gd->gd_cpumask) {
					atomic_clear_cpumask(
					    &tok->t_collmask,
					    td->td_gd->gd_cpumask);
				}
				break;
			}
			td->td_wmesg = tok->t_desc;
			atomic_add_long(&tok->t_collisions, 1);
			lwkt_relalltokens(td);
			return(FALSE);
		}
	}
	return(TRUE);
}
/*
 * Release all tokens owned by the specified thread on the current cpu.
 *
 * This code is really simple.  Even in cases where we own all the tokens
 * note that t_ref may not match the scan for recursively held tokens,
 * or for the case where a lwkt_getalltokens() failed.
 *
 * The scheduler is responsible for maintaining the MP lock count, so
 * we don't need to deal with tr_flags here.
 *
 * Called from a critical section.
 */
void
lwkt_relalltokens(thread_t td)
{
	lwkt_tokref_t scan;
	lwkt_token_t tok;

	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		if (tok->t_ref == scan) {
			tok->t_ref = NULL;
			_lwkt_reltoken_mask(tok);
		}
	}
}
/*
 * Token acquisition helper function.  The caller must have already
 * made nref visible by adjusting td_toks_stop and will be responsible
 * for the disposition of nref on either success or failure.
 *
 * When acquiring tokens recursively we want tok->t_ref to point to
 * the outer (first) acquisition so it gets cleared only on the last
 * release.
 */
static __inline
int
_lwkt_trytokref2(lwkt_tokref_t nref, thread_t td, int blocking)
{
	lwkt_token_t tok;
	lwkt_tokref_t ref;

	/*
	 * Make sure the compiler does not reorder prior instructions
	 * beyond this demark.
	 */
	cpu_ccfence();

	/*
	 * Attempt to gain ownership
	 */
	tok = nref->tr_tok;
	for (;;) {
		/*
		 * Try to acquire the token if we do not already have
		 * it.  This is not allowed if we are in a hard code
		 * section (because it 'might' have blocked).
		 */
		ref = tok->t_ref;
		if (ref == NULL) {
			KASSERT((blocking == 0 ||
				td->td_gd->gd_intr_nesting_level == 0 ||
				panic_cpu_gd == mycpu),
				("Attempt to acquire token %p not already "
				 "held in hard code section", tok));

			/*
			 * NOTE: If atomic_cmpset_ptr() fails we have to
			 *	 loop and try again.  It just means we
			 *	 lost a cpu race.
			 */
			if (atomic_cmpset_ptr(&tok->t_ref, NULL, nref))
				return (TRUE);
			continue;
		}

		/*
		 * Test if ref is already recursively held by this
		 * thread.  We cannot safely dereference tok->t_ref
		 * (it might belong to another thread and is thus
		 * unstable), but we don't have to.  We can simply
		 * range-check it.
		 *
		 * It is ok to acquire a token that is already held
		 * by the current thread when in a hard code section.
		 */
		if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
			return(TRUE);

		/*
		 * Otherwise we failed, and it is not ok to attempt to
		 * acquire a token in a hard code section.
		 */
		KASSERT((blocking == 0 ||
			td->td_gd->gd_intr_nesting_level == 0),
			("Attempt to acquire token %p not already "
			 "held in hard code section", tok));

		return(FALSE);
	}
}
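/*
 * Illustrative note on the range check above: td_toks_base..td_toks_stop
 * is the thread's own tokref array, so if tok->t_ref points anywhere into
 * that range the token is already held by this thread (a recursive
 * acquisition) regardless of which tokref it points at, e.g.:
 *
 *	lwkt_gettoken(&proc_token);	(t_ref -> outer tokref)
 *	lwkt_gettoken(&proc_token);	(range check hits, no cmpset)
 *	lwkt_reltoken(&proc_token);	(t_ref != inner ref, left intact)
 *	lwkt_reltoken(&proc_token);	(t_ref matches, token cleared)
 */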
/*
 * Get a serializing token.  This routine can block.
 */
void
lwkt_gettoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	int flags;

	flags = _lwkt_tok_flags(tok, td);
	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
#if 0
		/*
		 * (DISABLED ATM) - Do not set t_collmask on a token
		 * acquisition failure, the scheduler will spin at least
		 * once and deal with hlt/spin semantics.
		 */
		atomic_set_cpumask(&tok->t_collmask, td->td_gd->gd_cpumask);
		if (atomic_cmpset_ptr(&tok->t_ref, NULL, ref)) {
			atomic_clear_cpumask(&tok->t_collmask,
					     td->td_gd->gd_cpumask);
			return;
		}
#endif
		td->td_wmesg = tok->t_desc;
		atomic_add_long(&tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(tok->t_ref == ref);
	}
}
void
lwkt_gettoken_hard(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	int flags;

	flags = _lwkt_tok_flags(tok, td);
	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
#if 0
		/*
		 * (DISABLED ATM) - Do not set t_collmask on a token
		 * acquisition failure, the scheduler will spin at least
		 * once and deal with hlt/spin semantics.
		 */
		atomic_set_cpumask(&tok->t_collmask, td->td_gd->gd_cpumask);
		if (atomic_cmpset_ptr(&tok->t_ref, NULL, ref)) {
			atomic_clear_cpumask(&tok->t_collmask,
					     td->td_gd->gd_cpumask);
			goto success;
		}
#endif
		td->td_wmesg = tok->t_desc;
		atomic_add_long(&tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(tok->t_ref == ref);
	}
#if 0
success:
#endif
	crit_enter_hard_gd(td->td_gd);
}
lwkt_token_t
lwkt_getpooltoken(void *ptr)
{
	thread_t td = curthread;
	lwkt_token_t tok;
	lwkt_tokref_t ref;
	int flags;

	tok = _lwkt_token_pool_lookup(ptr);
	flags = _lwkt_tok_flags(tok, td);
	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
#if 0
		/*
		 * (DISABLED ATM) - Do not set t_collmask on a token
		 * acquisition failure, the scheduler will spin at least
		 * once and deal with hlt/spin semantics.
		 */
		atomic_set_cpumask(&tok->t_collmask, td->td_gd->gd_cpumask);
		if (atomic_cmpset_ptr(&tok->t_ref, NULL, ref)) {
			atomic_clear_cpumask(&tok->t_collmask,
					     td->td_gd->gd_cpumask);
			goto success;
		}
#endif
		td->td_wmesg = tok->t_desc;
		atomic_add_long(&tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(tok->t_ref == ref);
	}
#if 0
success:
#endif
	return(tok);
}
/*
 * Attempt to acquire a token, return TRUE on success, FALSE on failure.
 */
int
lwkt_trytoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	int flags;

	flags = _lwkt_tok_flags(tok, td);
	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 0) == FALSE) {
		/*
		 * Cleanup, deactivate the failed token.
		 */
		cpu_ccfence();
		--td->td_toks_stop;
		return (FALSE);
	}
	return (TRUE);
}
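/*
 * Example (illustrative): non-blocking acquisition with a fallback path.
 *
 *	if (lwkt_trytoken(&tty_token)) {
 *		... serialized work ...
 *		lwkt_reltoken(&tty_token);
 *	} else {
 *		... defer or requeue the work instead of blocking ...
 *	}
 */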
/*
 * Release a serializing token.
 *
 * WARNING! All tokens must be released in reverse order.  This will be
 *	    asserted.
 */
void
lwkt_reltoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	/*
	 * Remove ref from thread token list and assert that it matches
	 * the token passed in.  Tokens must be released in reverse order.
	 */
	ref = td->td_toks_stop - 1;
	KKASSERT(ref >= &td->td_toks_base && ref->tr_tok == tok);

	/*
	 * Only clear the token if it matches ref.  If ref was a recursively
	 * acquired token it may not match.  Then adjust td_toks_stop.
	 *
	 * Some comparisons must be run prior to adjusting td_toks_stop
	 * to avoid racing against a fast interrupt/ipi which tries to
	 * acquire a token.
	 *
	 * We must also be absolutely sure that the compiler does not
	 * reorder the clearing of t_ref and the adjustment of td_toks_stop,
	 * or reorder the adjustment of td_toks_stop against the conditional.
	 *
	 * NOTE: The mplock is a token also so sequencing is a bit complex.
	 */
	if (tok->t_ref == ref) {
		tok->t_ref = NULL;
		_lwkt_reltoken_mask(tok);
	}
	cpu_sfence();
	td->td_toks_stop = ref;
	cpu_ccfence();
	KKASSERT(tok->t_ref != ref);
}
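/*
 * Example (illustrative): release order must mirror acquisition order.
 *
 *	lwkt_gettoken(&proc_token);
 *	lwkt_gettoken(&vm_token);
 *	...
 *	lwkt_reltoken(&vm_token);	(last acquired, first released)
 *	lwkt_reltoken(&proc_token);
 *
 * Releasing proc_token first would trip the KKASSERT above because the
 * tokref at td_toks_stop - 1 would still reference vm_token.
 */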
void
lwkt_reltoken_hard(lwkt_token_t tok)
{
	lwkt_reltoken(tok);
	crit_exit_hard();
}
/*
 * It is faster for users of lwkt_getpooltoken() to use the returned
 * token and just call lwkt_reltoken(), but for convenience we provide
 * this function which looks the token up based on the ident.
 */
void
lwkt_relpooltoken(void *ptr)
{
	lwkt_token_t tok = _lwkt_token_pool_lookup(ptr);

	lwkt_reltoken(tok);
}
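/*
 * Example (illustrative): serializing on an arbitrary structure address
 * without embedding a token in the structure itself.
 *
 *	struct vnode *vp = ...;
 *	lwkt_token_t tok;
 *
 *	tok = lwkt_getpooltoken(vp);
 *	... operate on *vp ...
 *	lwkt_reltoken(tok);		(faster than lwkt_relpooltoken(vp))
 *
 * Unrelated structures may hash to the same pool token; that only costs
 * extra contention, never correctness.
 */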
/*
 * Return a count of the number of token refs the thread has to the
 * specified token, whether it currently owns the token or not.
 */
int
lwkt_cnttoken(lwkt_token_t tok, thread_t td)
{
	lwkt_tokref_t scan;
	int count = 0;

	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		if (scan->tr_tok == tok)
			++count;
	}
	return(count);
}
/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we setup the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
	int i;

	for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
		lwkt_token_init(&pool_tokens[i], "pool");
}
lwkt_token_t
lwkt_token_pool_lookup(void *ptr)
{
	return (_lwkt_token_pool_lookup(ptr));
}
/*
 * Initialize a token.  If mpsafe is 0, the MP lock is acquired before
 * acquiring the token and released after releasing the token.
 */
void
lwkt_token_init(lwkt_token_t tok, const char *desc)
{
	tok->t_ref = NULL;
	tok->t_flags = 0;
	tok->t_collisions = 0;
	tok->t_collmask = 0;
	tok->t_desc = desc;
}
void
lwkt_token_uninit(lwkt_token_t tok)
{
	/* empty */
}
int
lwkt_token_is_stale(lwkt_tokref_t ref)
{
	lwkt_token_t tok = ref->tr_tok;

	KKASSERT(tok->t_owner == curthread && ref->tr_state == 1 &&
		 tok->t_count > 0);

	/* Token is not stale */
	if (tok->t_lastowner == tok->t_owner)
		return (FALSE);

	/*
	 * The token is stale.  Reset to not stale so that the next call to
	 * lwkt_token_is_stale will return "not stale" unless the token
	 * was acquired in-between by another thread.
	 */
	tok->t_lastowner = tok->t_owner;
	return (TRUE);
}
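/*
 * Example (illustrative): a caller holding a token can use
 * lwkt_token_is_stale() after a blocking operation to decide whether
 * cached lookups must be redone, because another thread may have owned
 * the token while this thread slept:
 *
 *	obj = lookup(key);
 *	tsleep(...);			(token dropped while asleep)
 *	if (lwkt_token_is_stale(ref))
 *		obj = lookup(key);	(state may have changed)
 *
 * Here "ref" is the caller's lwkt_tokref_t for the held token and
 * lookup()/key are hypothetical.
 */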