/* sys/kern/lwkt_token.c */
/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * lwkt_token - Implement soft token locks.
 *
 * Tokens are locks which serialize a thread only while the thread is
 * running.  If the thread blocks, all tokens are released, then reacquired
 * when the thread resumes.
 *
 * This implementation requires no critical sections or spin locks, but
 * does use atomic_cmpset_ptr().
 *
 * Tokens may be recursively acquired by the same thread.  However, the
 * caller must be sure to release such tokens in reverse order.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>

#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS	1024	/* power of 2 */
#endif
#define LWKT_MASK_POOL_TOKENS	(LWKT_NUM_POOL_TOKENS - 1)

static lwkt_token	pool_tokens[LWKT_NUM_POOL_TOKENS];

#define TOKEN_STRING	"REF=%p TOK=%p TD=%p"
#define CONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention started)"
#define UNCONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention stopped)"
#if !defined(KTR_TOKENS)
#define	KTR_TOKENS	KTR_ALL
#endif

KTR_INFO_MASTER(tokens);
KTR_INFO(KTR_TOKENS, tokens, fail, 0, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, succ, 1, TOKEN_STRING, sizeof(void *) * 3);
#if 0
KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqremote, 4, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqfail, 5, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, drain, 6, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_start, 7, CONTENDED_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_stop, 7, UNCONTENDED_STRING, sizeof(void *) * 3);
#endif

#define logtoken(name, ref)						\
	KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)

/*
 * Global tokens.  These replace the MP lock for major subsystem locking.
 * These tokens are initially used to lockup both global and individual
 * operations.
 *
 * Once individual structures get their own locks these tokens are used
 * only to protect global lists & other variables and to interlock
 * allocations and teardowns and such.
 *
 * The UP initializer causes token acquisition to also acquire the MP lock
 * for maximum compatibility.  The feature may be enabled and disabled at
 * any time, the MP state is copied to the tokref when the token is acquired
 * and will not race against sysctl changes.
 */
struct lwkt_token mp_token = LWKT_TOKEN_INITIALIZER(mp_token);
struct lwkt_token pmap_token = LWKT_TOKEN_INITIALIZER(pmap_token);
struct lwkt_token dev_token = LWKT_TOKEN_INITIALIZER(dev_token);
struct lwkt_token vm_token = LWKT_TOKEN_INITIALIZER(vm_token);
struct lwkt_token vmspace_token = LWKT_TOKEN_INITIALIZER(vmspace_token);
struct lwkt_token kvm_token = LWKT_TOKEN_INITIALIZER(kvm_token);
struct lwkt_token proc_token = LWKT_TOKEN_INITIALIZER(proc_token);
struct lwkt_token tty_token = LWKT_TOKEN_INITIALIZER(tty_token);
struct lwkt_token vnode_token = LWKT_TOKEN_INITIALIZER(vnode_token);
struct lwkt_token vmobj_token = LWKT_TOKEN_INITIALIZER(vmobj_token);

static int lwkt_token_ipi_dispatch = 4;
SYSCTL_INT(_lwkt, OID_AUTO, token_ipi_dispatch, CTLFLAG_RW,
    &lwkt_token_ipi_dispatch, 0, "Number of IPIs to dispatch on token release");

/*
 * The collision count is bumped every time the LWKT scheduler fails
 * to acquire needed tokens in addition to a normal lwkt_gettoken()
 * stall.
 */
SYSCTL_LONG(_lwkt, OID_AUTO, mp_collisions, CTLFLAG_RW,
    &mp_token.t_collisions, 0, "Collision counter of mp_token");
SYSCTL_LONG(_lwkt, OID_AUTO, pmap_collisions, CTLFLAG_RW,
    &pmap_token.t_collisions, 0, "Collision counter of pmap_token");
SYSCTL_LONG(_lwkt, OID_AUTO, dev_collisions, CTLFLAG_RW,
    &dev_token.t_collisions, 0, "Collision counter of dev_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vm_collisions, CTLFLAG_RW,
    &vm_token.t_collisions, 0, "Collision counter of vm_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vmspace_collisions, CTLFLAG_RW,
    &vmspace_token.t_collisions, 0, "Collision counter of vmspace_token");
SYSCTL_LONG(_lwkt, OID_AUTO, kvm_collisions, CTLFLAG_RW,
    &kvm_token.t_collisions, 0, "Collision counter of kvm_token");
SYSCTL_LONG(_lwkt, OID_AUTO, proc_collisions, CTLFLAG_RW,
    &proc_token.t_collisions, 0, "Collision counter of proc_token");
SYSCTL_LONG(_lwkt, OID_AUTO, tty_collisions, CTLFLAG_RW,
    &tty_token.t_collisions, 0, "Collision counter of tty_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vnode_collisions, CTLFLAG_RW,
    &vnode_token.t_collisions, 0, "Collision counter of vnode_token");

#ifdef SMP
/*
 * Acquire the initial mplock
 *
 * (low level boot only)
 */
void
cpu_get_initial_mplock(void)
{
	KKASSERT(mp_token.t_ref == NULL);
	if (lwkt_trytoken(&mp_token) == FALSE)
		panic("cpu_get_initial_mplock");
}
#endif

/*
 * Return a pool token given an address
 */
static __inline
lwkt_token_t
_lwkt_token_pool_lookup(void *ptr)
{
	int i;

	i = ((int)(intptr_t)ptr >> 2) ^ ((int)(intptr_t)ptr >> 12);
	return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}

/*
 * Initialize a tokref_t prior to making it visible in the thread's
 * token array.
 *
 * As an optimization we set the MPSAFE flag if the thread is already
 * holding the mp_token.  This bypasses unnecessary calls to get_mplock()
 * and rel_mplock() on tokens which are not normally MPSAFE when the thread
 * is already holding the MP lock.
 */
static __inline
intptr_t
_lwkt_tok_flags(lwkt_token_t tok, thread_t td)
{
	return(tok->t_flags);
}

static __inline
void
_lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok, thread_t td,
		  intptr_t flags)
{
	ref->tr_tok = tok;
	ref->tr_owner = td;
	ref->tr_flags = flags;
}

#ifdef SMP
/*
 * Force a LWKT reschedule on the target cpu when a requested token
 * becomes available.
 */
static
void
lwkt_reltoken_mask_remote(void *arg, int arg2, struct intrframe *frame)
{
	need_lwkt_resched();
}
#endif

/*
 * This bit of code sends a LWKT reschedule request to whatever other cpus
 * had contended on the token being released.  We could wake up all the cpus
 * but generally speaking if there is a lot of contention we really only want
 * to wake up a subset of cpus to avoid aggregating O(N^2) IPIs.  The current
 * cpuid is used as a basis to select which other cpus to wake up.
 *
 * For the selected cpus we can avoid issuing the actual IPI if the target
 * cpu's RQF_WAKEUP is already set.  In this case simply setting the
 * reschedule flag RQF_AST_LWKT_RESCHED will be sufficient.
 *
 * lwkt.token_ipi_dispatch specifies the maximum number of IPIs to dispatch
 * on a token release.
 */
static __inline
void
_lwkt_reltoken_mask(lwkt_token_t tok)
{
#ifdef SMP
	globaldata_t ngd;
	cpumask_t mask;
	cpumask_t tmpmask;
	cpumask_t wumask;	/* wakeup mask */
	cpumask_t remask;	/* clear mask */
	int wucount;		/* wakeup count */
	int cpuid;
	int reqflags;

	/*
	 * Mask of contending cpus we want to wake up.
	 */
	mask = tok->t_collmask;
	cpu_ccfence();
	if (mask == 0)
		return;

	/*
	 * Degenerate case - IPI to all contending cpus
	 */
	wucount = lwkt_token_ipi_dispatch;
	if (wucount <= 0 || wucount >= ncpus) {
		wucount = 0;
		wumask = mask;
		remask = mask;
	} else {
		wumask = 0;
		remask = 0;
	}

	/*
	 * Calculate which cpus to IPI.  These cpus are potentially in a
	 * HLT state waiting for token contention to go away.
	 *
	 * Ask the cpu LWKT scheduler to reschedule by setting
	 * RQF_AST_LWKT_RESCHED.  Signal the cpu if RQF_WAKEUP is not
	 * set (otherwise it has already been signalled or will check the
	 * flag very soon anyway).  Both bits must be adjusted atomically
	 * all in one go to avoid races.
	 *
	 * The collision mask is cleared for all cpus we set the resched
	 * flag for, but we only IPI the ones that need signalling.
	 */
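	/*
	 * A small worked example of the selection below (assuming
	 * CPUMASK(n) is the single-cpu bit mask (1 << n) and BSFCPUMASK()
	 * returns the lowest set cpu id): if the current cpu is 3 and
	 * cpus 1, 2 and 6 are contending (mask = 0x46), then
	 *
	 *	tmpmask = 0x46 & ~(0x08 - 1) = 0x46 & ~0x07 = 0x40
	 *
	 * so cpu 6, the first contender at or above the current cpu, is
	 * picked first.  Once cpu 6 is removed from the mask tmpmask
	 * becomes 0 and the selection wraps around to cpu 1.
	 */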
	while (wucount && mask) {
		tmpmask = mask & ~(CPUMASK(mycpu->gd_cpuid) - 1);
		if (tmpmask)
			cpuid = BSFCPUMASK(tmpmask);
		else
			cpuid = BSFCPUMASK(mask);
		ngd = globaldata_find(cpuid);
		for (;;) {
			reqflags = ngd->gd_reqflags;
			if (atomic_cmpset_int(&ngd->gd_reqflags, reqflags,
					      reqflags |
					      (RQF_WAKEUP |
					       RQF_AST_LWKT_RESCHED))) {
				break;
			}
		}
		if ((reqflags & RQF_WAKEUP) == 0) {
			wumask |= CPUMASK(cpuid);
			--wucount;
		}
		remask |= CPUMASK(cpuid);
		mask &= ~CPUMASK(cpuid);
	}
	if (remask) {
		atomic_clear_cpumask(&tok->t_collmask, remask);
		lwkt_send_ipiq3_mask(wumask, lwkt_reltoken_mask_remote,
				     NULL, 0);
	}
#endif
}

/*
 * Obtain all the tokens required by the specified thread on the current
 * cpu, return 0 on failure and non-zero on success.  If a failure occurs
 * any partially acquired tokens will be released prior to return.
 *
 * lwkt_getalltokens is called by the LWKT scheduler to acquire all
 * tokens that the thread had acquired prior to going to sleep.
 *
 * We always clear the collision mask on token acquisition.
 *
 * Called from a critical section.
 */
int
lwkt_getalltokens(thread_t td)
{
	lwkt_tokref_t scan;
	lwkt_tokref_t ref;
	lwkt_token_t tok;

	/*
	 * Acquire tokens in forward order, assign or validate tok->t_ref.
	 */
	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		for (;;) {
			/*
			 * Try to acquire the token if we do not already have
			 * it.
			 *
			 * NOTE: If atomic_cmpset_ptr() fails we have to
			 *	 loop and try again.  It just means we
			 *	 lost a cpu race.
			 */
			ref = tok->t_ref;
			if (ref == NULL) {
				if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan))
				{
					if (tok->t_collmask & td->td_gd->gd_cpumask) {
						atomic_clear_cpumask(&tok->t_collmask,
								     td->td_gd->gd_cpumask);
					}
					break;
				}
				continue;
			}

			/*
			 * Someone holds the token.
			 *
			 * Test if ref is already recursively held by this
			 * thread.  We cannot safely dereference tok->t_ref
			 * (it might belong to another thread and is thus
			 * unstable), but we don't have to.  We can simply
			 * range-check it.
			 */
			if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
				break;

#ifdef SMP
			/*
			 * Otherwise we failed to acquire all the tokens.
			 * Undo and return.  We have to try once more after
			 * setting cpumask to cover possible races against
			 * the checking of t_collmask.
			 */
			atomic_set_cpumask(&tok->t_collmask,
					   td->td_gd->gd_cpumask);
			if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan)) {
				if (tok->t_collmask & td->td_gd->gd_cpumask) {
					atomic_clear_cpumask(&tok->t_collmask,
							     td->td_gd->gd_cpumask);
				}
				break;
			}
#endif
			td->td_wmesg = tok->t_desc;
			atomic_add_long(&tok->t_collisions, 1);
			lwkt_relalltokens(td);
			return(FALSE);
		}
	}
	return (TRUE);
}

/*
 * Release all tokens owned by the specified thread on the current cpu.
 *
 * This code is really simple.  Even in cases where we own all the tokens,
 * note that t_ref may not match the scan for recursively held tokens,
 * or for the case where a lwkt_getalltokens() failed.
 *
 * The scheduler is responsible for maintaining the MP lock count, so
 * we don't need to deal with tr_flags here.
 *
 * Called from a critical section.
 */
void
lwkt_relalltokens(thread_t td)
{
	lwkt_tokref_t scan;
	lwkt_token_t tok;

	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		if (tok->t_ref == scan) {
			tok->t_ref = NULL;
			_lwkt_reltoken_mask(tok);
		}
	}
}

/*
 * Token acquisition helper function.  The caller must have already
 * made nref visible by adjusting td_toks_stop and will be responsible
 * for the disposition of nref on either success or failure.
 *
 * When acquiring tokens recursively we want tok->t_ref to point to
 * the outer (first) acquisition so it gets cleared only on the last
 * release.
 */
static __inline
int
_lwkt_trytokref2(lwkt_tokref_t nref, thread_t td, int blocking)
{
	lwkt_token_t tok;
	lwkt_tokref_t ref;

	/*
	 * Make sure the compiler does not reorder prior instructions
	 * beyond this demark.
	 */
	cpu_ccfence();

	/*
	 * Attempt to gain ownership
	 */
	tok = nref->tr_tok;
	for (;;) {
		/*
		 * Try to acquire the token if we do not already have
		 * it.  This is not allowed if we are in a hard code
		 * section (because it 'might' have blocked).
		 */
		ref = tok->t_ref;
		if (ref == NULL) {
			KASSERT((blocking == 0 ||
				td->td_gd->gd_intr_nesting_level == 0 ||
				panic_cpu_gd == mycpu),
				("Attempt to acquire token %p not already "
				 "held in hard code section", tok));

			/*
			 * NOTE: If atomic_cmpset_ptr() fails we have to
			 *	 loop and try again.  It just means we
			 *	 lost a cpu race.
			 */
			if (atomic_cmpset_ptr(&tok->t_ref, NULL, nref))
				return (TRUE);
			continue;
		}

		/*
		 * Test if ref is already recursively held by this
		 * thread.  We cannot safely dereference tok->t_ref
		 * (it might belong to another thread and is thus
		 * unstable), but we don't have to.  We can simply
		 * range-check it.
		 *
		 * It is ok to acquire a token that is already held
		 * by the current thread when in a hard code section.
		 */
		if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
			return(TRUE);

		/*
		 * Otherwise we failed, and it is not ok to attempt to
		 * acquire a token in a hard code section.
		 */
		KASSERT((blocking == 0 ||
			td->td_gd->gd_intr_nesting_level == 0),
			("Attempt to acquire token %p not already "
			 "held in hard code section", tok));

		return(FALSE);
	}
}

/*
 * Get a serializing token.  This routine can block.
 */
void
lwkt_gettoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	intptr_t flags;

	flags = _lwkt_tok_flags(tok, td);
	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
#ifdef SMP
#if 0
		/*
		 * (DISABLED ATM) - Do not set t_collmask on a token
		 * acquisition failure, the scheduler will spin at least
		 * once and deal with hlt/spin semantics.
		 */
		atomic_set_cpumask(&tok->t_collmask, td->td_gd->gd_cpumask);
		if (atomic_cmpset_ptr(&tok->t_ref, NULL, ref)) {
			atomic_clear_cpumask(&tok->t_collmask,
					     td->td_gd->gd_cpumask);
			return;
		}
#endif
#endif
		td->td_wmesg = tok->t_desc;
		atomic_add_long(&tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(tok->t_ref == ref);
	}
}
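
/*
 * A minimal usage sketch (not part of the original file): how a caller
 * would typically pair lwkt_gettoken() with lwkt_reltoken(), including a
 * recursive acquisition.  The token and functions below are hypothetical
 * names used only for illustration; tokens must be released in reverse
 * order of acquisition.
 */
#if 0
static struct lwkt_token example_token = LWKT_TOKEN_INITIALIZER(example_token);

static void
example_inner(void)
{
	lwkt_gettoken(&example_token);	/* recursive acquisition is legal */
	/* ... access structures serialized by example_token ... */
	lwkt_reltoken(&example_token);
}

static void
example_outer(void)
{
	lwkt_gettoken(&example_token);	/* may block; all held tokens are
					 * released across a blocking switch
					 * and reacquired on resume */
	example_inner();
	lwkt_reltoken(&example_token);	/* release in reverse order */
}
#endif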

void
lwkt_gettoken_hard(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	intptr_t flags;

	flags = _lwkt_tok_flags(tok, td);
	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
#ifdef SMP
#if 0
		/*
		 * (DISABLED ATM) - Do not set t_collmask on a token
		 * acquisition failure, the scheduler will spin at least
		 * once and deal with hlt/spin semantics.
		 */
		atomic_set_cpumask(&tok->t_collmask, td->td_gd->gd_cpumask);
		if (atomic_cmpset_ptr(&tok->t_ref, NULL, ref)) {
			atomic_clear_cpumask(&tok->t_collmask,
					     td->td_gd->gd_cpumask);
			goto success;
		}
#endif
#endif
		td->td_wmesg = tok->t_desc;
		atomic_add_long(&tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(tok->t_ref == ref);
	}
#ifdef SMP
#if 0
success:
#endif
#endif
	crit_enter_hard_gd(td->td_gd);
}

lwkt_token_t
lwkt_getpooltoken(void *ptr)
{
	thread_t td = curthread;
	lwkt_token_t tok;
	lwkt_tokref_t ref;
	intptr_t flags;

	tok = _lwkt_token_pool_lookup(ptr);
	flags = _lwkt_tok_flags(tok, td);
	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
#ifdef SMP
#if 0
		/*
		 * (DISABLED ATM) - Do not set t_collmask on a token
		 * acquisition failure, the scheduler will spin at least
		 * once and deal with hlt/spin semantics.
		 */
		atomic_set_cpumask(&tok->t_collmask, td->td_gd->gd_cpumask);
		if (atomic_cmpset_ptr(&tok->t_ref, NULL, ref)) {
			atomic_clear_cpumask(&tok->t_collmask,
					     td->td_gd->gd_cpumask);
			goto success;
		}
#endif
#endif
		td->td_wmesg = tok->t_desc;
		atomic_add_long(&tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(tok->t_ref == ref);
	}
#ifdef SMP
#if 0
success:
#endif
#endif
	return(tok);
}

/*
 * Attempt to acquire a token, return TRUE on success, FALSE on failure.
 */
int
lwkt_trytoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	intptr_t flags;

	flags = _lwkt_tok_flags(tok, td);
	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 0) == FALSE) {
		/*
		 * Cleanup, deactivate the failed token.
		 */
		cpu_ccfence();
		--td->td_toks_stop;
		return (FALSE);
	}
	return (TRUE);
}
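
/*
 * A brief usage sketch (not part of the original file): lwkt_trytoken()
 * never blocks, so callers typically fall back to some other action when
 * it fails.  The token and function names below are hypothetical.
 */
#if 0
static void
example_try(void)
{
	if (lwkt_trytoken(&example_token)) {
		/* ... work under the token without risking a block ... */
		lwkt_reltoken(&example_token);
	} else {
		/* could not get the token without blocking; defer or retry */
	}
}
#endif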

/*
 * Release a serializing token.
 *
 * WARNING!  All tokens must be released in reverse order.  This will be
 *	     asserted.
 */
void
lwkt_reltoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	/*
	 * Remove ref from thread token list and assert that it matches
	 * the token passed in.  Tokens must be released in reverse order.
	 */
	ref = td->td_toks_stop - 1;
	KKASSERT(ref >= &td->td_toks_base && ref->tr_tok == tok);

	/*
	 * Only clear the token if it matches ref.  If ref was a recursively
	 * acquired token it may not match.  Then adjust td_toks_stop.
	 *
	 * Some comparisons must be run prior to adjusting td_toks_stop
	 * to avoid racing against a fast interrupt/ipi which tries to
	 * acquire a token.
	 *
	 * We must also be absolutely sure that the compiler does not
	 * reorder the clearing of t_ref and the adjustment of td_toks_stop,
	 * or reorder the adjustment of td_toks_stop against the conditional.
	 *
	 * NOTE: The mplock is a token also so sequencing is a bit complex.
	 */
	if (tok->t_ref == ref) {
		tok->t_ref = NULL;
		_lwkt_reltoken_mask(tok);
	}
	cpu_sfence();
	cpu_ccfence();
	td->td_toks_stop = ref;
	cpu_ccfence();
	KKASSERT(tok->t_ref != ref);
}

void
lwkt_reltoken_hard(lwkt_token_t tok)
{
	lwkt_reltoken(tok);
	crit_exit_hard();
}

/*
 * It is faster for users of lwkt_getpooltoken() to use the returned
 * token and just call lwkt_reltoken(), but for convenience we provide
 * this function which looks the token up based on the ident.
 */
void
lwkt_relpooltoken(void *ptr)
{
	lwkt_token_t tok = _lwkt_token_pool_lookup(ptr);
	lwkt_reltoken(tok);
}
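
/*
 * A short usage sketch (not part of the original file): pool tokens key
 * off an arbitrary address, so no token needs to be embedded in the
 * structure being protected.  "obj" and the function name below are
 * hypothetical; holding on to the token returned by lwkt_getpooltoken()
 * lets the release skip the second hash lookup.
 */
#if 0
static void
example_pool(void *obj)
{
	lwkt_token_t tok;

	tok = lwkt_getpooltoken(obj);	/* serialize on obj's pool token */
	/* ... operate on the structure obj points to ... */
	lwkt_reltoken(tok);		/* cheaper than lwkt_relpooltoken(obj) */
}
#endif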

/*
 * Return a count of the number of token refs the thread has to the
 * specified token, whether it currently owns the token or not.
 */
int
lwkt_cnttoken(lwkt_token_t tok, thread_t td)
{
	lwkt_tokref_t scan;
	int count = 0;

	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		if (scan->tr_tok == tok)
			++count;
	}
	return(count);
}

/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we set up the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
	int i;

	for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
		lwkt_token_init(&pool_tokens[i], "pool");
}

lwkt_token_t
lwkt_token_pool_lookup(void *ptr)
{
	return (_lwkt_token_pool_lookup(ptr));
}

/*
 * Initialize a token.  The description string is used for debugging
 * output and as the thread wait message (td_wmesg) when the token is
 * contended.
 */
void
lwkt_token_init(lwkt_token_t tok, const char *desc)
{
	tok->t_ref = NULL;
	tok->t_flags = 0;
	tok->t_collisions = 0;
	tok->t_collmask = 0;
	tok->t_desc = desc;
}

void
lwkt_token_uninit(lwkt_token_t tok)
{
	/* empty */
}
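
/*
 * A short usage sketch (not part of the original file): tokens embedded
 * in dynamically allocated structures are initialized with
 * lwkt_token_init() and torn down with lwkt_token_uninit().  The
 * structure, field and function names below are hypothetical.
 */
#if 0
struct example_softc {
	struct lwkt_token sc_token;
	/* ... other fields ... */
};

static struct example_softc *
example_alloc(void)
{
	struct example_softc *sc;

	sc = kmalloc(sizeof(*sc), M_TEMP, M_WAITOK | M_ZERO);
	lwkt_token_init(&sc->sc_token, "examp");
	return (sc);
}

static void
example_free(struct example_softc *sc)
{
	lwkt_token_uninit(&sc->sc_token);
	kfree(sc, M_TEMP);
}
#endif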

#if 0
int
lwkt_token_is_stale(lwkt_tokref_t ref)
{
	lwkt_token_t tok = ref->tr_tok;

	KKASSERT(tok->t_owner == curthread && ref->tr_state == 1 &&
		 tok->t_count > 0);

	/* Token is not stale */
	if (tok->t_lastowner == tok->t_owner)
		return (FALSE);

	/*
	 * The token is stale.  Reset to not stale so that the next call to
	 * lwkt_token_is_stale will return "not stale" unless the token
	 * was acquired in-between by another thread.
	 */
	tok->t_lastowner = tok->t_owner;
	return (TRUE);
}
#endif