/* linuxthreads/spinlock.c */
/* Linuxthreads - a simple clone()-based implementation of Posix        */
/* threads for Linux.                                                   */
/* Copyright (C) 1998 Xavier Leroy (Xavier.Leroy@inria.fr)              */
/*                                                                      */
/* This program is free software; you can redistribute it and/or        */
/* modify it under the terms of the GNU Library General Public License  */
/* as published by the Free Software Foundation; either version 2       */
/* of the License, or (at your option) any later version.               */
/*                                                                      */
/* This program is distributed in the hope that it will be useful,      */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of       */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
/* GNU Library General Public License for more details.                 */

/* Internal locks */
#include <errno.h>
#include <sched.h>
#include <time.h>
#include <stdlib.h>
#include <limits.h>
#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
#include "restart.h"
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
static void __pthread_acquire(int * spinlock);

static inline void __pthread_release(int * spinlock)
{
  WRITE_MEMORY_BARRIER();
  *spinlock = __LT_SPINLOCK_INIT;
  __asm __volatile ("" : "=m" (*spinlock) : "0" (*spinlock));
}
#endif
/* The status field of a spinlock is a pointer whose least significant
   bit is a locked flag.

   Thus the field values have the following meanings:

   status == 0:       spinlock is free
   status == 1:       spinlock is taken; no thread is waiting on it

   (status & 1) == 1: spinlock is taken and (status & ~1L) is a
                      pointer to the first waiting thread; other
                      waiting threads are linked via the p_nextlock
                      field.
   (status & 1) == 0: same as above, but spinlock is not taken.

   The waiting list is not sorted by priority order.
   Actually, we always insert at top of list (sole insertion mode
   that can be performed without locking).
   For __pthread_unlock, we perform a linear search in the list
   to find the highest-priority, oldest waiting thread.
   This is safe because there are no concurrent __pthread_unlock
   operations -- only the thread that locked the mutex can unlock it. */
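
/* Illustrative sketch of the encoding (it relies on pthread_descr pointers
   being at least 2-byte aligned, so the low bit is free to act as the
   locked flag):

     0x0                  free, no waiters
     0x1                  taken, no waiters
     (long) descr_A | 1   taken, thread A heads the wait list
     (long) descr_A       free, but thread A (and any threads linked
                          behind it via p_nextlock) are still queued.   */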

void internal_function __pthread_lock(struct _pthread_fastlock * lock,
                                      pthread_descr self)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldstatus, newstatus;
  int successful_seizure, spurious_wakeup_count;
  int spin_count;
#endif

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_acquire(&lock->__spinlock);
    return;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  /* First try it without preparation.  Maybe it's a completely
     uncontested lock. */
  if (lock->__status == 0 && __compare_and_swap (&lock->__status, 0, 1))
    return;

  spurious_wakeup_count = 0;
  spin_count = 0;

again:

  /* On SMP, try spinning to get the lock. */

  if (__pthread_smp_kernel) {
    int max_count = lock->__spinlock * 2 + 10;

    for (spin_count = 0; spin_count < max_count; spin_count++) {
      if (((oldstatus = lock->__status) & 1) == 0) {
        if (__compare_and_swap(&lock->__status, oldstatus, oldstatus | 1))
        {
          if (spin_count)
            lock->__spinlock += (spin_count - lock->__spinlock) / 8;
          READ_MEMORY_BARRIER();
          return;
        }
      }
      __asm __volatile ("" : "=m" (lock->__status) : "0" (lock->__status));
    }

    lock->__spinlock += (spin_count - lock->__spinlock) / 8;
  }
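
  /* The two updates of lock->__spinlock above maintain a running estimate
     of how long the lock tends to stay busy: each sample moves the stored
     value one eighth of the way toward the observed spin_count (e.g. an
     estimate of 40 and an observed count of 120 yield 40 + 80/8 = 50),
     and the next contended acquisition spins for roughly twice that
     estimate before giving up and suspending. */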

  /* No luck, try once more or suspend. */

  do {
    oldstatus = lock->__status;
    successful_seizure = 0;

    if ((oldstatus & 1) == 0) {
      newstatus = oldstatus | 1;
      successful_seizure = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      newstatus = (long) self | 1;
    }

    if (self != NULL) {
      THREAD_SETMEM(self, p_nextlock, (pthread_descr) (oldstatus & ~1L));
      /* Make sure the store in p_nextlock completes before performing
         the compare-and-swap */
      MEMORY_BARRIER();
    }
  } while (! __compare_and_swap(&lock->__status, oldstatus, newstatus));

  /* Suspend with guard against spurious wakeup.
     This can happen in pthread_cond_timedwait_relative, when the thread
     wakes up due to timeout and is still on the condvar queue, and then
     locks the queue to remove itself. At that point it may still be on the
     queue, and may be resumed by a condition signal. */

  if (!successful_seizure) {
    for (;;) {
      suspend(self);
      if (self->p_nextlock != NULL) {
        /* Count resumes that don't belong to us. */
        spurious_wakeup_count++;
        continue;
      }
      break;
    }
    goto again;
  }

  /* Put back any resumes we caught that don't belong to us. */
  while (spurious_wakeup_count--)
    restart(self);

  READ_MEMORY_BARRIER();
#endif
}

int __pthread_unlock(struct _pthread_fastlock * lock)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldstatus;
  pthread_descr thr, * ptr, * maxptr;
  int maxprio;
#endif

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_release(&lock->__spinlock);
    return 0;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  WRITE_MEMORY_BARRIER();

again:
  while ((oldstatus = lock->__status) == 1) {
    if (__compare_and_swap_with_release_semantics(&lock->__status,
                                                  oldstatus, 0))
      return 0;
  }

  /* Find thread in waiting queue with maximal priority */
  ptr = (pthread_descr *) &lock->__status;
  thr = (pthread_descr) (oldstatus & ~1L);
  maxprio = 0;
  maxptr = ptr;

  /* Before we iterate over the wait queue, we need to execute
     a read barrier, otherwise we may read stale contents of nodes that may
     just have been inserted by other processors. One read barrier is enough to
     ensure we have a stable list; we don't need one for each pointer chase
     through the list, because we are the owner of the lock; other threads
     can only add nodes at the front; if a front node is consistent,
     the ones behind it must also be. */

  READ_MEMORY_BARRIER();

  while (thr != 0) {
    if (thr->p_priority >= maxprio) {
      maxptr = ptr;
      maxprio = thr->p_priority;
    }
    ptr = &(thr->p_nextlock);
    thr = *ptr;
  }

  /* Remove max prio thread from waiting list. */
  if (maxptr == (pthread_descr *) &lock->__status) {
    /* If max prio thread is at head, remove it with compare-and-swap
       to guard against concurrent lock operation. This removal
       also has the side effect of marking the lock as released
       because the new status comes from thr->p_nextlock whose
       least significant bit is clear. */
    thr = (pthread_descr) (oldstatus & ~1L);
    if (! __compare_and_swap_with_release_semantics
            (&lock->__status, oldstatus, (long)(thr->p_nextlock)))
      goto again;
  } else {
    /* No risk of concurrent access, remove max prio thread normally.
       But in this case we must also flip the least significant bit
       of the status to mark the lock as released. */
    thr = *maxptr;
    *maxptr = thr->p_nextlock;

    /* Ensure deletion from linked list completes before we
       release the lock. */
    WRITE_MEMORY_BARRIER();

    do {
      oldstatus = lock->__status;
    } while (!__compare_and_swap_with_release_semantics(&lock->__status,
             oldstatus, oldstatus & ~1L));
  }

  /* Wake up the selected waiting thread. Woken thread can check
     its own p_nextlock field for NULL to detect that it has been removed. No
     barrier is needed here, since restart() and suspend() take
     care of memory synchronization. */

  thr->p_nextlock = NULL;
  restart(thr);

  return 0;
#endif
}

/*
 * Alternate fastlocks do not queue threads directly. Instead, they queue
 * these wait queue node structures. When a timed wait wakes up due to
 * a timeout, it can leave its wait node in the queue (because there
 * is no safe way to remove from the queue). Some other thread will
 * deallocate the abandoned node.
 */
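
/* Sketch of the abandonment handshake (see __pthread_alt_timedlock and
   __pthread_alt_unlock below): when a timed waiter wakes up on timeout,
   it races with the unlocking owner via testandset() on the node's
   `abandoned' flag.  If the waiter wins, it reports a timeout and leaves
   the node queued; a later unlock scan dequeues and frees it.  If the
   owner wins, it dequeues the node and restarts the waiter, which then
   holds the lock and frees its own node. */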

struct wait_node {
  struct wait_node *next;   /* Next node in null terminated linked list */
  pthread_descr thr;        /* The thread waiting with this node */
  int abandoned;            /* Atomic flag */
};

static long wait_node_free_list;
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
static int wait_node_free_list_spinlock;
#endif

/* Allocate a new node from the head of the free list using an atomic
   operation, or else using malloc if that list is empty.  A fundamental
   assumption here is that we can safely access wait_node_free_list->next.
   That's because we never free nodes once we allocate them, so a pointer to a
   node remains valid indefinitely. */

static struct wait_node *wait_node_alloc(void)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldvalue, newvalue;
#endif

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    struct wait_node *new_node = 0;

    __pthread_acquire(&wait_node_free_list_spinlock);
    if (wait_node_free_list != 0) {
      new_node = (struct wait_node *) wait_node_free_list;
      wait_node_free_list = (long) new_node->next;
    }
    WRITE_MEMORY_BARRIER();
    wait_node_free_list_spinlock = 0;

    if (new_node == 0)
      return malloc(sizeof *wait_node_alloc());

    return new_node;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  do {
    oldvalue = wait_node_free_list;

    if (oldvalue == 0)
      return malloc(sizeof *wait_node_alloc());

    /* Ensure we don't read stale next link through oldvalue pointer. */
    READ_MEMORY_BARRIER();
    newvalue = (long) ((struct wait_node *) oldvalue)->next;
  } while (! __compare_and_swap(&wait_node_free_list, oldvalue, newvalue));

  return (struct wait_node *) oldvalue;
#endif
}

/* Return a node to the head of the free list using an atomic
   operation. */

static void wait_node_free(struct wait_node *wn)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldvalue, newvalue;
#endif

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_acquire(&wait_node_free_list_spinlock);
    wn->next = (struct wait_node *) wait_node_free_list;
    wait_node_free_list = (long) wn;
    WRITE_MEMORY_BARRIER();
    wait_node_free_list_spinlock = 0;
    return;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  do {
    oldvalue = wait_node_free_list;
    wn->next = (struct wait_node *) oldvalue;
    newvalue = (long) wn;
    /* Ensure node contents are written before we swap it into the list. */
    WRITE_MEMORY_BARRIER();
  } while (! __compare_and_swap(&wait_node_free_list, oldvalue, newvalue));
#endif
}

#if defined HAS_COMPARE_AND_SWAP

/* Remove a wait node from the specified queue.  It is assumed
   that the removal takes place concurrently with only atomic insertions at the
   head of the queue. */

static void wait_node_dequeue(struct wait_node **pp_head,
                              struct wait_node **pp_node,
                              struct wait_node *p_node)
{
  /* If the node is being deleted from the head of the
     list, it must be deleted using atomic compare-and-swap.
     Otherwise it can be deleted in the straightforward way. */

  if (pp_node == pp_head) {
    /* We don't need a read barrier between these next two loads,
       because it is assumed that the caller has already ensured
       the stability of *p_node with respect to p_node. */

    long oldvalue = (long) p_node;
    long newvalue = (long) p_node->next;

    if (__compare_and_swap((long *) pp_node, oldvalue, newvalue))
      return;

    /* Oops! Compare and swap failed, which means the node is
       no longer first. We delete it using the ordinary method. But we don't
       know the identity of the node which now holds the pointer to the node
       being deleted, so we must search from the beginning. */

    for (pp_node = pp_head; p_node != *pp_node; ) {
      pp_node = &(*pp_node)->next;
      READ_MEMORY_BARRIER(); /* Stabilize *pp_node for next iteration. */
    }
  }

  *pp_node = p_node->next;
  return;
}

#endif

void __pthread_alt_lock(struct _pthread_fastlock * lock,
                        pthread_descr self)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldstatus, newstatus;
#endif
  struct wait_node wait_node;

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    int suspend_needed = 0;
    __pthread_acquire(&lock->__spinlock);

    if (lock->__status == 0)
      lock->__status = 1;
    else {
      if (self == NULL)
        self = thread_self();

      wait_node.abandoned = 0;
      wait_node.next = (struct wait_node *) lock->__status;
      wait_node.thr = self;
      lock->__status = (long) &wait_node;
      suspend_needed = 1;
    }

    __pthread_release(&lock->__spinlock);

    if (suspend_needed)
      suspend (self);
    return;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  do {
    oldstatus = lock->__status;
    if (oldstatus == 0) {
      newstatus = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      wait_node.thr = self;
      newstatus = (long) &wait_node;
    }
    wait_node.abandoned = 0;
    wait_node.next = (struct wait_node *) oldstatus;
    /* Make sure the store in wait_node.next completes before performing
       the compare-and-swap */
    MEMORY_BARRIER();
  } while (! __compare_and_swap(&lock->__status, oldstatus, newstatus));

  /* Suspend. Note that unlike in __pthread_lock, we don't worry
     here about spurious wakeup. That's because this lock is not
     used in situations where that can happen; the restart can
     only come from the previous lock owner. */

  if (oldstatus != 0)
    suspend(self);

  READ_MEMORY_BARRIER();
#endif
}

/* Timed-out lock operation; returns 0 to indicate timeout. */

int __pthread_alt_timedlock(struct _pthread_fastlock * lock,
                            pthread_descr self, const struct timespec *abstime)
{
  long oldstatus = 0;
#if defined HAS_COMPARE_AND_SWAP
  long newstatus;
#endif
  struct wait_node *p_wait_node = wait_node_alloc();

  /* Out of memory, just give up and do ordinary lock. */
  if (p_wait_node == 0) {
    __pthread_alt_lock(lock, self);
    return 1;
  }

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_acquire(&lock->__spinlock);

    if (lock->__status == 0)
      lock->__status = 1;
    else {
      if (self == NULL)
        self = thread_self();

      p_wait_node->abandoned = 0;
      p_wait_node->next = (struct wait_node *) lock->__status;
      p_wait_node->thr = self;
      lock->__status = (long) p_wait_node;
      oldstatus = 1; /* force suspend */
    }

    __pthread_release(&lock->__spinlock);
    goto suspend;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  do {
    oldstatus = lock->__status;
    if (oldstatus == 0) {
      newstatus = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      p_wait_node->thr = self;
      newstatus = (long) p_wait_node;
    }
    p_wait_node->abandoned = 0;
    p_wait_node->next = (struct wait_node *) oldstatus;
    /* Make sure the store in wait_node.next completes before performing
       the compare-and-swap */
    MEMORY_BARRIER();
  } while (! __compare_and_swap(&lock->__status, oldstatus, newstatus));
#endif

#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  suspend:
#endif

  /* If we did not get the lock, do a timed suspend. If we wake up due
     to a timeout, then there is a race; the old lock owner may try
     to remove us from the queue. This race is resolved by us and the owner
     doing an atomic testandset() to change the state of the wait node from 0
     to 1. If we succeed, then it's a timeout and we abandon the node in the
     queue. If we fail, it means the owner gave us the lock. */
  if (oldstatus != 0) {
    if (timedsuspend(self, abstime) == 0) {
      if (!testandset(&p_wait_node->abandoned))
        return 0; /* Timeout! */

      /* Eat outstanding resume from owner, otherwise wait_node_free() below
         will race with owner's wait_node_dequeue(). */
      suspend(self);
    }
  }

  wait_node_free(p_wait_node);

  READ_MEMORY_BARRIER();

  return 1; /* Got the lock! */
}

void __pthread_alt_unlock(struct _pthread_fastlock *lock)
{
  struct wait_node *p_node, **pp_node, *p_max_prio, **pp_max_prio;
  struct wait_node ** const pp_head = (struct wait_node **) &lock->__status;
  int maxprio;

  WRITE_MEMORY_BARRIER();

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_acquire(&lock->__spinlock);
  }
#endif

  while (1) {

    /* If no threads are waiting for this lock, try to just
       atomically release it. */
#if defined TEST_FOR_COMPARE_AND_SWAP
    if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
    {
      if (lock->__status == 0 || lock->__status == 1) {
        lock->__status = 0;
        break;
      }
    }
#endif

#if defined TEST_FOR_COMPARE_AND_SWAP
    else
#endif

#if defined HAS_COMPARE_AND_SWAP
    {
      long oldstatus = lock->__status;
      if (oldstatus == 0 || oldstatus == 1) {
        if (__compare_and_swap_with_release_semantics (&lock->__status, oldstatus, 0))
          break;
        else
          continue;
      }
    }
#endif

    /* Process the entire queue of wait nodes. Remove all abandoned
       wait nodes and put them into the global free queue, and
       remember the one unabandoned node which refers to the thread
       having the highest priority. */

    pp_max_prio = pp_node = pp_head;
    p_max_prio = p_node = *pp_head;
    maxprio = INT_MIN;

    READ_MEMORY_BARRIER(); /* Prevent access to stale data through p_node */

    while (p_node != (struct wait_node *) 1) {
      int prio;

      if (p_node->abandoned) {
        /* Remove abandoned node. */
#if defined TEST_FOR_COMPARE_AND_SWAP
        if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
          *pp_node = p_node->next;
#endif
#if defined TEST_FOR_COMPARE_AND_SWAP
        else
#endif
#if defined HAS_COMPARE_AND_SWAP
          wait_node_dequeue(pp_head, pp_node, p_node);
#endif
        wait_node_free(p_node);
        /* Note that the next assignment may take us to the beginning
           of the queue, to newly inserted nodes, if pp_node == pp_head.
           In that case we need a memory barrier to stabilize the first of
           these new nodes. */
        p_node = *pp_node;
        if (pp_node == pp_head)
          READ_MEMORY_BARRIER(); /* No stale reads through p_node */
        continue;
      } else if ((prio = p_node->thr->p_priority) >= maxprio) {
        /* Otherwise remember it if its thread has a higher or equal priority
           compared to that of any node seen thus far. */
        maxprio = prio;
        pp_max_prio = pp_node;
        p_max_prio = p_node;
      }
      /* This cannot jump backward in the list, so no further read
         barrier is needed. */
      pp_node = &p_node->next;
      p_node = *pp_node;
    }

    /* If all threads abandoned, go back to top */
    if (maxprio == INT_MIN)
      continue;

    ASSERT (p_max_prio != (struct wait_node *) 1);

    /* Now we want to remove the max priority thread's wait node from
       the list. Before we can do this, we must atomically try to change the
       node's abandon state from zero to nonzero. If we succeed, that means we
       have the node that we will wake up. If we failed, then it means the
       thread timed out and abandoned the node in which case we repeat the
       whole unlock operation. */
    if (!testandset(&p_max_prio->abandoned)) {
#if defined TEST_FOR_COMPARE_AND_SWAP
      if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
        *pp_max_prio = p_max_prio->next;
#endif
#if defined TEST_FOR_COMPARE_AND_SWAP
      else
#endif
#if defined HAS_COMPARE_AND_SWAP
        wait_node_dequeue(pp_head, pp_max_prio, p_max_prio);
#endif
      restart(p_max_prio->thr);
      break;
    }
  }

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_release(&lock->__spinlock);
  }
#endif
}

/* Compare-and-swap emulation with a spinlock */

#ifdef TEST_FOR_COMPARE_AND_SWAP
int __pthread_has_cas = 0;
#endif

#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP

int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
                               int * spinlock)
{
  int res;

  __pthread_acquire(spinlock);

  if (*ptr == oldval) {
    *ptr = newval; res = 1;
  } else {
    res = 0;
  }

  __pthread_release(spinlock);

  return res;
}

/* This function is called if the inlined test-and-set
   in __pthread_compare_and_swap() failed */

/* The retry strategy is as follows:
   - We test and set the spinlock MAX_SPIN_COUNT times, calling
     sched_yield() each time.  This gives ample opportunity for other
     threads with priority >= our priority to make progress and
     release the spinlock.
   - If a thread with priority < our priority owns the spinlock,
     calling sched_yield() repeatedly is useless, since we're preventing
     the owning thread from making progress and releasing the spinlock.
     So, after MAX_SPIN_COUNT attempts, we suspend the calling thread
     using nanosleep().  This again should give time to the owning thread
     for releasing the spinlock.
     Notice that the nanosleep() interval must not be too small,
     since the kernel does busy-waiting for short intervals in a realtime
     process (!).  The smallest duration that guarantees thread
     suspension is currently 2ms.
   - When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
     sched_yield(), then sleeping again if needed. */

static void __pthread_acquire(int * spinlock)
{
  int cnt = 0;
  struct timespec tm;

  READ_MEMORY_BARRIER();

  while (testandset(spinlock)) {
    if (cnt < MAX_SPIN_COUNT) {
      sched_yield();
      cnt++;
    } else {
      tm.tv_sec = 0;
      tm.tv_nsec = SPIN_SLEEP_DURATION;
      nanosleep(&tm, NULL);
      cnt = 0;
    }
  }
}

#endif