Merge mozilla-central and tracemonkey. (a=blockers)
[mozilla-central.git] / js / src / jslock.cpp
blobe9f505e21fc7c94a7c3b4b244ae0fa308e17e135
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
3 * ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
14 * License.
16 * The Original Code is Mozilla Communicator client code, released
17 * March 31, 1998.
19 * The Initial Developer of the Original Code is
20 * Netscape Communications Corporation.
21 * Portions created by the Initial Developer are Copyright (C) 1998
22 * the Initial Developer. All Rights Reserved.
24 * Contributor(s):
26 * Alternatively, the contents of this file may be used under the terms of
27 * either of the GNU General Public License Version 2 or later (the "GPL"),
28 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 * in which case the provisions of the GPL or the LGPL are applicable instead
30 * of those above. If you wish to allow use of your version of this file only
31 * under the terms of either the GPL or the LGPL, and not to allow others to
32 * use your version of this file under the terms of the MPL, indicate your
33 * decision by deleting the provisions above and replace them with the notice
34 * and other provisions required by the GPL or the LGPL. If you do not delete
35 * the provisions above, a recipient may use your version of this file under
36 * the terms of any one of the MPL, the GPL or the LGPL.
38 * ***** END LICENSE BLOCK ***** */
40 #ifdef JS_THREADSAFE
/*
 * JS locking stubs.
 */
45 #include <stdlib.h>
46 #include <string.h>
47 #include "jspubtd.h"
48 #include "jsutil.h"
49 #include "jstypes.h"
50 #include "jsstdint.h"
51 #include "jsbit.h"
52 #include "jscntxt.h"
53 #include "jsgc.h"
54 #include "jslock.h"
55 #include "jsscope.h"
56 #include "jsstr.h"
58 using namespace js;
/*
 * ReadWord is a plain volatile-safe read of a lock word; it exists so call
 * sites document that they are sampling a word that other threads mutate.
 */
#define ReadWord(W) (W)

/* Non-GCC compilers spell the asm keywords without underscores. */
#if !defined(__GNUC__)
# define __asm__ asm
# define __volatile__ volatile
#endif
67 /* Implement NativeCompareAndSwap. */
69 #if defined(_MSC_VER) && defined(_M_IX86)
70 #pragma warning( disable : 4035 )
71 JS_BEGIN_EXTERN_C
72 extern long __cdecl
73 _InterlockedCompareExchange(long *volatile dest, long exchange, long comp);
74 JS_END_EXTERN_C
75 #pragma intrinsic(_InterlockedCompareExchange)
77 JS_STATIC_ASSERT(sizeof(jsword) == sizeof(long));
79 static JS_ALWAYS_INLINE int
80 NativeCompareAndSwapHelper(volatile jsword *w, jsword ov, jsword nv)
82 _InterlockedCompareExchange((long*) w, nv, ov);
83 __asm {
84 sete al
88 static JS_ALWAYS_INLINE int
89 NativeCompareAndSwap(volatile jsword *w, jsword ov, jsword nv)
91 return (NativeCompareAndSwapHelper(w, ov, nv) & 1);
94 #elif defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_X64))
95 JS_BEGIN_EXTERN_C
96 extern long long __cdecl
97 _InterlockedCompareExchange64(long long *volatile dest, long long exchange, long long comp);
98 JS_END_EXTERN_C
99 #pragma intrinsic(_InterlockedCompareExchange64)
101 static JS_ALWAYS_INLINE int
102 NativeCompareAndSwap(volatile jsword *w, jsword ov, jsword nv)
104 return _InterlockedCompareExchange64((long long *volatile)w, nv, ov) == ov;
107 #elif defined(XP_MACOSX) || defined(DARWIN)
109 #include <libkern/OSAtomic.h>
111 static JS_ALWAYS_INLINE int
112 NativeCompareAndSwap(volatile jsword *w, jsword ov, jsword nv)
114 /* Details on these functions available in the manpage for atomic */
115 return OSAtomicCompareAndSwapPtrBarrier(reinterpret_cast<void *>(ov),
116 reinterpret_cast<void *>(nv),
117 reinterpret_cast<void * volatile *>(w));
120 #elif defined(__i386) && (defined(__GNUC__) || defined(__SUNPRO_CC))
122 /* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */
123 static JS_ALWAYS_INLINE int
124 NativeCompareAndSwap(volatile jsword *w, jsword ov, jsword nv)
126 unsigned int res;
128 __asm__ __volatile__ (
129 "lock\n"
130 "cmpxchgl %2, (%1)\n"
131 "sete %%al\n"
132 "andl $1, %%eax\n"
133 : "=a" (res)
134 #ifdef __SUNPRO_CC
135 /* Different code for Sun Studio because of a bug of SS12U1 */
136 : "c" (w), "d" (nv), "a" (ov)
137 #else
138 : "r" (w), "r" (nv), "a" (ov)
139 #endif
140 : "cc", "memory");
141 return (int)res;
143 #elif defined(__x86_64) && (defined(__GNUC__) || defined(__SUNPRO_CC))
145 static JS_ALWAYS_INLINE int
146 NativeCompareAndSwap(volatile jsword *w, jsword ov, jsword nv)
148 unsigned int res;
150 __asm__ __volatile__ (
151 "lock\n"
152 "cmpxchgq %2, (%1)\n"
153 "sete %%al\n"
154 "movzbl %%al, %%eax\n"
155 : "=a" (res)
156 : "r" (w), "r" (nv), "a" (ov)
157 : "cc", "memory");
158 return (int)res;
161 #elif defined(__sparc)
162 #if defined(__GNUC__)
164 static JS_ALWAYS_INLINE int
165 NativeCompareAndSwap(volatile jsword *w, jsword ov, jsword nv)
167 unsigned int res;
169 __asm__ __volatile__ (
170 "membar #StoreLoad | #LoadLoad\n"
171 #if JS_BITS_PER_WORD == 32
172 "cas [%1],%2,%3\n"
173 #else
174 "casx [%1],%2,%3\n"
175 #endif
176 "membar #StoreLoad | #LoadLoad\n"
177 "cmp %2,%3\n"
178 "be,a 1f\n"
179 "mov 1,%0\n"
180 "mov 0,%0\n"
181 "1:"
182 : "=r" (res)
183 : "r" (w), "r" (ov), "r" (nv));
184 return (int)res;
187 #elif defined(__SUNPRO_CC)
189 /* Implementation in lock_sparc*.il */
190 extern "C" int
191 NativeCompareAndSwap(volatile jsword *w, jsword ov, jsword nv);
193 #endif
195 #elif defined(AIX)
197 #include <sys/atomic_op.h>
199 static JS_ALWAYS_INLINE int
200 NativeCompareAndSwap(volatile jsword *w, jsword ov, jsword nv)
202 int res;
203 JS_STATIC_ASSERT(sizeof(jsword) == sizeof(long));
205 res = compare_and_swaplp((atomic_l)w, &ov, nv);
206 if (res)
207 __asm__("isync");
208 return res;
211 #elif defined(USE_ARM_KUSER)
213 /* See https://bugzilla.mozilla.org/show_bug.cgi?id=429387 for a
214 * description of this ABI; this is a function provided at a fixed
215 * location by the kernel in the memory space of each process.
217 typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
218 #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
220 JS_STATIC_ASSERT(sizeof(jsword) == sizeof(int));
222 static JS_ALWAYS_INLINE int
223 NativeCompareAndSwap(volatile jsword *w, jsword ov, jsword nv)
225 volatile int *vp = (volatile int *) w;
226 PRInt32 failed = 1;
228 /* Loop until a __kernel_cmpxchg succeeds. See bug 446169 */
229 do {
230 failed = __kernel_cmpxchg(ov, nv, vp);
231 } while (failed && *vp == ov);
232 return !failed;
235 #elif JS_HAS_NATIVE_COMPARE_AND_SWAP
237 #error "JS_HAS_NATIVE_COMPARE_AND_SWAP should be 0 if your platform lacks a compare-and-swap instruction."
239 #endif /* arch-tests */
241 #if JS_HAS_NATIVE_COMPARE_AND_SWAP
243 JSBool
244 js_CompareAndSwap(volatile jsword *w, jsword ov, jsword nv)
246 return !!NativeCompareAndSwap(w, ov, nv);
249 #elif defined(NSPR_LOCK)
251 # ifdef __GNUC__
252 # warning "js_CompareAndSwap is implemented using NSPR lock"
253 # endif
255 JSBool
256 js_CompareAndSwap(volatile jsword *w, jsword ov, jsword nv)
258 int result;
259 static PRLock *CompareAndSwapLock = JS_NEW_LOCK();
261 JS_ACQUIRE_LOCK(CompareAndSwapLock);
262 result = (*w == ov);
263 if (result)
264 *w = nv;
265 JS_RELEASE_LOCK(CompareAndSwapLock);
266 return result;
269 #else /* !defined(NSPR_LOCK) */
271 #error "NSPR_LOCK should be on when the platform lacks native compare-and-swap."
273 #endif
275 void
276 js_AtomicSetMask(volatile jsword *w, jsword mask)
278 jsword ov, nv;
280 do {
281 ov = *w;
282 nv = ov | mask;
283 } while (!js_CompareAndSwap(w, ov, nv));
286 void
287 js_AtomicClearMask(volatile jsword *w, jsword mask)
289 jsword ov, nv;
291 do {
292 ov = *w;
293 nv = ov & ~mask;
294 } while (!js_CompareAndSwap(w, ov, nv));
297 #ifndef NSPR_LOCK
299 struct JSFatLock {
300 int susp;
301 PRLock *slock;
302 PRCondVar *svar;
303 JSFatLock *next;
304 JSFatLock **prevp;
307 typedef struct JSFatLockTable {
308 JSFatLock *free;
309 JSFatLock *taken;
310 } JSFatLockTable;
312 #define GLOBAL_LOCK_INDEX(id) (((uint32)(jsuword)(id)>>2) & global_locks_mask)
314 static void
315 js_Dequeue(JSThinLock *);
317 static PRLock **global_locks;
318 static uint32 global_lock_count = 1;
319 static uint32 global_locks_log2 = 0;
320 static uint32 global_locks_mask = 0;
322 static void
323 js_LockGlobal(void *id)
325 uint32 i = GLOBAL_LOCK_INDEX(id);
326 PR_Lock(global_locks[i]);
329 static void
330 js_UnlockGlobal(void *id)
332 uint32 i = GLOBAL_LOCK_INDEX(id);
333 PR_Unlock(global_locks[i]);
336 #endif /* !NSPR_LOCK */
338 void
339 js_InitLock(JSThinLock *tl)
341 #ifdef NSPR_LOCK
342 tl->owner = 0;
343 tl->fat = (JSFatLock*)JS_NEW_LOCK();
344 #else
345 PodZero(tl);
346 #endif
349 void
350 js_FinishLock(JSThinLock *tl)
352 #ifdef NSPR_LOCK
353 tl->owner = 0xdeadbeef;
354 if (tl->fat)
355 JS_DESTROY_LOCK(((JSLock*)tl->fat));
356 #else
357 JS_ASSERT(tl->owner == 0);
358 JS_ASSERT(tl->fat == NULL);
359 #endif
362 #ifndef NSPR_LOCK
364 static JSFatLock *
365 NewFatlock()
367 JSFatLock *fl = (JSFatLock *)js_malloc(sizeof(JSFatLock)); /* for now */
368 if (!fl) return NULL;
369 fl->susp = 0;
370 fl->next = NULL;
371 fl->prevp = NULL;
372 fl->slock = PR_NewLock();
373 fl->svar = PR_NewCondVar(fl->slock);
374 return fl;
377 static void
378 DestroyFatlock(JSFatLock *fl)
380 PR_DestroyLock(fl->slock);
381 PR_DestroyCondVar(fl->svar);
382 js_free(fl);
385 static JSFatLock *
386 ListOfFatlocks(int listc)
388 JSFatLock *m;
389 JSFatLock *m0;
390 int i;
392 JS_ASSERT(listc>0);
393 m0 = m = NewFatlock();
394 for (i=1; i<listc; i++) {
395 m->next = NewFatlock();
396 m = m->next;
398 return m0;
401 static void
402 DeleteListOfFatlocks(JSFatLock *m)
404 JSFatLock *m0;
405 for (; m; m=m0) {
406 m0 = m->next;
407 DestroyFatlock(m);
411 static JSFatLockTable *fl_list_table = NULL;
412 static uint32 fl_list_table_len = 0;
413 static uint32 fl_list_chunk_len = 0;
415 static JSFatLock *
416 GetFatlock(void *id)
418 JSFatLock *m;
420 uint32 i = GLOBAL_LOCK_INDEX(id);
421 if (fl_list_table[i].free == NULL) {
422 #ifdef DEBUG
423 if (fl_list_table[i].taken)
424 printf("Ran out of fat locks!\n");
425 #endif
426 fl_list_table[i].free = ListOfFatlocks(fl_list_chunk_len);
428 m = fl_list_table[i].free;
429 fl_list_table[i].free = m->next;
430 m->susp = 0;
431 m->next = fl_list_table[i].taken;
432 m->prevp = &fl_list_table[i].taken;
433 if (fl_list_table[i].taken)
434 fl_list_table[i].taken->prevp = &m->next;
435 fl_list_table[i].taken = m;
436 return m;
439 static void
440 PutFatlock(JSFatLock *m, void *id)
442 uint32 i;
443 if (m == NULL)
444 return;
446 /* Unlink m from fl_list_table[i].taken. */
447 *m->prevp = m->next;
448 if (m->next)
449 m->next->prevp = m->prevp;
451 /* Insert m in fl_list_table[i].free. */
452 i = GLOBAL_LOCK_INDEX(id);
453 m->next = fl_list_table[i].free;
454 fl_list_table[i].free = m;
457 #endif /* !NSPR_LOCK */
459 JSBool
460 js_SetupLocks(int listc, int globc)
462 #ifndef NSPR_LOCK
463 uint32 i;
465 if (global_locks)
466 return JS_TRUE;
467 #ifdef DEBUG
468 if (listc > 10000 || listc < 0) /* listc == fat lock list chunk length */
469 printf("Bad number %d in js_SetupLocks()!\n", listc);
470 if (globc > 100 || globc < 0) /* globc == number of global locks */
471 printf("Bad number %d in js_SetupLocks()!\n", listc);
472 #endif
473 global_locks_log2 = JS_CeilingLog2(globc);
474 global_locks_mask = JS_BITMASK(global_locks_log2);
475 global_lock_count = JS_BIT(global_locks_log2);
476 global_locks = (PRLock **) js_malloc(global_lock_count * sizeof(PRLock*));
477 if (!global_locks)
478 return JS_FALSE;
479 for (i = 0; i < global_lock_count; i++) {
480 global_locks[i] = PR_NewLock();
481 if (!global_locks[i]) {
482 global_lock_count = i;
483 js_CleanupLocks();
484 return JS_FALSE;
487 fl_list_table = (JSFatLockTable *) js_malloc(i * sizeof(JSFatLockTable));
488 if (!fl_list_table) {
489 js_CleanupLocks();
490 return JS_FALSE;
492 fl_list_table_len = global_lock_count;
493 for (i = 0; i < global_lock_count; i++)
494 fl_list_table[i].free = fl_list_table[i].taken = NULL;
495 fl_list_chunk_len = listc;
496 #endif /* !NSPR_LOCK */
497 return JS_TRUE;
500 void
501 js_CleanupLocks()
503 #ifndef NSPR_LOCK
504 uint32 i;
506 if (global_locks) {
507 for (i = 0; i < global_lock_count; i++)
508 PR_DestroyLock(global_locks[i]);
509 js_free(global_locks);
510 global_locks = NULL;
511 global_lock_count = 1;
512 global_locks_log2 = 0;
513 global_locks_mask = 0;
515 if (fl_list_table) {
516 for (i = 0; i < fl_list_table_len; i++) {
517 DeleteListOfFatlocks(fl_list_table[i].free);
518 fl_list_table[i].free = NULL;
519 DeleteListOfFatlocks(fl_list_table[i].taken);
520 fl_list_table[i].taken = NULL;
522 js_free(fl_list_table);
523 fl_list_table = NULL;
524 fl_list_table_len = 0;
526 #endif /* !NSPR_LOCK */
529 #ifdef NSPR_LOCK
531 static JS_ALWAYS_INLINE void
532 ThinLock(JSThinLock *tl, jsword me)
534 JS_ACQUIRE_LOCK((JSLock *) tl->fat);
535 tl->owner = me;
538 static JS_ALWAYS_INLINE void
539 ThinUnlock(JSThinLock *tl, jsword /*me*/)
541 tl->owner = 0;
542 JS_RELEASE_LOCK((JSLock *) tl->fat);
545 #else
/*
 * Fast locking and unlocking is implemented by delaying the allocation of a
 * system lock (fat lock) until contention.  As long as a locking thread A
 * runs uncontended, the lock is represented solely by storing A's identity in
 * the object being locked.
 *
 * If another thread B tries to lock the object currently locked by A, B is
 * enqueued into a fat lock structure (which might have to be allocated and
 * pointed to by the object), and suspended using NSPR conditional variables
 * (wait).  A wait bit (Bacon bit) is set in the lock word of the object,
 * signalling to A that when releasing the lock, B must be dequeued and
 * notified.
 *
 * The basic operation of the locking primitives (js_Lock, js_Unlock,
 * js_Enqueue, and js_Dequeue) is compare-and-swap.  Hence, when locking into
 * the word pointed at by p, compare-and-swap(p, 0, A) success implies that p
 * is unlocked.  Similarly, when unlocking p, if compare-and-swap(p, A, 0)
 * succeeds this implies that p is uncontended (no one is waiting because the
 * wait bit is not set).
 *
 * When dequeueing, the lock is released, and one of the threads suspended on
 * the lock is notified.  If other threads still are waiting, the wait bit is
 * kept (in js_Enqueue), and if not, the fat lock is deallocated.
 *
 * The functions js_Enqueue, js_Dequeue, js_SuspendThread, and js_ResumeThread
 * are serialized using a global lock.  For scalability, a hashtable of global
 * locks is used, which is indexed modulo the thin lock pointer.
 */
577 * Invariants:
578 * (i) global lock is held
579 * (ii) fl->susp >= 0
581 static int
582 js_SuspendThread(JSThinLock *tl)
584 JSFatLock *fl;
585 PRStatus stat;
587 if (tl->fat == NULL)
588 fl = tl->fat = GetFatlock(tl);
589 else
590 fl = tl->fat;
591 JS_ASSERT(fl->susp >= 0);
592 fl->susp++;
593 PR_Lock(fl->slock);
594 js_UnlockGlobal(tl);
595 stat = PR_WaitCondVar(fl->svar, PR_INTERVAL_NO_TIMEOUT);
596 JS_ASSERT(stat != PR_FAILURE);
597 PR_Unlock(fl->slock);
598 js_LockGlobal(tl);
599 fl->susp--;
600 if (fl->susp == 0) {
601 PutFatlock(fl, tl);
602 tl->fat = NULL;
604 return tl->fat == NULL;
608 * (i) global lock is held
609 * (ii) fl->susp > 0
611 static void
612 js_ResumeThread(JSThinLock *tl)
614 JSFatLock *fl = tl->fat;
615 PRStatus stat;
617 JS_ASSERT(fl != NULL);
618 JS_ASSERT(fl->susp > 0);
619 PR_Lock(fl->slock);
620 js_UnlockGlobal(tl);
621 stat = PR_NotifyCondVar(fl->svar);
622 JS_ASSERT(stat != PR_FAILURE);
623 PR_Unlock(fl->slock);
626 static void
627 js_Enqueue(JSThinLock *tl, jsword me)
629 jsword o, n;
631 js_LockGlobal(tl);
632 for (;;) {
633 o = ReadWord(tl->owner);
634 n = Thin_SetWait(o);
635 if (o != 0 && NativeCompareAndSwap(&tl->owner, o, n)) {
636 if (js_SuspendThread(tl))
637 me = Thin_RemoveWait(me);
638 else
639 me = Thin_SetWait(me);
641 else if (NativeCompareAndSwap(&tl->owner, 0, me)) {
642 js_UnlockGlobal(tl);
643 return;
648 static void
649 js_Dequeue(JSThinLock *tl)
651 jsword o;
653 js_LockGlobal(tl);
654 o = ReadWord(tl->owner);
655 JS_ASSERT(Thin_GetWait(o) != 0);
656 JS_ASSERT(tl->fat != NULL);
657 if (!NativeCompareAndSwap(&tl->owner, o, 0)) /* release it */
658 JS_ASSERT(0);
659 js_ResumeThread(tl);
662 static JS_ALWAYS_INLINE void
663 ThinLock(JSThinLock *tl, jsword me)
665 JS_ASSERT(CURRENT_THREAD_IS_ME(me));
666 if (NativeCompareAndSwap(&tl->owner, 0, me))
667 return;
668 if (Thin_RemoveWait(ReadWord(tl->owner)) != me)
669 js_Enqueue(tl, me);
670 #ifdef DEBUG
671 else
672 JS_ASSERT(0);
673 #endif
676 static JS_ALWAYS_INLINE void
677 ThinUnlock(JSThinLock *tl, jsword me)
679 JS_ASSERT(CURRENT_THREAD_IS_ME(me));
682 * Since we can race with the NativeCompareAndSwap in js_Enqueue, we need
683 * to use a C_A_S here as well -- Arjan van de Ven 30/1/08
685 if (NativeCompareAndSwap(&tl->owner, me, 0))
686 return;
688 JS_ASSERT(Thin_GetWait(tl->owner));
689 if (Thin_RemoveWait(ReadWord(tl->owner)) == me)
690 js_Dequeue(tl);
691 #ifdef DEBUG
692 else
693 JS_ASSERT(0); /* unbalanced unlock */
694 #endif
697 #endif /* !NSPR_LOCK */
699 void
700 js_Lock(JSContext *cx, JSThinLock *tl)
702 ThinLock(tl, CX_THINLOCK_ID(cx));
705 void
706 js_Unlock(JSContext *cx, JSThinLock *tl)
708 ThinUnlock(tl, CX_THINLOCK_ID(cx));
711 void
712 js_LockRuntime(JSRuntime *rt)
714 PR_Lock(rt->rtLock);
715 #ifdef DEBUG
716 rt->rtLockOwner = js_CurrentThreadId();
717 #endif
720 void
721 js_UnlockRuntime(JSRuntime *rt)
723 #ifdef DEBUG
724 rt->rtLockOwner = NULL;
725 #endif
726 PR_Unlock(rt->rtLock);
729 #ifdef DEBUG
730 JSBool
731 js_IsRuntimeLocked(JSRuntime *rt)
733 return js_CurrentThreadId() == rt->rtLockOwner;
735 #endif /* DEBUG */
736 #endif /* JS_THREADSAFE */