/*
 * monitor.c: Monitor locking functions
 *
 * Author:
 *	Dick Porter (dick@ximian.com)
 *
 * Copyright 2003 Ximian, Inc (http://www.ximian.com)
 * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
 */
#include <config.h>
#include <glib.h>
#include <string.h>

#include <mono/metadata/monitor.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/threads.h>
#include <mono/io-layer/io-layer.h>
#include <mono/metadata/object-internals.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/method-builder.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/profiler-private.h>
#include <mono/utils/mono-time.h>
/*
 * Pull the list of opcodes
 */
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
	a = i,

enum {
#include "mono/cil/opcode.def"
	LAST = 0xff
};
#undef OPDEF

/*#define LOCK_DEBUG(a) do { a; } while (0)*/
#define LOCK_DEBUG(a)
/*
 * The monitor implementation here is based on
 * http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf and
 * http://www.research.ibm.com/people/d/dfb/papers/Bacon98Thin.ps
 *
 * The Dice paper describes a technique for saving lock record space
 * by returning records to a free list when they become unused. That
 * sounds like unnecessary complexity to me, though if it becomes
 * clear that unused lock records are taking up lots of space or we
 * need to shave more time off by avoiding a malloc then we can always
 * implement the free list idea later. The timeout parameter to
 * try_enter voids some of the assumptions about the reference count
 * field in Dice's implementation too. In his version, the thread
 * attempting to lock a contended object will block until it succeeds,
 * so the reference count will never be decremented while an object is
 * locked.
 *
 * Bacon's thin locks have a fast path that doesn't need a lock record
 * for the common case of locking an unlocked or shallow-nested
 * object, but the technique relies on encoding the thread ID in 15
 * bits (to avoid too much per-object space overhead). Unfortunately
 * I don't think it's possible to reliably encode a pthread_t into 15
 * bits (the JVM implementation referenced seems to have a 15-bit
 * per-thread identifier available).
 *
 * This implementation then combines Dice's basic lock model with
 * Bacon's simplification of keeping a lock record for the lifetime of
 * an object.
 */
struct _MonoThreadsSync
{
	gsize owner;			/* thread ID */
	guint32 nest;
#ifdef HAVE_MOVING_COLLECTOR
	gint32 hash_code;
#endif
	volatile gint32 entry_count;
	HANDLE entry_sem;
	GSList *wait_list;
	void *data;
};

typedef struct _MonitorArray MonitorArray;

struct _MonitorArray {
	MonitorArray *next;
	int num_monitors;
	MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];
};
#define mono_monitor_allocator_lock() EnterCriticalSection (&monitor_mutex)
#define mono_monitor_allocator_unlock() LeaveCriticalSection (&monitor_mutex)
static CRITICAL_SECTION monitor_mutex;
static MonoThreadsSync *monitor_freelist;
static MonitorArray *monitor_allocated;
static int array_size = 16;

#ifdef HAVE_KW_THREAD
static __thread gsize tls_pthread_self MONO_TLS_FAST;
#endif
#ifndef HOST_WIN32
#ifdef HAVE_KW_THREAD
#define GetCurrentThreadId() tls_pthread_self
#else
/*
 * The usual problem: we can't replace GetCurrentThreadId () with a macro because
 * it is in a public header.
 */
#define GetCurrentThreadId() ((gsize)pthread_self ())
#endif
#endif
void
mono_monitor_init (void)
{
	InitializeCriticalSection (&monitor_mutex);
}

void
mono_monitor_cleanup (void)
{
	/*DeleteCriticalSection (&monitor_mutex);*/
}
/*
 * mono_monitor_init_tls:
 *
 * Setup TLS variables used by the monitor code for the current thread.
 */
void
mono_monitor_init_tls (void)
{
#if !defined(HOST_WIN32) && defined(HAVE_KW_THREAD)
	tls_pthread_self = pthread_self ();
#endif
}
static int
monitor_is_on_freelist (MonoThreadsSync *mon)
{
	MonitorArray *marray;
	for (marray = monitor_allocated; marray; marray = marray->next) {
		if (mon >= marray->monitors && mon < &marray->monitors [marray->num_monitors])
			return TRUE;
	}
	return FALSE;
}
/**
 * mono_locks_dump:
 * @include_untaken: whether to also list inflated locks which are unheld
 *
 * Print a report on stdout of the managed locks currently held by
 * threads. If @include_untaken is specified, list also inflated locks
 * which are unheld.
 * This is supposed to be used in debuggers like gdb.
 */
void
mono_locks_dump (gboolean include_untaken)
{
	int i;
	int used = 0, on_freelist = 0, to_recycle = 0, total = 0, num_arrays = 0;
	MonoThreadsSync *mon;
	MonitorArray *marray;
	for (mon = monitor_freelist; mon; mon = mon->data)
		on_freelist++;
	for (marray = monitor_allocated; marray; marray = marray->next) {
		total += marray->num_monitors;
		num_arrays++;
		for (i = 0; i < marray->num_monitors; ++i) {
			mon = &marray->monitors [i];
			if (mon->data == NULL) {
				if (i < marray->num_monitors - 1)
					to_recycle++;
			} else {
				if (!monitor_is_on_freelist (mon->data)) {
					MonoObject *holder = mono_gc_weak_link_get (&mon->data);
					if (mon->owner) {
						g_print ("Lock %p in object %p held by thread %p, nest level: %d\n",
							mon, holder, (void*)mon->owner, mon->nest);
						if (mon->entry_sem)
							g_print ("\tWaiting on semaphore %p: %d\n", mon->entry_sem, mon->entry_count);
					} else if (include_untaken) {
						g_print ("Lock %p in object %p untaken\n", mon, holder);
					}
					used++;
				}
			}
		}
	}
	g_print ("Total locks (in %d array(s)): %d, used: %d, on freelist: %d, to recycle: %d\n",
		num_arrays, total, used, on_freelist, to_recycle);
}
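/*
 * Example gdb session (illustrative; the addresses and counts below are
 * made up, but the line formats match the g_print calls above):
 *
 *   (gdb) call mono_locks_dump (0)
 *   Lock 0xb7c0 in object 0x42d8 held by thread 0xb7d6eb90, nest level: 1
 *   Total locks (in 1 array(s)): 16, used: 1, on freelist: 14, to recycle: 1
 *
 * Pass 1 instead of 0 to also list inflated-but-unheld locks.
 */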
/* LOCKING: this is called with monitor_mutex held */
static void
mon_finalize (MonoThreadsSync *mon)
{
	LOCK_DEBUG (g_message ("%s: Finalizing sync %p", __func__, mon));

	if (mon->entry_sem != NULL) {
		CloseHandle (mon->entry_sem);
		mon->entry_sem = NULL;
	}
	/* If this isn't empty then something is seriously broken - it
	 * means a thread is still waiting on the object that owned
	 * this lock, but the object has been finalized.
	 */
	g_assert (mon->wait_list == NULL);

	mon->entry_count = 0;
	/* owner and nest are set in mon_new, no need to zero them out */

	mon->data = monitor_freelist;
	monitor_freelist = mon;
	mono_perfcounters->gc_sync_blocks--;
}
/* LOCKING: this is called with monitor_mutex held */
static MonoThreadsSync *
mon_new (gsize id)
{
	MonoThreadsSync *new;

	if (!monitor_freelist) {
		MonitorArray *marray;
		int i;
		/* see if any sync block has been collected */
		new = NULL;
		for (marray = monitor_allocated; marray; marray = marray->next) {
			for (i = 0; i < marray->num_monitors; ++i) {
				if (marray->monitors [i].data == NULL) {
					new = &marray->monitors [i];
					if (new->wait_list) {
						/* Orphaned events left by aborted threads */
						while (new->wait_list) {
							LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d): Closing orphaned event %d", GetCurrentThreadId (), new->wait_list->data));
							CloseHandle (new->wait_list->data);
							new->wait_list = g_slist_remove (new->wait_list, new->wait_list->data);
						}
					}
					new->data = monitor_freelist;
					monitor_freelist = new;
				}
			}
			/* small perf tweak to avoid scanning all the blocks */
			if (new)
				break;
		}
		/* need to allocate a new array of monitors */
		if (!monitor_freelist) {
			MonitorArray *last;
			LOCK_DEBUG (g_message ("%s: allocating more monitors: %d", __func__, array_size));
			marray = g_malloc0 (sizeof (MonitorArray) + array_size * sizeof (MonoThreadsSync));
			marray->num_monitors = array_size;
			array_size *= 2;
			/* link into the freelist */
			for (i = 0; i < marray->num_monitors - 1; ++i) {
				marray->monitors [i].data = &marray->monitors [i + 1];
			}
			marray->monitors [i].data = NULL; /* the last one */
			monitor_freelist = &marray->monitors [0];
			/* we append the marray instead of prepending so that
			 * the collecting loop above will need to scan smaller arrays first
			 */
			if (!monitor_allocated) {
				monitor_allocated = marray;
			} else {
				last = monitor_allocated;
				while (last->next)
					last = last->next;
				last->next = marray;
			}
		}
	}

	new = monitor_freelist;
	monitor_freelist = new->data;

	new->owner = id;
	new->nest = 1;

	mono_perfcounters->gc_sync_blocks++;
	return new;
}
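/*
 * Note on the 'data' field: while a MonoThreadsSync sits on the
 * freelist, 'data' is reused as the freelist 'next' pointer (see
 * mon_finalize () above); once the record is attached to an object,
 * mono_gc_weak_link_add () turns 'data' into a GC weak link back to
 * the owning object. mon_new () relies on this double duty: a record
 * inside an array whose 'data' is NULL had its weak link cleared by
 * the GC and can be recycled.
 */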
/*
 * Format of the lock word:
 * thinhash | fathash | data
 *
 * thinhash is the lower bit: if set, data is the shifted hashcode of the object.
 * fathash is another bit: if set, the hash code is stored in the MonoThreadsSync
 *   struct pointed to by data.
 * If neither bit is set and data is non-NULL, data is a MonoThreadsSync.
 */
typedef union {
	gsize lock_word;
	MonoThreadsSync *sync;
} LockWord;

enum {
	LOCK_WORD_THIN_HASH = 1,
	LOCK_WORD_FAT_HASH = 1 << 1,
	LOCK_WORD_BITS_MASK = 0x3,
	LOCK_WORD_HASH_SHIFT = 2
};

#define MONO_OBJECT_ALIGNMENT_SHIFT	3
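/*
 * Illustrative decoding of a lock word (a sketch mirroring the checks
 * done in mono_object_hash () and mono_monitor_exit () below):
 *
 *	LockWord lw;
 *	lw.sync = obj->synchronisation;
 *	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
 *		// no inflated lock; the hash lives in the word itself
 *		hash = (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT;
 *	} else if (lw.lock_word & LOCK_WORD_FAT_HASH) {
 *		// inflated lock that also carries a hash
 *		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
 *		hash = lw.sync->hash_code;
 *	} else if (lw.sync) {
 *		// plain inflated lock, no hash stored yet
 *		mon = lw.sync;
 *	}
 *
 * This relies on MonoThreadsSync pointers being at least 4-byte
 * aligned, so the two low bits of the word are free for the flags.
 */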
/*
 * mono_object_hash:
 * @obj: an object
 *
 * Calculate a hash code for @obj that is constant while @obj is alive.
 */
int
mono_object_hash (MonoObject* obj)
{
#ifdef HAVE_MOVING_COLLECTOR
	LockWord lw;
	unsigned int hash;
	if (!obj)
		return 0;
	lw.sync = obj->synchronisation;
	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
		/*g_print ("fast thin hash %d for obj %p store\n", (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT, obj);*/
		return (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT;
	}
	if (lw.lock_word & LOCK_WORD_FAT_HASH) {
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		/*g_print ("fast fat hash %d for obj %p store\n", lw.sync->hash_code, obj);*/
		return lw.sync->hash_code;
	}
	/*
	 * While we are inside this function, the GC will keep this object pinned,
	 * since we are in the unmanaged stack. Thanks to this and to the hash
	 * function that depends only on the address, we can ignore the races if
	 * another thread computes the hash at the same time, because it'll end up
	 * with the same value.
	 */
	hash = (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
	/* clear the top bits as they can be discarded */
	hash &= ~(LOCK_WORD_BITS_MASK << 30);
	/* no hash flags were set, so it must be a MonoThreadsSync pointer if not NULL */
	if (lw.sync) {
		lw.sync->hash_code = hash;
		/*g_print ("storing hash code %d for obj %p in sync %p\n", hash, obj, lw.sync);*/
		lw.lock_word |= LOCK_WORD_FAT_HASH;
		/* this is safe since we don't deflate locks */
		obj->synchronisation = lw.sync;
	} else {
		/*g_print ("storing thin hash code %d for obj %p\n", hash, obj);*/
		lw.lock_word = LOCK_WORD_THIN_HASH | (hash << LOCK_WORD_HASH_SHIFT);
		if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, NULL) == NULL)
			return hash;
		/*g_print ("failed store\n");*/
		/* someone set the hash flag or someone inflated the object */
		lw.sync = obj->synchronisation;
		if (lw.lock_word & LOCK_WORD_THIN_HASH)
			return hash;
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		lw.sync->hash_code = hash;
		lw.lock_word |= LOCK_WORD_FAT_HASH;
		/* this is safe since we don't deflate locks */
		obj->synchronisation = lw.sync;
	}
	return hash;
#else
/*
 * Wang's address-based hash function:
 *   http://www.concentric.net/~Ttwang/tech/addrhash.htm
 */
	return (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
#endif
}
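/*
 * Worked example (hypothetical address): for an object at 0x1000,
 * (0x1000 >> MONO_OBJECT_ALIGNMENT_SHIFT) = 0x200, and
 * 0x200 * 2654435761u truncates to 0x6ef36200 in 32 bits; masking off
 * the top two bits with ~(LOCK_WORD_BITS_MASK << 30) leaves 0x2ef36200,
 * which is the value that ends up (shifted) in a thin lock word.
 */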
/* If allow_interruption==TRUE, the method will be interrupted if abort or suspend
 * is requested. In this case it returns -1.
 */
static inline gint32
mono_monitor_try_enter_internal (MonoObject *obj, guint32 ms, gboolean allow_interruption)
{
	MonoThreadsSync *mon;
	gsize id = GetCurrentThreadId ();
	HANDLE sem;
	guint32 then = 0, now, delta;
	guint32 waitms;
	guint32 ret;
	MonoInternalThread *thread;

	LOCK_DEBUG (g_message("%s: (%d) Trying to lock object %p (%d ms)", __func__, id, obj, ms));

	if (G_UNLIKELY (!obj)) {
		mono_raise_exception (mono_get_exception_argument_null ("obj"));
		return FALSE;
	}

retry:
	mon = obj->synchronisation;

	/* If the object has never been locked... */
	if (G_UNLIKELY (mon == NULL)) {
		mono_monitor_allocator_lock ();
		mon = mon_new (id);
		if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, mon, NULL) == NULL) {
			mono_gc_weak_link_add (&mon->data, obj, FALSE);
			mono_monitor_allocator_unlock ();
			/* Successfully locked */
			return 1;
		} else {
#ifdef HAVE_MOVING_COLLECTOR
			LockWord lw;
			lw.sync = obj->synchronisation;
			if (lw.lock_word & LOCK_WORD_THIN_HASH) {
				MonoThreadsSync *oldlw = lw.sync;
				/* move the already calculated hash */
				mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
				lw.sync = mon;
				lw.lock_word |= LOCK_WORD_FAT_HASH;
				if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
					mono_gc_weak_link_add (&mon->data, obj, FALSE);
					mono_monitor_allocator_unlock ();
					/* Successfully locked */
					return 1;
				} else {
					mon_finalize (mon);
					mono_monitor_allocator_unlock ();
					goto retry;
				}
			} else if (lw.lock_word & LOCK_WORD_FAT_HASH) {
				mon_finalize (mon);
				mono_monitor_allocator_unlock ();
				/* get the old lock without the fat hash bit */
				lw.lock_word &= ~LOCK_WORD_BITS_MASK;
				mon = lw.sync;
			} else {
				mon_finalize (mon);
				mono_monitor_allocator_unlock ();
				mon = obj->synchronisation;
			}
#else
			mon_finalize (mon);
			mono_monitor_allocator_unlock ();
			mon = obj->synchronisation;
#endif
		}
	} else {
#ifdef HAVE_MOVING_COLLECTOR
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH) {
			MonoThreadsSync *oldlw = lw.sync;
			mono_monitor_allocator_lock ();
			mon = mon_new (id);
			/* move the already calculated hash */
			mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
			lw.sync = mon;
			lw.lock_word |= LOCK_WORD_FAT_HASH;
			if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
				mono_gc_weak_link_add (&mon->data, obj, TRUE);
				mono_monitor_allocator_unlock ();
				/* Successfully locked */
				return 1;
			} else {
				mon_finalize (mon);
				mono_monitor_allocator_unlock ();
				goto retry;
			}
		}
#endif
	}

#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	/* If the object has previously been locked but isn't now... */

	/* This case differs from Dice's case 3 because we don't
	 * deflate locks or cache unused lock records
	 */
	if (G_LIKELY (mon->owner == 0)) {
		/* Try to install our ID in the owner field, nest
		 * should have been left at 1 by the previous unlock
		 * operation
		 */
		if (G_LIKELY (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0)) {
			/* Success */
			g_assert (mon->nest == 1);
			return 1;
		} else {
			/* Trumped again! */
			goto retry;
		}
	}

	/* If the object is currently locked by this thread... */
	if (mon->owner == id) {
		mon->nest++;
		return 1;
	}

	/* The object must be locked by someone else... */
	mono_perfcounters->thread_contentions++;

	/* If ms is 0 we don't block, but just fail straight away */
	if (ms == 0) {
		LOCK_DEBUG (g_message ("%s: (%d) timed out, returning FALSE", __func__, id));
		return 0;
	}

	mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_CONTENTION);
	/* The slow path begins here. */
retry_contended:
	/* a small amount of duplicated code, but it allows us to insert the profiler
	 * callbacks without impacting the fast path: from here on we don't need to go back to the
	 * retry label, but to retry_contended. At this point mon is already installed in the object
	 * header.
	 */
	/* This case differs from Dice's case 3 because we don't
	 * deflate locks or cache unused lock records
	 */
	if (G_LIKELY (mon->owner == 0)) {
		/* Try to install our ID in the owner field, nest
		 * should have been left at 1 by the previous unlock
		 * operation
		 */
		if (G_LIKELY (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0)) {
			/* Success */
			g_assert (mon->nest == 1);
			mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);
			return 1;
		}
	}

	/* If the object is currently locked by this thread... */
	if (mon->owner == id) {
		mon->nest++;
		mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);
		return 1;
	}

	/* We need to make sure there's a semaphore handle (creating it if
	 * necessary), and block on it
	 */
	if (mon->entry_sem == NULL) {
		/* Create the semaphore */
		sem = CreateSemaphore (NULL, 0, 0x7fffffff, NULL);
		g_assert (sem != NULL);
		if (InterlockedCompareExchangePointer ((gpointer*)&mon->entry_sem, sem, NULL) != NULL) {
			/* Someone else just put a handle here */
			CloseHandle (sem);
		}
	}

	/* If we need to time out, record a timestamp and adjust ms,
	 * because WaitForSingleObject doesn't tell us how long it
	 * waited for.
	 *
	 * Don't block forever here, because there's a chance the owner
	 * thread released the lock while we were creating the
	 * semaphore: we would not get the wakeup. Using the event
	 * handle technique from pulse/wait would involve locking the
	 * lock struct and therefore slowing down the fast path.
	 */
	if (ms != INFINITE) {
		then = mono_msec_ticks ();
		if (ms < 100) {
			waitms = ms;
		} else {
			waitms = 100;
		}
	} else {
		waitms = 100;
	}
	InterlockedIncrement (&mon->entry_count);

	mono_perfcounters->thread_queue_len++;
	mono_perfcounters->thread_queue_max++;
	thread = mono_thread_internal_current ();

	mono_thread_set_state (thread, ThreadState_WaitSleepJoin);

	/*
	 * We pass TRUE instead of allow_interruption since we have to check for the
	 * StopRequested case below.
	 */
	ret = WaitForSingleObjectEx (mon->entry_sem, waitms, TRUE);

	mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);

	InterlockedDecrement (&mon->entry_count);
	mono_perfcounters->thread_queue_len--;

	if (ms != INFINITE) {
		now = mono_msec_ticks ();

		if (now < then) {
			/* The counter must have wrapped around */
			LOCK_DEBUG (g_message ("%s: wrapped around! now=0x%x then=0x%x", __func__, now, then));

			now += (0xffffffff - then);
			then = 0;

			LOCK_DEBUG (g_message ("%s: wrap rejig: now=0x%x then=0x%x delta=0x%x", __func__, now, then, now-then));
		}

		delta = now - then;
		if (delta >= ms) {
			ms = 0;
		} else {
			ms -= delta;
		}

		if ((ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) && ms > 0) {
			/* More time left */
			goto retry_contended;
		}
	} else {
		if (ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) {
			if (ret == WAIT_IO_COMPLETION && (mono_thread_test_state (mono_thread_internal_current (), (ThreadState_StopRequested|ThreadState_SuspendRequested)))) {
				/*
				 * We have to obey a stop/suspend request even if
				 * allow_interruption is FALSE to avoid hangs at shutdown.
				 */
				mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_FAIL);
				return -1;
			}
			/* Infinite wait, so just try again */
			goto retry_contended;
		}
	}

	if (ret == WAIT_OBJECT_0) {
		/* retry from the top */
		goto retry_contended;
	}

	/* We must have timed out */
	LOCK_DEBUG (g_message ("%s: (%d) timed out waiting, returning FALSE", __func__, id));

	mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_FAIL);

	if (ret == WAIT_IO_COMPLETION)
		return -1;
	else
		return 0;
}
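/*
 * Typical use (a sketch): the public wrappers below map the tri-state
 * result onto a gboolean, e.g.
 *
 *	if (mono_monitor_try_enter (obj, 100)) {
 *		... critical section ...
 *		mono_monitor_exit (obj);
 *	}
 *
 * Only the icall wrapper further down passes allow_interruption==TRUE
 * and loops on the -1 (interrupted) result.
 */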
gboolean
mono_monitor_enter (MonoObject *obj)
{
	return mono_monitor_try_enter_internal (obj, INFINITE, FALSE) == 1;
}

gboolean
mono_monitor_try_enter (MonoObject *obj, guint32 ms)
{
	return mono_monitor_try_enter_internal (obj, ms, FALSE) == 1;
}
void
mono_monitor_exit (MonoObject *obj)
{
	MonoThreadsSync *mon;
	guint32 nest;

	LOCK_DEBUG (g_message ("%s: (%d) Unlocking %p", __func__, GetCurrentThreadId (), obj));

	if (G_UNLIKELY (!obj)) {
		mono_raise_exception (mono_get_exception_argument_null ("obj"));
		return;
	}

	mon = obj->synchronisation;

#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH)
			return;
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (G_UNLIKELY (mon == NULL)) {
		/* No one ever used Enter. Just ignore the Exit request as MS does */
		return;
	}
	if (G_UNLIKELY (mon->owner != GetCurrentThreadId ())) {
		return;
	}

	nest = mon->nest - 1;
	if (nest == 0) {
		LOCK_DEBUG (g_message ("%s: (%d) Object %p is now unlocked", __func__, GetCurrentThreadId (), obj));

		/* object is now unlocked, leave nest==1 so we don't
		 * need to set it when the lock is reacquired
		 */
		mon->owner = 0;

		/* Do the wakeup stuff. It's possible that the last
		 * blocking thread gave up waiting just before we
		 * release the semaphore resulting in a futile wakeup
		 * next time there's contention for this object, but
		 * it means we don't have to waste time locking the
		 * struct.
		 */
		if (mon->entry_count > 0) {
			ReleaseSemaphore (mon->entry_sem, 1, NULL);
		}
	} else {
		LOCK_DEBUG (g_message ("%s: (%d) Object %p is now locked %d times", __func__, GetCurrentThreadId (), obj, nest));
		mon->nest = nest;
	}
}
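/*
 * Illustrative nesting behaviour: after two matching Enter calls on the
 * same object by one thread, mon->nest == 2; the first Exit just drops
 * nest back to 1, and only the second one clears mon->owner and wakes a
 * blocked thread, if any.
 */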
void**
mono_monitor_get_object_monitor_weak_link (MonoObject *object)
{
	LockWord lw;
	MonoThreadsSync *sync = NULL;

	lw.sync = object->synchronisation;
	if (lw.lock_word & LOCK_WORD_FAT_HASH) {
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		sync = lw.sync;
	} else if (!(lw.lock_word & LOCK_WORD_THIN_HASH)) {
		sync = lw.sync;
	}

	if (sync && sync->data)
		return &sync->data;
	return NULL;
}
static void
emit_obj_syncp_check (MonoMethodBuilder *mb, int syncp_loc, int *obj_null_branch, int *syncp_true_false_branch,
	gboolean branch_on_true)
{
	/*
	  ldarg		0						obj
	  brfalse.s	obj_null
	*/

	mono_mb_emit_byte (mb, CEE_LDARG_0);
	*obj_null_branch = mono_mb_emit_short_branch (mb, CEE_BRFALSE_S);

	/*
	  ldarg		0						obj
	  conv.i							objp
	  ldc.i4	G_STRUCT_OFFSET(MonoObject, synchronisation)	objp off
	  add								&syncp
	  ldind.i							syncp
	  stloc		syncp
	  ldloc		syncp						syncp
	  brtrue/false.s	syncp_true_false
	*/

	mono_mb_emit_byte (mb, CEE_LDARG_0);
	mono_mb_emit_byte (mb, CEE_CONV_I);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoObject, synchronisation));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_stloc (mb, syncp_loc);
	mono_mb_emit_ldloc (mb, syncp_loc);
	*syncp_true_false_branch = mono_mb_emit_short_branch (mb, branch_on_true ? CEE_BRTRUE_S : CEE_BRFALSE_S);
}
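/*
 * In rough C terms, the IL emitted above amounts to this sketch:
 *
 *	if (!obj)
 *		goto obj_null;
 *	syncp = *(gpointer*)((char*)obj + G_STRUCT_OFFSET (MonoObject, synchronisation));
 *	if (syncp)		// or !syncp, depending on branch_on_true
 *		goto syncp_true_false;
 */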
static MonoMethod*
mono_monitor_get_fast_enter_method (MonoMethod *monitor_enter_method)
{
	static MonoMethod *fast_monitor_enter;
	static MonoMethod *compare_exchange_method;

	MonoMethodBuilder *mb;
	int obj_null_branch, syncp_null_branch, has_owner_branch, other_owner_branch, tid_branch;
	int tid_loc, syncp_loc, owner_loc;
	int thread_tls_offset;

#ifdef HAVE_MOVING_COLLECTOR
	return NULL;
#endif

	thread_tls_offset = mono_thread_get_tls_offset ();
	if (thread_tls_offset == -1)
		return NULL;

	if (fast_monitor_enter)
		return fast_monitor_enter;

	if (!compare_exchange_method) {
		MonoMethodDesc *desc;
		MonoClass *class;

		desc = mono_method_desc_new ("Interlocked:CompareExchange(intptr&,intptr,intptr)", FALSE);
		class = mono_class_from_name (mono_defaults.corlib, "System.Threading", "Interlocked");
		compare_exchange_method = mono_method_desc_search_in_class (desc, class);
		mono_method_desc_free (desc);

		if (!compare_exchange_method)
			return NULL;
	}

	mb = mono_mb_new (mono_defaults.monitor_class, "FastMonitorEnter", MONO_WRAPPER_UNKNOWN);

	mb->method->slot = -1;
	mb->method->flags = METHOD_ATTRIBUTE_PUBLIC | METHOD_ATTRIBUTE_STATIC |
		METHOD_ATTRIBUTE_HIDE_BY_SIG | METHOD_ATTRIBUTE_FINAL;

	tid_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	syncp_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	owner_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);

	emit_obj_syncp_check (mb, syncp_loc, &obj_null_branch, &syncp_null_branch, FALSE);
	/*
	  mono. tls	thread_tls_offset				threadp
	  ldc.i4	G_STRUCT_OFFSET(MonoThread, tid)		threadp off
	  add								&tid
	  ldind.i							tid
	  stloc		tid
	  ldloc		syncp						syncp
	  ldc.i4	G_STRUCT_OFFSET(MonoThreadsSync, owner)		syncp off
	  add								&owner
	  ldind.i							owner
	  stloc		owner
	  ldloc		owner						owner
	  brtrue.s	tid
	*/

	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_TLS);
	mono_mb_emit_i4 (mb, thread_tls_offset);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoInternalThread, tid));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_stloc (mb, tid_loc);
	mono_mb_emit_ldloc (mb, syncp_loc);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_stloc (mb, owner_loc);
	mono_mb_emit_ldloc (mb, owner_loc);
	tid_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);

	/*
	  ldloc		syncp						syncp
	  ldc.i4	G_STRUCT_OFFSET(MonoThreadsSync, owner)		syncp off
	  add								&owner
	  ldloc		tid						&owner tid
	  ldc.i4	0						&owner tid 0
	  call		System.Threading.Interlocked.CompareExchange	oldowner
	  brtrue.s	has_owner
	  ret
	*/

	mono_mb_emit_ldloc (mb, syncp_loc);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_ldloc (mb, tid_loc);
	mono_mb_emit_byte (mb, CEE_LDC_I4_0);
	mono_mb_emit_managed_call (mb, compare_exchange_method, NULL);
	has_owner_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);
	mono_mb_emit_byte (mb, CEE_RET);

	/*
	 tid:
	  ldloc		owner						owner
	  ldloc		tid						owner tid
	  brne.s	other_owner
	  ldloc		syncp						syncp
	  ldc.i4	G_STRUCT_OFFSET(MonoThreadsSync, nest)		syncp off
	  add								&nest
	  dup								&nest &nest
	  ldind.i4							&nest nest
	  ldc.i4	1						&nest nest 1
	  add								&nest nest+
	  stind.i4
	  ret
	*/

	mono_mb_patch_short_branch (mb, tid_branch);
	mono_mb_emit_ldloc (mb, owner_loc);
	mono_mb_emit_ldloc (mb, tid_loc);
	other_owner_branch = mono_mb_emit_short_branch (mb, CEE_BNE_UN_S);
	mono_mb_emit_ldloc (mb, syncp_loc);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, nest));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_DUP);
	mono_mb_emit_byte (mb, CEE_LDIND_I4);
	mono_mb_emit_byte (mb, CEE_LDC_I4_1);
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_STIND_I4);
	mono_mb_emit_byte (mb, CEE_RET);

	/*
	 obj_null, syncp_null, has_owner, other_owner:
	  ldarg		0						obj
	  call		System.Threading.Monitor.Enter
	  ret
	*/

	mono_mb_patch_short_branch (mb, obj_null_branch);
	mono_mb_patch_short_branch (mb, syncp_null_branch);
	mono_mb_patch_short_branch (mb, has_owner_branch);
	mono_mb_patch_short_branch (mb, other_owner_branch);
	mono_mb_emit_byte (mb, CEE_LDARG_0);
	mono_mb_emit_managed_call (mb, monitor_enter_method, NULL);
	mono_mb_emit_byte (mb, CEE_RET);

	fast_monitor_enter = mono_mb_create_method (mb, mono_signature_no_pinvoke (monitor_enter_method), 5);
	mono_mb_free (mb);

	return fast_monitor_enter;
}
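/*
 * Taken together, the generated wrapper is roughly equivalent to this C
 * sketch (assuming a non-moving collector, since the wrapper is
 * disabled otherwise; the real code reads tid straight from TLS):
 *
 *	static void FastMonitorEnter (MonoObject *obj)
 *	{
 *		MonoThreadsSync *syncp;
 *		gsize tid, owner;
 *
 *		if (!obj || !(syncp = obj->synchronisation))
 *			goto fallback;
 *		tid = mono_thread_internal_current ()->tid;
 *		owner = syncp->owner;
 *		if (owner == 0) {
 *			if (InterlockedCompareExchangePointer ((gpointer*)&syncp->owner, (gpointer)tid, 0) == 0)
 *				return;
 *			goto fallback;
 *		}
 *		if (owner == tid) {
 *			syncp->nest++;
 *			return;
 *		}
 *	fallback:
 *		// really a managed call to System.Threading.Monitor.Enter
 *		mono_monitor_enter (obj);
 *	}
 */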
static MonoMethod*
mono_monitor_get_fast_exit_method (MonoMethod *monitor_exit_method)
{
	static MonoMethod *fast_monitor_exit;

	MonoMethodBuilder *mb;
	int obj_null_branch, has_waiting_branch, has_syncp_branch, owned_branch, nested_branch;
	int thread_tls_offset;
	int syncp_loc;

#ifdef HAVE_MOVING_COLLECTOR
	return NULL;
#endif

	thread_tls_offset = mono_thread_get_tls_offset ();
	if (thread_tls_offset == -1)
		return NULL;

	if (fast_monitor_exit)
		return fast_monitor_exit;

	mb = mono_mb_new (mono_defaults.monitor_class, "FastMonitorExit", MONO_WRAPPER_UNKNOWN);

	mb->method->slot = -1;
	mb->method->flags = METHOD_ATTRIBUTE_PUBLIC | METHOD_ATTRIBUTE_STATIC |
		METHOD_ATTRIBUTE_HIDE_BY_SIG | METHOD_ATTRIBUTE_FINAL;

	syncp_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);

	emit_obj_syncp_check (mb, syncp_loc, &obj_null_branch, &has_syncp_branch, TRUE);

	/*
	  ret
	*/

	mono_mb_emit_byte (mb, CEE_RET);

	/*
	 has_syncp:
	  ldloc		syncp						syncp
	  ldc.i4	G_STRUCT_OFFSET(MonoThreadsSync, owner)		syncp off
	  add								&owner
	  ldind.i							owner
	  mono. tls	thread_tls_offset				owner threadp
	  ldc.i4	G_STRUCT_OFFSET(MonoThread, tid)		owner threadp off
	  add								owner &tid
	  ldind.i							owner tid
	  beq.s		owned
	*/

	mono_mb_patch_short_branch (mb, has_syncp_branch);
	mono_mb_emit_ldloc (mb, syncp_loc);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_TLS);
	mono_mb_emit_i4 (mb, thread_tls_offset);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoInternalThread, tid));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	owned_branch = mono_mb_emit_short_branch (mb, CEE_BEQ_S);
	/*
	  ret
	*/

	mono_mb_emit_byte (mb, CEE_RET);

	/*
	 owned:
	  ldloc		syncp						syncp
	  ldc.i4	G_STRUCT_OFFSET(MonoThreadsSync, nest)		syncp off
	  add								&nest
	  dup								&nest &nest
	  ldind.i4							&nest nest
	  dup								&nest nest nest
	  ldc.i4	1						&nest nest nest 1
	  bgt.un.s	nested						&nest nest
	*/

	mono_mb_patch_short_branch (mb, owned_branch);
	mono_mb_emit_ldloc (mb, syncp_loc);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, nest));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_DUP);
	mono_mb_emit_byte (mb, CEE_LDIND_I4);
	mono_mb_emit_byte (mb, CEE_DUP);
	mono_mb_emit_byte (mb, CEE_LDC_I4_1);
	nested_branch = mono_mb_emit_short_branch (mb, CEE_BGT_UN_S);

	/*
	  pop								&nest
	  pop
	  ldloc		syncp						syncp
	  ldc.i4	G_STRUCT_OFFSET(MonoThreadsSync, entry_count)	syncp off
	  add								&count
	  ldind.i4							count
	  brtrue.s	has_waiting
	*/

	mono_mb_emit_byte (mb, CEE_POP);
	mono_mb_emit_byte (mb, CEE_POP);
	mono_mb_emit_ldloc (mb, syncp_loc);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, entry_count));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDIND_I4);
	has_waiting_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);

	/*
	  ldloc		syncp						syncp
	  ldc.i4	G_STRUCT_OFFSET(MonoThreadsSync, owner)		syncp off
	  add								&owner
	  ldnull							&owner 0
	  stind.i
	  ret
	*/

	mono_mb_emit_ldloc (mb, syncp_loc);
	mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDNULL);
	mono_mb_emit_byte (mb, CEE_STIND_I);
	mono_mb_emit_byte (mb, CEE_RET);

	/*
	 nested:
	  ldc.i4	1						&nest nest 1
	  sub								&nest nest-
	  stind.i4
	  ret
	*/

	mono_mb_patch_short_branch (mb, nested_branch);
	mono_mb_emit_byte (mb, CEE_LDC_I4_1);
	mono_mb_emit_byte (mb, CEE_SUB);
	mono_mb_emit_byte (mb, CEE_STIND_I4);
	mono_mb_emit_byte (mb, CEE_RET);

	/*
	 obj_null, has_waiting:
	  ldarg		0						obj
	  call		System.Threading.Monitor.Exit
	  ret
	*/

	mono_mb_patch_short_branch (mb, obj_null_branch);
	mono_mb_patch_short_branch (mb, has_waiting_branch);
	mono_mb_emit_byte (mb, CEE_LDARG_0);
	mono_mb_emit_managed_call (mb, monitor_exit_method, NULL);
	mono_mb_emit_byte (mb, CEE_RET);

	fast_monitor_exit = mono_mb_create_method (mb, mono_signature_no_pinvoke (monitor_exit_method), 5);
	mono_mb_free (mb);

	return fast_monitor_exit;
}
MonoMethod*
mono_monitor_get_fast_path (MonoMethod *enter_or_exit)
{
	if (strcmp (enter_or_exit->name, "Enter") == 0)
		return mono_monitor_get_fast_enter_method (enter_or_exit);
	if (strcmp (enter_or_exit->name, "Exit") == 0)
		return mono_monitor_get_fast_exit_method (enter_or_exit);
	g_assert_not_reached ();
	return NULL;
}
/*
 * mono_monitor_threads_sync_members_offset:
 * @owner_offset: returns size and offset of the "owner" member
 * @nest_offset: returns size and offset of the "nest" member
 * @entry_count_offset: returns size and offset of the "entry_count" member
 *
 * Returns the offsets and sizes of three members of the
 * MonoThreadsSync struct. The Monitor ASM fastpaths need this.
 */
void
mono_monitor_threads_sync_members_offset (int *owner_offset, int *nest_offset, int *entry_count_offset)
{
	MonoThreadsSync ts;

#define ENCODE_OFF_SIZE(o,s)	(((o) << 8) | ((s) & 0xff))

	*owner_offset = ENCODE_OFF_SIZE (G_STRUCT_OFFSET (MonoThreadsSync, owner), sizeof (ts.owner));
	*nest_offset = ENCODE_OFF_SIZE (G_STRUCT_OFFSET (MonoThreadsSync, nest), sizeof (ts.nest));
	*entry_count_offset = ENCODE_OFF_SIZE (G_STRUCT_OFFSET (MonoThreadsSync, entry_count), sizeof (ts.entry_count));
}
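/*
 * Example of decoding the packed values (a sketch for fastpath
 * implementors): since 'owner' is the first struct member, on a 64-bit
 * build *owner_offset would come back as (0 << 8) | 8 == 0x0008, i.e.
 * offset 0, size 8. In general:
 *
 *	offset = encoded >> 8;
 *	size   = encoded & 0xff;
 */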
gboolean
ves_icall_System_Threading_Monitor_Monitor_try_enter (MonoObject *obj, guint32 ms)
{
	gint32 res;

	do {
		res = mono_monitor_try_enter_internal (obj, ms, TRUE);
		if (res == -1)
			mono_thread_interruption_checkpoint ();
	} while (res == -1);

	return res == 1;
}
gboolean
ves_icall_System_Threading_Monitor_Monitor_test_owner (MonoObject *obj)
{
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message ("%s: Testing if %p is owned by thread %d", __func__, obj, GetCurrentThreadId()));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH)
			return FALSE;
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (mon == NULL) {
		return FALSE;
	}

	if (mon->owner == GetCurrentThreadId ()) {
		return TRUE;
	}

	return FALSE;
}
gboolean
ves_icall_System_Threading_Monitor_Monitor_test_synchronised (MonoObject *obj)
{
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message("%s: (%d) Testing if %p is owned by any thread", __func__, GetCurrentThreadId (), obj));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH)
			return FALSE;
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (mon == NULL) {
		return FALSE;
	}

	if (mon->owner != 0) {
		return TRUE;
	}

	return FALSE;
}

/* All wait list manipulation in the pulse, pulseall and wait
 * functions happens while the monitor lock is held, so we don't need
 * any extra struct locking
 */
void
ves_icall_System_Threading_Monitor_Monitor_pulse (MonoObject *obj)
{
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message ("%s: (%d) Pulsing %p", __func__, GetCurrentThreadId (), obj));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH) {
			mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
			return;
		}
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (mon == NULL) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
		return;
	}
	if (mon->owner != GetCurrentThreadId ()) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
		return;
	}

	LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, GetCurrentThreadId (), g_slist_length (mon->wait_list)));

	if (mon->wait_list != NULL) {
		LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, GetCurrentThreadId (), mon->wait_list->data));

		SetEvent (mon->wait_list->data);
		mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
	}
}
void
ves_icall_System_Threading_Monitor_Monitor_pulse_all (MonoObject *obj)
{
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message("%s: (%d) Pulsing all %p", __func__, GetCurrentThreadId (), obj));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH) {
			mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
			return;
		}
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (mon == NULL) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
		return;
	}
	if (mon->owner != GetCurrentThreadId ()) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
		return;
	}

	LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, GetCurrentThreadId (), g_slist_length (mon->wait_list)));

	while (mon->wait_list != NULL) {
		LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, GetCurrentThreadId (), mon->wait_list->data));

		SetEvent (mon->wait_list->data);
		mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
	}
}
gboolean
ves_icall_System_Threading_Monitor_Monitor_wait (MonoObject *obj, guint32 ms)
{
	MonoThreadsSync *mon;
	HANDLE event;
	guint32 nest;
	guint32 ret;
	gboolean success = FALSE;
	gint32 regain;
	MonoInternalThread *thread = mono_thread_internal_current ();

	LOCK_DEBUG (g_message ("%s: (%d) Trying to wait for %p with timeout %dms", __func__, GetCurrentThreadId (), obj, ms));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH) {
			mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
			return FALSE;
		}
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (mon == NULL) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
		return FALSE;
	}
	if (mon->owner != GetCurrentThreadId ()) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
		return FALSE;
	}

	/* Do this WaitSleepJoin check before creating the event handle */
	mono_thread_current_check_pending_interrupt ();

	event = CreateEvent (NULL, FALSE, FALSE, NULL);
	if (event == NULL) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to set up wait event"));
		return FALSE;
	}

	LOCK_DEBUG (g_message ("%s: (%d) queuing handle %p", __func__, GetCurrentThreadId (), event));

	mono_thread_current_check_pending_interrupt ();

	mono_thread_set_state (thread, ThreadState_WaitSleepJoin);

	mon->wait_list = g_slist_append (mon->wait_list, event);

	/* Save the nest count, and release the lock */
	nest = mon->nest;
	mon->nest = 1;
	mono_monitor_exit (obj);

	LOCK_DEBUG (g_message ("%s: (%d) Unlocked %p lock %p", __func__, GetCurrentThreadId (), obj, mon));

	/* There's no race between unlocking mon and waiting for the
	 * event, because auto reset events are sticky, and this event
	 * is private to this thread. Therefore even if the event was
	 * signalled before we wait, we still succeed.
	 */
	ret = WaitForSingleObjectEx (event, ms, TRUE);

	/* Reset the thread state fairly early, so we don't have to worry
	 * about the monitor error checking
	 */
	mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);

	if (mono_thread_interruption_requested ()) {
		/*
		 * Can't remove the event from wait_list, since the monitor is not locked by
		 * us. So leave it there, mon_new () will delete it when the mon structure
		 * is placed on the free list.
		 * FIXME: The caller expects to hold the lock after the wait returns, but it
		 * doesn't happen in this case:
		 * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=97268
		 */
		return FALSE;
	}

	/* Regain the lock with the previous nest count */
	do {
		regain = mono_monitor_try_enter_internal (obj, INFINITE, TRUE);
		if (regain == -1)
			mono_thread_interruption_checkpoint ();
	} while (regain == -1);

	if (regain == 0) {
		/* Something went wrong, so throw a
		 * SynchronizationLockException
		 */
		CloseHandle (event);
		mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to regain lock"));
		return FALSE;
	}

	mon->nest = nest;

	LOCK_DEBUG (g_message ("%s: (%d) Regained %p lock %p", __func__, GetCurrentThreadId (), obj, mon));

	if (ret == WAIT_TIMEOUT) {
		/* Poll the event again, just in case it was signalled
		 * while we were trying to regain the monitor lock
		 */
		ret = WaitForSingleObjectEx (event, 0, FALSE);
	}

	/* Pulse will have popped our event from the queue if it signalled
	 * us, so we only do it here if the wait timed out.
	 *
	 * This avoids a race condition where the thread holding the
	 * lock can Pulse several times before the WaitForSingleObject
	 * returns. If we popped the queue here then this event might
	 * be signalled more than once, thereby starving another
	 * thread.
	 */

	if (ret == WAIT_OBJECT_0) {
		LOCK_DEBUG (g_message ("%s: (%d) Success", __func__, GetCurrentThreadId ()));
		success = TRUE;
	} else {
		LOCK_DEBUG (g_message ("%s: (%d) Wait failed, dequeuing handle %p", __func__, GetCurrentThreadId (), event));
		/* No pulse, so we have to remove ourselves from the
		 * wait queue
		 */
		mon->wait_list = g_slist_remove (mon->wait_list, event);
	}
	CloseHandle (event);

	return success;
}
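/*
 * The wait/pulse handshake above, end to end (illustrative summary):
 *
 *	waiter					owner
 *	------					-----
 *	create private auto-reset event
 *	append event to mon->wait_list
 *	save nest, release the monitor
 *						Monitor.Pulse: SetEvent () on the
 *						head of wait_list and dequeue it
 *	WaitForSingleObjectEx () returns
 *	re-enter the monitor, restore nest
 *
 * Because the event is auto-reset and private to the waiting thread, a
 * pulse that arrives before the wait starts is not lost.
 */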