2 * monitor.c: Monitor locking functions
5 * Dick Porter (dick@ximian.com)
7 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/monitor.h>
14 #include <mono/metadata/threads-types.h>
15 #include <mono/metadata/exception.h>
16 #include <mono/metadata/threads.h>
17 #include <mono/io-layer/io-layer.h>
18 #include <mono/metadata/object-internals.h>
19 #include <mono/metadata/gc-internal.h>
21 /*#define LOCK_DEBUG(a) do { a; } while (0)*/
25 * The monitor implementation here is based on
26 * http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf and
27 * http://www.research.ibm.com/people/d/dfb/papers/Bacon98Thin.ps
29 * The Dice paper describes a technique for saving lock record space
30 * by returning records to a free list when they become unused. That
31 * sounds like unnecessary complexity to me, though if it becomes
32 * clear that unused lock records are taking up lots of space or we
33 * need to shave more time off by avoiding a malloc then we can always
34 * implement the free list idea later. The timeout parameter to
35 * try_enter voids some of the assumptions about the reference count
36 * field in Dice's implementation too. In his version, the thread
37 * attempting to lock a contended object will block until it succeeds,
38 * so the reference count will never be decremented while an object is
41 * Bacon's thin locks have a fast path that doesn't need a lock record
42 * for the common case of locking an unlocked or shallow-nested
43 * object, but the technique relies on encoding the thread ID in 15
44 * bits (to avoid too much per-object space overhead.) Unfortunately
45 * I don't think it's possible to reliably encode a pthread_t into 15
46 * bits. (The JVM implementation used seems to have a 15-bit
47 * per-thread identifier available.)
49 * This implementation then combines Dice's basic lock model with
50 * Bacon's simplification of keeping a lock record for the lifetime of
/* Lock record ("inflated" monitor) attached to a managed object.
 * NOTE(review): this view is incomplete — fields referenced elsewhere in the
 * file (nest, hash_code, entry_sem, wait_list, data) are on lines missing
 * from this chunk. */
54 struct _MonoThreadsSync
56 gsize owner
; /* thread ID */
58 #ifdef HAVE_MOVING_COLLECTOR
/* Number of threads blocked on entry_sem waiting to acquire this lock. */
61 volatile gint32 entry_count
;
/* Lock records are allocated in arrays; the arrays are chained together so
 * collected (unused) records can be found and recycled. */
67 typedef struct _MonitorArray MonitorArray
;
69 struct _MonitorArray
{
72 MonoThreadsSync monitors
[MONO_ZERO_LEN_ARRAY
];
/* The lock-record allocator is protected by one process-wide critical
 * section; the fast locking paths never take it. */
75 #define mono_monitor_allocator_lock() EnterCriticalSection (&monitor_mutex)
76 #define mono_monitor_allocator_unlock() LeaveCriticalSection (&monitor_mutex)
77 static CRITICAL_SECTION monitor_mutex
;
/* Head of the list of unused lock records, linked through their data field. */
78 static MonoThreadsSync
*monitor_freelist
;
/* Chain of every MonitorArray block ever allocated (never freed). */
79 static MonitorArray
*monitor_allocated
;
/* Size of the next MonitorArray allocation. */
80 static int array_size
= 16;
83 mono_monitor_init (void)
85 InitializeCriticalSection (&monitor_mutex
);
void
mono_monitor_cleanup (void)
{
	/* NOTE(review): deliberately a no-op — the original leaves the critical
	 * section alive, presumably because threads can still race against it
	 * during runtime shutdown; confirm before re-enabling the delete. */
	/*DeleteCriticalSection (&monitor_mutex);*/
}
95 monitor_is_on_freelist (MonoThreadsSync
*mon
)
98 for (marray
= monitor_allocated
; marray
; marray
= marray
->next
) {
99 if (mon
>= marray
->monitors
&& mon
< &marray
->monitors
[marray
->num_monitors
])
/* NOTE(review): garbled extraction — statements are split across lines and
 * several original lines (braces, the used/on_freelist/to_recycle/num_arrays
 * accounting, function signature line) are missing from this view.
 * Comments below annotate only what is visible. */
109 * Print a report on stdout of the managed locks currently held by
110 * threads. If @include_untaken is specified, list also inflated locks
112 * This is supposed to be used in debuggers like gdb.
115 mono_locks_dump (gboolean include_untaken
)
118 int used
= 0, on_freelist
= 0, to_recycle
= 0, total
= 0, num_arrays
= 0;
119 MonoThreadsSync
*mon
;
120 MonitorArray
*marray
;
/* First pass: count records on the freelist (linked through data). */
121 for (mon
= monitor_freelist
; mon
; mon
= mon
->data
)
/* Second pass: walk every allocated array and classify each record. */
123 for (marray
= monitor_allocated
; marray
; marray
= marray
->next
) {
124 total
+= marray
->num_monitors
;
126 for (i
= 0; i
< marray
->num_monitors
; ++i
) {
127 mon
= &marray
->monitors
[i
];
/* data == NULL marks a record that is free but not yet re-linked. */
128 if (mon
->data
== NULL
) {
129 if (i
< marray
->num_monitors
- 1)
/* data pointing outside the arrays means the record is in use: it is
 * then a GC weak link to the object holding the lock. */
132 if (!monitor_is_on_freelist (mon
->data
)) {
133 MonoObject
*holder
= mono_gc_weak_link_get (&mon
->data
);
135 g_print ("Lock %p in object %p held by thread %p, nest level: %d\n",
136 mon
, holder
, (void*)mon
->owner
, mon
->nest
);
138 g_print ("\tWaiting on semaphore %p: %d\n", mon
->entry_sem
, mon
->entry_count
);
139 } else if (include_untaken
) {
140 g_print ("Lock %p in object %p untaken\n", mon
, holder
);
147 g_print ("Total locks (in %d array(s)): %d, used: %d, on freelist: %d, to recycle: %d\n",
148 num_arrays
, total
, used
, on_freelist
, to_recycle
);
151 /* LOCKING: this is called with monitor_mutex held */
153 mon_finalize (MonoThreadsSync
*mon
)
155 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
": Finalizing sync %p", mon
));
157 if (mon
->entry_sem
!= NULL
) {
158 CloseHandle (mon
->entry_sem
);
159 mon
->entry_sem
= NULL
;
161 /* If this isn't empty then something is seriously broken - it
162 * means a thread is still waiting on the object that owned
163 * this lock, but the object has been finalized.
165 g_assert (mon
->wait_list
== NULL
);
167 mon
->entry_count
= 0;
168 /* owner and nest are set in mon_new, no need to zero them out */
170 mon
->data
= monitor_freelist
;
171 monitor_freelist
= mon
;
174 /* LOCKING: this is called with monitor_mutex held */
175 static MonoThreadsSync
*
178 MonoThreadsSync
*new;
180 if (!monitor_freelist
) {
181 MonitorArray
*marray
;
183 /* see if any sync block has been collected */
185 for (marray
= monitor_allocated
; marray
; marray
= marray
->next
) {
186 for (i
= 0; i
< marray
->num_monitors
; ++i
) {
187 if (marray
->monitors
[i
].data
== NULL
) {
188 new = &marray
->monitors
[i
];
189 new->data
= monitor_freelist
;
190 monitor_freelist
= new;
193 /* small perf tweak to avoid scanning all the blocks */
197 /* need to allocate a new array of monitors */
198 if (!monitor_freelist
) {
200 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
": allocating more monitors: %d", array_size
));
201 marray
= g_malloc0 (sizeof (MonoArray
) + array_size
* sizeof (MonoThreadsSync
));
202 marray
->num_monitors
= array_size
;
204 /* link into the freelist */
205 for (i
= 0; i
< marray
->num_monitors
- 1; ++i
) {
206 marray
->monitors
[i
].data
= &marray
->monitors
[i
+ 1];
208 marray
->monitors
[i
].data
= NULL
; /* the last one */
209 monitor_freelist
= &marray
->monitors
[0];
210 /* we happend the marray instead of prepending so that
211 * the collecting loop above will need to scan smaller arrays first
213 if (!monitor_allocated
) {
214 monitor_allocated
= marray
;
216 last
= monitor_allocated
;
224 new = monitor_freelist
;
225 monitor_freelist
= new->data
;
234 * Format of the lock word:
235 * thinhash | fathash | data
237 * thinhash is the lower bit: if set data is the shifted hashcode of the object.
238 * fathash is another bit: if set the hash code is stored in the MonoThreadsSync
239 * struct pointed to by data
240 * if neither bit is set and data is non-NULL, data is a MonoThreadsSync
/* NOTE(review): the union/enum framing lines are missing from this view;
 * "sync" below is one arm of the LockWord union whose other arm is the
 * integer lock_word manipulated by the code that follows. */
244 MonoThreadsSync
*sync
;
/* Flag bits live in the two low bits of the lock word — pointers stored
 * there are at least 4-byte aligned, so those bits are otherwise unused. */
248 LOCK_WORD_THIN_HASH
= 1,
249 LOCK_WORD_FAT_HASH
= 1 << 1,
250 LOCK_WORD_BITS_MASK
= 0x3,
251 LOCK_WORD_HASH_SHIFT
= 2
/* Objects are 8-byte aligned, so the low 3 address bits carry no entropy
 * and are shifted out before hashing. */
254 #define MONO_OBJECT_ALIGNMENT_SHIFT 3
/* NOTE(review): garbled extraction with missing lines (signature framing,
 * braces, the CAS-success early return).  Comments annotate visible logic. */
260 * Calculate a hash code for @obj that is constant while @obj is alive.
263 mono_object_hash (MonoObject
* obj
)
265 #ifdef HAVE_MOVING_COLLECTOR
270 lw
.sync
= obj
->synchronisation
;
/* Fast path 1: hash already stored inline ("thin") in the lock word. */
271 if (lw
.lock_word
& LOCK_WORD_THIN_HASH
) {
272 /*g_print ("fast thin hash %d for obj %p store\n", (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT, obj);*/
273 return (unsigned int)lw
.lock_word
>> LOCK_WORD_HASH_SHIFT
;
/* Fast path 2: hash stored in the inflated lock record ("fat"). */
275 if (lw
.lock_word
& LOCK_WORD_FAT_HASH
) {
276 lw
.lock_word
&= ~LOCK_WORD_BITS_MASK
;
277 /*g_print ("fast fat hash %d for obj %p store\n", lw.sync->hash_code, obj);*/
278 return lw
.sync
->hash_code
;
281 * while we are inside this function, the GC will keep this object pinned,
282 * since we are in the unmanaged stack. Thanks to this and to the hash
283 * function that depends only on the address, we can ignore the races if
284 * another thread computes the hash at the same time, because it'll end up
285 * with the same value.
/* Knuth multiplicative hash of the (alignment-stripped) address. */
287 hash
= (GPOINTER_TO_UINT (obj
) >> MONO_OBJECT_ALIGNMENT_SHIFT
) * 2654435761u;
288 /* clear the top bits as they can be discarded */
289 hash
&= ~(LOCK_WORD_BITS_MASK
<< 30);
290 /* no hash flags were set, so it must be a MonoThreadsSync pointer if not NULL */
292 lw
.sync
->hash_code
= hash
;
293 /*g_print ("storing hash code %d for obj %p in sync %p\n", hash, obj, lw.sync);*/
294 lw
.lock_word
|= LOCK_WORD_FAT_HASH
;
295 /* this is safe since we don't deflate locks */
296 obj
->synchronisation
= lw
.sync
;
298 /*g_print ("storing thin hash code %d for obj %p\n", hash, obj);*/
299 lw
.lock_word
= LOCK_WORD_THIN_HASH
| (hash
<< LOCK_WORD_HASH_SHIFT
);
/* CAS publishes the thin hash only if no one raced us. */
300 if (InterlockedCompareExchangePointer ((gpointer
*)&obj
->synchronisation
, lw
.sync
, NULL
) == NULL
)
302 /*g_print ("failed store\n");*/
303 /* someone set the hash flag or someone inflated the object */
304 lw
.sync
= obj
->synchronisation
;
305 if (lw
.lock_word
& LOCK_WORD_THIN_HASH
)
/* Loser of the race: the object got inflated — park the hash in the
 * lock record and mark the word as fat-hash. */
307 lw
.lock_word
&= ~LOCK_WORD_BITS_MASK
;
308 lw
.sync
->hash_code
= hash
;
309 lw
.lock_word
|= LOCK_WORD_FAT_HASH
;
310 /* this is safe since we don't deflate locks */
311 obj
->synchronisation
= lw
.sync
;
316 * Wang's address-based hash function:
317 * http://www.concentric.net/~Ttwang/tech/addrhash.htm
/* Non-moving-GC build: the address itself is stable, hash it directly. */
319 return (GPOINTER_TO_UINT (obj
) >> MONO_OBJECT_ALIGNMENT_SHIFT
) * 2654435761u;
/* NOTE(review): garbled extraction; the retry label, several returns, braces
 * and the waitms/delta computations are on lines missing from this view.
 * The interlocked ordering here is intricate — code left byte-identical,
 * comments annotate only the visible structure. */
323 /* If allow_interruption==TRUE, the method will be interrumped if abort or suspend
324 * is requested. In this case it returns -1.
327 mono_monitor_try_enter_internal (MonoObject
*obj
, guint32 ms
, gboolean allow_interruption
)
329 MonoThreadsSync
*mon
;
330 gsize id
= GetCurrentThreadId ();
332 guint32 then
= 0, now
, delta
;
336 LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION
337 ": (%d) Trying to lock object %p (%d ms)", id
, obj
, ms
));
340 mon
= obj
->synchronisation
;
342 /* If the object has never been locked... */
/* Inflate under the allocator lock, then CAS the fresh record in. */
344 mono_monitor_allocator_lock ();
346 if (InterlockedCompareExchangePointer ((gpointer
*)&obj
->synchronisation
, mon
, NULL
) == NULL
) {
347 mono_gc_weak_link_add (&mon
->data
, obj
);
348 mono_monitor_allocator_unlock ();
349 /* Successfully locked */
352 #ifdef HAVE_MOVING_COLLECTOR
/* CAS lost: the word may now hold a thin hash that must be migrated
 * into the (fat) lock record before we can retry the inflation. */
354 lw
.sync
= obj
->synchronisation
;
355 if (lw
.lock_word
& LOCK_WORD_THIN_HASH
) {
356 MonoThreadsSync
*oldlw
= lw
.sync
;
357 /* move the already calculated hash */
358 mon
->hash_code
= lw
.lock_word
>> LOCK_WORD_HASH_SHIFT
;
360 lw
.lock_word
|= LOCK_WORD_FAT_HASH
;
361 if (InterlockedCompareExchangePointer ((gpointer
*)&obj
->synchronisation
, lw
.sync
, oldlw
) == oldlw
) {
362 mono_gc_weak_link_add (&mon
->data
, obj
);
363 mono_monitor_allocator_unlock ();
364 /* Successfully locked */
368 mono_monitor_allocator_unlock ();
371 } else if (lw
.lock_word
& LOCK_WORD_FAT_HASH
) {
373 mono_monitor_allocator_unlock ();
374 /* get the old lock without the fat hash bit */
375 lw
.lock_word
&= ~LOCK_WORD_BITS_MASK
;
379 mono_monitor_allocator_unlock ();
380 mon
= obj
->synchronisation
;
384 mono_monitor_allocator_unlock ();
385 mon
= obj
->synchronisation
;
389 #ifdef HAVE_MOVING_COLLECTOR
/* Object already has a lock word: strip/migrate hash bits to reach
 * the real MonoThreadsSync pointer. */
392 if (lw
.lock_word
& LOCK_WORD_THIN_HASH
) {
393 MonoThreadsSync
*oldlw
= lw
.sync
;
394 mono_monitor_allocator_lock ();
396 /* move the already calculated hash */
397 mon
->hash_code
= lw
.lock_word
>> LOCK_WORD_HASH_SHIFT
;
399 lw
.lock_word
|= LOCK_WORD_FAT_HASH
;
400 if (InterlockedCompareExchangePointer ((gpointer
*)&obj
->synchronisation
, lw
.sync
, oldlw
) == oldlw
) {
401 mono_gc_weak_link_add (&mon
->data
, obj
);
402 mono_monitor_allocator_unlock ();
403 /* Successfully locked */
407 mono_monitor_allocator_unlock ();
414 #ifdef HAVE_MOVING_COLLECTOR
418 lw
.lock_word
&= ~LOCK_WORD_BITS_MASK
;
423 /* If the object is currently locked by this thread... */
424 if (mon
->owner
== id
) {
429 /* If the object has previously been locked but isn't now... */
431 /* This case differs from Dice's case 3 because we don't
432 * deflate locks or cache unused lock records
434 if (mon
->owner
== 0) {
435 /* Try to install our ID in the owner field, nest
436 * should have been left at 1 by the previous unlock
439 if (InterlockedCompareExchangePointer ((gpointer
*)&mon
->owner
, (gpointer
)id
, 0) == 0) {
441 g_assert (mon
->nest
== 1);
449 /* The object must be locked by someone else... */
451 /* If ms is 0 we don't block, but just fail straight away */
453 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
": (%d) timed out, returning FALSE", id
));
457 /* The slow path begins here. We need to make sure theres a
458 * semaphore handle (creating it if necessary), and block on
/* Lazily create the entry semaphore; CAS installs it exactly once. */
461 if (mon
->entry_sem
== NULL
) {
462 /* Create the semaphore */
463 sem
= CreateSemaphore (NULL
, 0, 0x7fffffff, NULL
);
464 g_assert (sem
!= NULL
);
465 if (InterlockedCompareExchangePointer ((gpointer
*)&mon
->entry_sem
, sem
, NULL
) != NULL
) {
466 /* Someone else just put a handle here */
471 /* If we need to time out, record a timestamp and adjust ms,
472 * because WaitForSingleObject doesn't tell us how long it
475 * Don't block forever here, because theres a chance the owner
476 * thread released the lock while we were creating the
477 * semaphore: we would not get the wakeup. Using the event
478 * handle technique from pulse/wait would involve locking the
479 * lock struct and therefore slowing down the fast path.
481 if (ms
!= INFINITE
) {
482 then
= GetTickCount ();
/* entry_count advertises that we are blocked so the unlocking thread
 * knows to release the semaphore. */
492 InterlockedIncrement (&mon
->entry_count
);
493 ret
= WaitForSingleObjectEx (mon
->entry_sem
, waitms
, allow_interruption
);
494 InterlockedDecrement (&mon
->entry_count
);
496 if (ms
!= INFINITE
) {
497 now
= GetTickCount ();
500 /* The counter must have wrapped around */
501 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
502 ": wrapped around! now=0x%x then=0x%x", now
, then
));
504 now
+= (0xffffffff - then
);
507 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
": wrap rejig: now=0x%x then=0x%x delta=0x%x", now
, then
, now
-then
));
517 if ((ret
== WAIT_TIMEOUT
|| (ret
== WAIT_IO_COMPLETION
&& !allow_interruption
)) && ms
> 0) {
522 if (ret
== WAIT_TIMEOUT
|| (ret
== WAIT_IO_COMPLETION
&& !allow_interruption
)) {
523 /* Infinite wait, so just try again */
528 if (ret
== WAIT_OBJECT_0
) {
529 /* retry from the top */
533 /* We must have timed out */
534 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
": (%d) timed out waiting, returning FALSE", id
));
536 if (ret
== WAIT_IO_COMPLETION
)
543 mono_monitor_enter (MonoObject
*obj
)
545 return mono_monitor_try_enter_internal (obj
, INFINITE
, FALSE
) == 1;
549 mono_monitor_try_enter (MonoObject
*obj
, guint32 ms
)
551 return mono_monitor_try_enter_internal (obj
, ms
, FALSE
) == 1;
/* NOTE(review): garbled extraction; the ownership-error return, the
 * owner=0 store that actually releases the lock, and the nest decrement
 * store are on lines missing from this view. */
555 mono_monitor_exit (MonoObject
*obj
)
557 MonoThreadsSync
*mon
;
560 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
": (%d) Unlocking %p", GetCurrentThreadId (), obj
));
562 mon
= obj
->synchronisation
;
564 #ifdef HAVE_MOVING_COLLECTOR
/* Strip hash bits from the lock word to reach the MonoThreadsSync. */
568 if (lw
.lock_word
& LOCK_WORD_THIN_HASH
)
570 lw
.lock_word
&= ~LOCK_WORD_BITS_MASK
;
575 /* No one ever used Enter. Just ignore the Exit request as MS does */
578 if (mon
->owner
!= GetCurrentThreadId ()) {
582 nest
= mon
->nest
- 1;
584 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
585 ": (%d) Object %p is now unlocked", GetCurrentThreadId (), obj
));
587 /* object is now unlocked, leave nest==1 so we don't
588 * need to set it when the lock is reacquired
592 /* Do the wakeup stuff. It's possible that the last
593 * blocking thread gave up waiting just before we
594 * release the semaphore resulting in a futile wakeup
595 * next time there's contention for this object, but
596 * it means we don't have to waste time locking the
599 if (mon
->entry_count
> 0) {
600 ReleaseSemaphore (mon
->entry_sem
, 1, NULL
);
603 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
604 ": (%d) Object %p is now locked %d times", GetCurrentThreadId (), obj
, nest
));
/* Icall backing System.Threading.Monitor.TryEnter().
 * NOTE(review): the surrounding retry loop / return and the -1 check
 * guarding the interruption checkpoint are on lines missing from view. */
610 ves_icall_System_Threading_Monitor_Monitor_try_enter (MonoObject
*obj
, guint32 ms
)
/* -1 from the internal helper means the wait was interrupted. */
615 res
= mono_monitor_try_enter_internal (obj
, ms
, TRUE
);
617 mono_thread_interruption_checkpoint ();
624 ves_icall_System_Threading_Monitor_Monitor_exit (MonoObject
*obj
)
626 mono_monitor_exit (obj
);
/* Icall: is the monitor of @obj held by the calling thread?
 * NOTE(review): the early returns for a never-locked object and the
 * TRUE/FALSE returns are on lines missing from this view. */
630 ves_icall_System_Threading_Monitor_Monitor_test_owner (MonoObject
*obj
)
632 MonoThreadsSync
*mon
;
634 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
635 ": Testing if %p is owned by thread %d", obj
, GetCurrentThreadId()));
637 mon
= obj
->synchronisation
;
638 #ifdef HAVE_MOVING_COLLECTOR
/* Strip hash bits to reach the MonoThreadsSync pointer. */
642 if (lw
.lock_word
& LOCK_WORD_THIN_HASH
)
644 lw
.lock_word
&= ~LOCK_WORD_BITS_MASK
;
652 if(mon
->owner
==GetCurrentThreadId ()) {
/* Icall: is the monitor of @obj currently held by any thread?
 * NOTE(review): early returns and the final TRUE/FALSE returns are on
 * lines missing from this view. */
660 ves_icall_System_Threading_Monitor_Monitor_test_synchronised (MonoObject
*obj
)
662 MonoThreadsSync
*mon
;
664 LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION
665 ": (%d) Testing if %p is owned by any thread", GetCurrentThreadId (), obj
));
667 mon
= obj
->synchronisation
;
668 #ifdef HAVE_MOVING_COLLECTOR
/* Strip hash bits to reach the MonoThreadsSync pointer. */
672 if (lw
.lock_word
& LOCK_WORD_THIN_HASH
)
674 lw
.lock_word
&= ~LOCK_WORD_BITS_MASK
;
/* owner != 0 means some thread currently holds the lock. */
682 if (mon
->owner
!= 0) {
689 /* All wait list manipulation in the pulse, pulseall and wait
690 * functions happens while the monitor lock is held, so we don't need
691 * any extra struct locking
/* Icall backing Monitor.Pulse(): wake the first waiter on @obj.
 * NOTE(review): returns/braces between the raise calls are on lines
 * missing from this view. */
695 ves_icall_System_Threading_Monitor_Monitor_pulse (MonoObject
*obj
)
697 MonoThreadsSync
*mon
;
699 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
": (%d) Pulsing %p",
700 GetCurrentThreadId (), obj
));
702 mon
= obj
->synchronisation
;
703 #ifdef HAVE_MOVING_COLLECTOR
/* A thin-hash-only lock word means the object was never locked. */
707 if (lw
.lock_word
& LOCK_WORD_THIN_HASH
) {
708 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
711 lw
.lock_word
&= ~LOCK_WORD_BITS_MASK
;
716 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
719 if (mon
->owner
!= GetCurrentThreadId ()) {
720 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
724 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
": (%d) %d threads waiting",
725 GetCurrentThreadId (), g_slist_length (mon
->wait_list
)));
/* Signal the head waiter's private event and drop it from the list. */
727 if (mon
->wait_list
!= NULL
) {
728 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
729 ": (%d) signalling and dequeuing handle %p",
730 GetCurrentThreadId (), mon
->wait_list
->data
));
732 SetEvent (mon
->wait_list
->data
);
733 mon
->wait_list
= g_slist_remove (mon
->wait_list
, mon
->wait_list
->data
);
/* Icall backing Monitor.PulseAll(): wake every waiter on @obj.  Identical
 * to pulse except it drains the whole wait list.
 * NOTE(review): returns/braces between the raise calls are on lines
 * missing from this view. */
738 ves_icall_System_Threading_Monitor_Monitor_pulse_all (MonoObject
*obj
)
740 MonoThreadsSync
*mon
;
742 LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION
": (%d) Pulsing all %p",
743 GetCurrentThreadId (), obj
));
745 mon
= obj
->synchronisation
;
746 #ifdef HAVE_MOVING_COLLECTOR
750 if (lw
.lock_word
& LOCK_WORD_THIN_HASH
) {
751 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
754 lw
.lock_word
&= ~LOCK_WORD_BITS_MASK
;
759 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
762 if (mon
->owner
!= GetCurrentThreadId ()) {
763 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
767 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
": (%d) %d threads waiting",
768 GetCurrentThreadId (), g_slist_length (mon
->wait_list
)));
/* Drain: signal and remove each waiter's event until the list is empty. */
770 while (mon
->wait_list
!= NULL
) {
771 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
772 ": (%d) signalling and dequeuing handle %p",
773 GetCurrentThreadId (), mon
->wait_list
->data
));
775 SetEvent (mon
->wait_list
->data
);
776 mon
->wait_list
= g_slist_remove (mon
->wait_list
, mon
->wait_list
->data
);
781 ves_icall_System_Threading_Monitor_Monitor_wait (MonoObject
*obj
, guint32 ms
)
783 MonoThreadsSync
*mon
;
787 gboolean success
= FALSE
;
789 MonoThread
*thread
= mono_thread_current ();
791 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
792 ": (%d) Trying to wait for %p with timeout %dms",
793 GetCurrentThreadId (), obj
, ms
));
795 mon
= obj
->synchronisation
;
796 #ifdef HAVE_MOVING_COLLECTOR
800 if (lw
.lock_word
& LOCK_WORD_THIN_HASH
) {
801 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
804 lw
.lock_word
&= ~LOCK_WORD_BITS_MASK
;
809 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
812 if (mon
->owner
!= GetCurrentThreadId ()) {
813 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
817 /* Do this WaitSleepJoin check before creating the event handle */
818 mono_thread_current_check_pending_interrupt ();
820 event
= CreateEvent (NULL
, FALSE
, FALSE
, NULL
);
822 mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to set up wait event"));
826 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
": (%d) queuing handle %p",
827 GetCurrentThreadId (), event
));
829 mono_monitor_enter (thread
->synch_lock
);
830 thread
->state
|= ThreadState_WaitSleepJoin
;
831 mono_monitor_exit (thread
->synch_lock
);
833 mon
->wait_list
= g_slist_append (mon
->wait_list
, event
);
835 /* Save the nest count, and release the lock */
838 mono_monitor_exit (obj
);
840 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
": (%d) Unlocked %p lock %p",
841 GetCurrentThreadId (), obj
, mon
));
843 /* There's no race between unlocking mon and waiting for the
844 * event, because auto reset events are sticky, and this event
845 * is private to this thread. Therefore even if the event was
846 * signalled before we wait, we still succeed.
848 ret
= WaitForSingleObjectEx (event
, ms
, TRUE
);
850 /* Reset the thread state fairly early, so we don't have to worry
851 * about the monitor error checking
853 mono_monitor_enter (thread
->synch_lock
);
854 thread
->state
&= ~ThreadState_WaitSleepJoin
;
855 mono_monitor_exit (thread
->synch_lock
);
857 if (mono_thread_interruption_requested ()) {
862 /* Regain the lock with the previous nest count */
864 regain
= mono_monitor_try_enter_internal (obj
, INFINITE
, TRUE
);
866 mono_thread_interruption_checkpoint ();
867 } while (regain
== -1);
870 /* Something went wrong, so throw a
871 * SynchronizationLockException
874 mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to regain lock"));
880 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
": (%d) Regained %p lock %p",
881 GetCurrentThreadId (), obj
, mon
));
883 if (ret
== WAIT_TIMEOUT
) {
884 /* Poll the event again, just in case it was signalled
885 * while we were trying to regain the monitor lock
887 ret
= WaitForSingleObjectEx (event
, 0, FALSE
);
890 /* Pulse will have popped our event from the queue if it signalled
891 * us, so we only do it here if the wait timed out.
893 * This avoids a race condition where the thread holding the
894 * lock can Pulse several times before the WaitForSingleObject
895 * returns. If we popped the queue here then this event might
896 * be signalled more than once, thereby starving another
900 if (ret
== WAIT_OBJECT_0
) {
901 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
": (%d) Success",
902 GetCurrentThreadId ()));
905 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
": (%d) Wait failed, dequeuing handle %p",
906 GetCurrentThreadId (), event
));
907 /* No pulse, so we have to remove ourself from the
910 mon
->wait_list
= g_slist_remove (mon
->wait_list
, event
);