2007-04-06 Andreas Faerber <andreas.faerber@web.de>
[mono.git] / mono / metadata / monitor.c
blob1e05eaddc43c7d9cdc9c7deda8bdf252ae36ba02
1 /*
2 * monitor.c: Monitor locking functions
4 * Author:
5 * Dick Porter (dick@ximian.com)
7 * (C) 2003 Ximian, Inc.
8 */
10 #include <config.h>
11 #include <glib.h>
13 #include <mono/metadata/monitor.h>
14 #include <mono/metadata/threads-types.h>
15 #include <mono/metadata/exception.h>
16 #include <mono/metadata/threads.h>
17 #include <mono/io-layer/io-layer.h>
18 #include <mono/metadata/object-internals.h>
19 #include <mono/metadata/gc-internal.h>
21 /*#define LOCK_DEBUG(a) do { a; } while (0)*/
22 #define LOCK_DEBUG(a)
25 * The monitor implementation here is based on
26 * http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf and
27 * http://www.research.ibm.com/people/d/dfb/papers/Bacon98Thin.ps
29 * The Dice paper describes a technique for saving lock record space
30 * by returning records to a free list when they become unused. That
31 * sounds like unnecessary complexity to me, though if it becomes
32 * clear that unused lock records are taking up lots of space or we
33 * need to shave more time off by avoiding a malloc then we can always
34 * implement the free list idea later. The timeout parameter to
35 * try_enter voids some of the assumptions about the reference count
36 * field in Dice's implementation too. In his version, the thread
37 * attempting to lock a contended object will block until it succeeds,
38 * so the reference count will never be decremented while an object is
39 * locked.
41 * Bacon's thin locks have a fast path that doesn't need a lock record
42 * for the common case of locking an unlocked or shallow-nested
43 * object, but the technique relies on encoding the thread ID in 15
44 * bits (to avoid too much per-object space overhead.) Unfortunately
45 * I don't think it's possible to reliably encode a pthread_t into 15
46 * bits. (The JVM implementation used seems to have a 15-bit
47 * per-thread identifier available.)
49 * This implementation then combines Dice's basic lock model with
50 * Bacon's simplification of keeping a lock record for the lifetime of
51 * an object.
/* The inflated lock record attached to an object (Dice-style monitor).
 * One record is kept for the lifetime of the object once inflated.
 */
struct _MonoThreadsSync
{
	gsize owner;			/* thread ID of the current holder, 0 when unowned */
	guint32 nest;			/* recursion count; left at 1 when the lock is released */
#ifdef HAVE_MOVING_COLLECTOR
	gint32 hash_code;		/* cached object hash (moving GC can't use the address) */
#endif
	volatile gint32 entry_count;	/* number of threads blocked on entry_sem */
	HANDLE entry_sem;		/* semaphore contended enterers wait on; created lazily */
	GSList *wait_list;		/* per-waiter auto-reset event handles for Wait/Pulse */
	void *data;			/* GC weak link to the owning object, or next-free pointer
					 * when this record sits on the allocator freelist */
};
typedef struct _MonitorArray MonitorArray;

/* A chunk of MonoThreadsSync records; chunks are chained and never freed. */
struct _MonitorArray {
	MonitorArray *next;
	int num_monitors;
	MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];
};
/* Serializes all freelist/array bookkeeping below (mon_new/mon_finalize). */
#define mono_monitor_allocator_lock() EnterCriticalSection (&monitor_mutex)
#define mono_monitor_allocator_unlock() LeaveCriticalSection (&monitor_mutex)
static CRITICAL_SECTION monitor_mutex;
/* Singly-linked list of unused sync records, chained through their ->data field. */
static MonoThreadsSync *monitor_freelist;
/* Chain of every MonitorArray chunk ever allocated; scanned for collected records. */
static MonitorArray *monitor_allocated;
/* Size of the next chunk to allocate; doubled after each allocation. */
static int array_size = 16;
/* One-time runtime init: set up the allocator lock. */
void
mono_monitor_init (void)
{
	InitializeCriticalSection (&monitor_mutex);
}
/* Runtime shutdown hook. The critical-section deletion is intentionally
 * commented out — presumably because other threads may still touch monitors
 * during shutdown; TODO(review): confirm before re-enabling.
 */
void
mono_monitor_cleanup (void)
{
	/*DeleteCriticalSection (&monitor_mutex);*/
}
94 static int
95 monitor_is_on_freelist (MonoThreadsSync *mon)
97 MonitorArray *marray;
98 for (marray = monitor_allocated; marray; marray = marray->next) {
99 if (mon >= marray->monitors && mon < &marray->monitors [marray->num_monitors])
100 return TRUE;
102 return FALSE;
/*
 * mono_locks_dump:
 * @include_untaken: also list inflated locks which are currently unheld
 *
 * Print a report on stdout of the managed locks currently held by
 * threads. If @include_untaken is specified, list also inflated locks
 * which are unheld.
 * This is supposed to be used in debuggers like gdb.
 */
void
mono_locks_dump (gboolean include_untaken)
{
	int i;
	int used = 0, on_freelist = 0, to_recycle = 0, total = 0, num_arrays = 0;
	MonoThreadsSync *mon;
	MonitorArray *marray;
	/* freelist entries chain through ->data */
	for (mon = monitor_freelist; mon; mon = mon->data)
		on_freelist++;
	for (marray = monitor_allocated; marray; marray = marray->next) {
		total += marray->num_monitors;
		num_arrays++;
		for (i = 0; i < marray->num_monitors; ++i) {
			mon = &marray->monitors [i];
			if (mon->data == NULL) {
				/* NULL data: record whose object was collected and which
				 * hasn't been relinked to the freelist yet. The last slot
				 * of a chunk legitimately has data == NULL, so skip it. */
				if (i < marray->num_monitors - 1)
					to_recycle++;
			} else {
				/* data either links the freelist or weak-links the object */
				if (!monitor_is_on_freelist (mon->data)) {
					MonoObject *holder = mono_gc_weak_link_get (&mon->data);
					if (mon->owner) {
						g_print ("Lock %p in object %p held by thread %p, nest level: %d\n",
							mon, holder, (void*)mon->owner, mon->nest);
						if (mon->entry_sem)
							g_print ("\tWaiting on semaphore %p: %d\n", mon->entry_sem, mon->entry_count);
					} else if (include_untaken) {
						g_print ("Lock %p in object %p untaken\n", mon, holder);
					}
					used++;
				}
			}
		}
	}
	g_print ("Total locks (in %d array(s)): %d, used: %d, on freelist: %d, to recycle: %d\n",
		num_arrays, total, used, on_freelist, to_recycle);
}
/* Return a sync record to the freelist once its object has been collected.
 * LOCKING: this is called with monitor_mutex held
 */
static void
mon_finalize (MonoThreadsSync *mon)
{
	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": Finalizing sync %p", mon));

	if (mon->entry_sem != NULL) {
		CloseHandle (mon->entry_sem);
		mon->entry_sem = NULL;
	}
	/* If this isn't empty then something is seriously broken - it
	 * means a thread is still waiting on the object that owned
	 * this lock, but the object has been finalized.
	 */
	g_assert (mon->wait_list == NULL);

	mon->entry_count = 0;
	/* owner and nest are set in mon_new, no need to zero them out */

	/* push onto the freelist: ->data doubles as the next-free link */
	mon->data = monitor_freelist;
	monitor_freelist = mon;
}
174 /* LOCKING: this is called with monitor_mutex held */
175 static MonoThreadsSync *
176 mon_new (gsize id)
178 MonoThreadsSync *new;
180 if (!monitor_freelist) {
181 MonitorArray *marray;
182 int i;
183 /* see if any sync block has been collected */
184 new = NULL;
185 for (marray = monitor_allocated; marray; marray = marray->next) {
186 for (i = 0; i < marray->num_monitors; ++i) {
187 if (marray->monitors [i].data == NULL) {
188 new = &marray->monitors [i];
189 new->data = monitor_freelist;
190 monitor_freelist = new;
193 /* small perf tweak to avoid scanning all the blocks */
194 if (new)
195 break;
197 /* need to allocate a new array of monitors */
198 if (!monitor_freelist) {
199 MonitorArray *last;
200 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": allocating more monitors: %d", array_size));
201 marray = g_malloc0 (sizeof (MonoArray) + array_size * sizeof (MonoThreadsSync));
202 marray->num_monitors = array_size;
203 array_size *= 2;
204 /* link into the freelist */
205 for (i = 0; i < marray->num_monitors - 1; ++i) {
206 marray->monitors [i].data = &marray->monitors [i + 1];
208 marray->monitors [i].data = NULL; /* the last one */
209 monitor_freelist = &marray->monitors [0];
210 /* we happend the marray instead of prepending so that
211 * the collecting loop above will need to scan smaller arrays first
213 if (!monitor_allocated) {
214 monitor_allocated = marray;
215 } else {
216 last = monitor_allocated;
217 while (last->next)
218 last = last->next;
219 last->next = marray;
224 new = monitor_freelist;
225 monitor_freelist = new->data;
227 new->owner = id;
228 new->nest = 1;
230 return new;
/*
 * Format of the lock word:
 * thinhash | fathash | data
 *
 * thinhash is the lower bit: if set data is the shifted hashcode of the object.
 * fathash is another bit: if set the hash code is stored in the MonoThreadsSync
 *   struct pointed to by data
 * if neither bit is set and data is non-NULL, data is a MonoThreadsSync
 */
typedef union {
	gsize lock_word;
	MonoThreadsSync *sync;
} LockWord;

enum {
	LOCK_WORD_THIN_HASH = 1,	/* lock word holds the hash inline */
	LOCK_WORD_FAT_HASH = 1 << 1,	/* hash lives in the MonoThreadsSync */
	LOCK_WORD_BITS_MASK = 0x3,	/* mask covering both tag bits */
	LOCK_WORD_HASH_SHIFT = 2	/* thin hash is stored above the tag bits */
};

/* objects are at least 8-byte aligned, so the low 3 address bits are free */
#define MONO_OBJECT_ALIGNMENT_SHIFT	3
/*
 * mono_object_hash:
 * @obj: an object
 *
 * Calculate a hash code for @obj that is constant while @obj is alive.
 * With a moving collector the address can't be used directly forever, so the
 * first computed hash is stored either inline in the lock word (thin hash)
 * or in the inflated MonoThreadsSync (fat hash).
 */
int
mono_object_hash (MonoObject* obj)
{
#ifdef HAVE_MOVING_COLLECTOR
	LockWord lw;
	unsigned int hash;
	if (!obj)
		return 0;
	lw.sync = obj->synchronisation;
	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
		/* hash already stored inline in the lock word */
		return (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT;
	}
	if (lw.lock_word & LOCK_WORD_FAT_HASH) {
		/* hash stored in the inflated sync record */
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		return lw.sync->hash_code;
	}
	/*
	 * while we are inside this function, the GC will keep this object pinned,
	 * since we are in the unmanaged stack. Thanks to this and to the hash
	 * function that depends only on the address, we can ignore the races if
	 * another thread computes the hash at the same time, because it'll end up
	 * with the same value.
	 */
	hash = (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
	/* clear the top bits as they can be discarded */
	hash &= ~(LOCK_WORD_BITS_MASK << 30);
	/* no hash flags were set, so it must be a MonoThreadsSync pointer if not NULL */
	if (lw.sync) {
		/* already inflated: record the hash in the sync struct */
		lw.sync->hash_code = hash;
		lw.lock_word |= LOCK_WORD_FAT_HASH;
		/* this is safe since we don't deflate locks */
		obj->synchronisation = lw.sync;
	} else {
		/* try to install a thin hash directly in the lock word */
		lw.lock_word = LOCK_WORD_THIN_HASH | (hash << LOCK_WORD_HASH_SHIFT);
		if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, NULL) == NULL)
			return hash;
		/* someone set the hash flag or someone inflated the object */
		lw.sync = obj->synchronisation;
		if (lw.lock_word & LOCK_WORD_THIN_HASH)
			return hash;
		/* object was inflated by a racing thread: store the hash fat */
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		lw.sync->hash_code = hash;
		lw.lock_word |= LOCK_WORD_FAT_HASH;
		/* this is safe since we don't deflate locks */
		obj->synchronisation = lw.sync;
	}
	return hash;
#else
	/*
	 * Wang's address-based hash function:
	 *   http://www.concentric.net/~Ttwang/tech/addrhash.htm
	 */
	return (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
#endif
}
/* Core monitor-enter.
 * Returns 1 on success, 0 on timeout.
 * If allow_interruption==TRUE, the method will be interrupted if abort or suspend
 * is requested. In this case it returns -1.
 */
static gint32
mono_monitor_try_enter_internal (MonoObject *obj, guint32 ms, gboolean allow_interruption)
{
	MonoThreadsSync *mon;
	gsize id = GetCurrentThreadId ();
	HANDLE sem;
	guint32 then = 0, now, delta;
	guint32 waitms;
	guint32 ret;

	LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION
		  ": (%d) Trying to lock object %p (%d ms)", id, obj, ms));

retry:
	mon = obj->synchronisation;

	/* If the object has never been locked... */
	if (mon == NULL) {
		/* allocator lock is held across mon_new and the publish attempt so
		 * a losing CAS can return the record via mon_finalize safely */
		mono_monitor_allocator_lock ();
		mon = mon_new (id);
		if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, mon, NULL) == NULL) {
			mono_gc_weak_link_add (&mon->data, obj);
			mono_monitor_allocator_unlock ();
			/* Successfully locked */
			return 1;
		} else {
#ifdef HAVE_MOVING_COLLECTOR
			LockWord lw;
			lw.sync = obj->synchronisation;
			if (lw.lock_word & LOCK_WORD_THIN_HASH) {
				MonoThreadsSync *oldlw = lw.sync;
				/* move the already calculated hash */
				mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
				lw.sync = mon;
				lw.lock_word |= LOCK_WORD_FAT_HASH;
				if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
					mono_gc_weak_link_add (&mon->data, obj);
					mono_monitor_allocator_unlock ();
					/* Successfully locked */
					return 1;
				} else {
					/* lost another race: give the record back and retry */
					mon_finalize (mon);
					mono_monitor_allocator_unlock ();
					goto retry;
				}
			} else if (lw.lock_word & LOCK_WORD_FAT_HASH) {
				mon_finalize (mon);
				mono_monitor_allocator_unlock ();
				/* get the old lock without the fat hash bit */
				lw.lock_word &= ~LOCK_WORD_BITS_MASK;
				mon = lw.sync;
			} else {
				/* someone else inflated first: use their record */
				mon_finalize (mon);
				mono_monitor_allocator_unlock ();
				mon = obj->synchronisation;
			}
#else
			/* someone else inflated first: use their record */
			mon_finalize (mon);
			mono_monitor_allocator_unlock ();
			mon = obj->synchronisation;
#endif
		}
	} else {
#ifdef HAVE_MOVING_COLLECTOR
		/* lock word holds only a thin hash: inflate, preserving the hash */
		LockWord lw;
		lw.sync = mon;
		if (lw.lock_word & LOCK_WORD_THIN_HASH) {
			MonoThreadsSync *oldlw = lw.sync;
			mono_monitor_allocator_lock ();
			mon = mon_new (id);
			/* move the already calculated hash */
			mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
			lw.sync = mon;
			lw.lock_word |= LOCK_WORD_FAT_HASH;
			if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
				mono_gc_weak_link_add (&mon->data, obj);
				mono_monitor_allocator_unlock ();
				/* Successfully locked */
				return 1;
			} else {
				mon_finalize (mon);
				mono_monitor_allocator_unlock ();
				goto retry;
			}
		}
#endif
	}

#ifdef HAVE_MOVING_COLLECTOR
	/* strip the tag bits to get the real MonoThreadsSync pointer */
	{
		LockWord lw;
		lw.sync = mon;
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif

	/* If the object is currently locked by this thread... */
	if (mon->owner == id) {
		mon->nest++;
		return 1;
	}

	/* If the object has previously been locked but isn't now... */

	/* This case differs from Dice's case 3 because we don't
	 * deflate locks or cache unused lock records
	 */
	if (mon->owner == 0) {
		/* Try to install our ID in the owner field, nest
		 * should have been left at 1 by the previous unlock
		 * operation
		 */
		if (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0) {
			/* Success */
			g_assert (mon->nest == 1);
			return 1;
		} else {
			/* Trumped again! */
			goto retry;
		}
	}

	/* The object must be locked by someone else... */

	/* If ms is 0 we don't block, but just fail straight away */
	if (ms == 0) {
		LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) timed out, returning FALSE", id));
		return 0;
	}

	/* The slow path begins here. We need to make sure there's a
	 * semaphore handle (creating it if necessary), and block on
	 * it
	 */
	if (mon->entry_sem == NULL) {
		/* Create the semaphore */
		sem = CreateSemaphore (NULL, 0, 0x7fffffff, NULL);
		g_assert (sem != NULL);
		if (InterlockedCompareExchangePointer ((gpointer*)&mon->entry_sem, sem, NULL) != NULL) {
			/* Someone else just put a handle here */
			CloseHandle (sem);
		}
	}

	/* If we need to time out, record a timestamp and adjust ms,
	 * because WaitForSingleObject doesn't tell us how long it
	 * waited for.
	 *
	 * Don't block forever here, because there's a chance the owner
	 * thread released the lock while we were creating the
	 * semaphore: we would not get the wakeup. Using the event
	 * handle technique from pulse/wait would involve locking the
	 * lock struct and therefore slowing down the fast path.
	 */
	if (ms != INFINITE) {
		then = GetTickCount ();
		if (ms < 100) {
			waitms = ms;
		} else {
			waitms = 100;
		}
	} else {
		/* poll in 100ms slices even for infinite waits (see above) */
		waitms = 100;
	}

	InterlockedIncrement (&mon->entry_count);
	ret = WaitForSingleObjectEx (mon->entry_sem, waitms, allow_interruption);
	InterlockedDecrement (&mon->entry_count);

	if (ms != INFINITE) {
		now = GetTickCount ();

		if (now < then) {
			/* The counter must have wrapped around */
			LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
				   ": wrapped around! now=0x%x then=0x%x", now, then));

			now += (0xffffffff - then);
			then = 0;

			LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": wrap rejig: now=0x%x then=0x%x delta=0x%x", now, then, now-then));
		}

		delta = now - then;
		if (delta >= ms) {
			ms = 0;
		} else {
			ms -= delta;
		}

		if ((ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) && ms > 0) {
			/* More time left */
			goto retry;
		}
	} else {
		if (ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) {
			/* Infinite wait, so just try again */
			goto retry;
		}
	}

	if (ret == WAIT_OBJECT_0) {
		/* retry from the top */
		goto retry;
	}

	/* We must have timed out */
	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) timed out waiting, returning FALSE", id));

	if (ret == WAIT_IO_COMPLETION)
		return -1;
	else
		return 0;
}
/* Blocking, non-interruptible monitor enter. */
gboolean
mono_monitor_enter (MonoObject *obj)
{
	return mono_monitor_try_enter_internal (obj, INFINITE, FALSE) == 1;
}
/* Non-interruptible monitor enter with a millisecond timeout. */
gboolean
mono_monitor_try_enter (MonoObject *obj, guint32 ms)
{
	return mono_monitor_try_enter_internal (obj, ms, FALSE) == 1;
}
/* Release the monitor on @obj. Silently ignores objects that were never
 * locked or that are owned by another thread (matching MS behaviour noted
 * below).
 */
void
mono_monitor_exit (MonoObject *obj)
{
	MonoThreadsSync *mon;
	guint32 nest;

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Unlocking %p", GetCurrentThreadId (), obj));

	mon = obj->synchronisation;

#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		/* a thin-hash-only lock word means the object was never locked */
		if (lw.lock_word & LOCK_WORD_THIN_HASH)
			return;
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (mon == NULL) {
		/* No one ever used Enter. Just ignore the Exit request as MS does */
		return;
	}
	if (mon->owner != GetCurrentThreadId ()) {
		return;
	}

	nest = mon->nest - 1;
	if (nest == 0) {
		LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
			  ": (%d) Object %p is now unlocked", GetCurrentThreadId (), obj));

		/* object is now unlocked, leave nest==1 so we don't
		 * need to set it when the lock is reacquired
		 */
		mon->owner = 0;

		/* Do the wakeup stuff. It's possible that the last
		 * blocking thread gave up waiting just before we
		 * release the semaphore resulting in a futile wakeup
		 * next time there's contention for this object, but
		 * it means we don't have to waste time locking the
		 * struct.
		 */
		if (mon->entry_count > 0) {
			ReleaseSemaphore (mon->entry_sem, 1, NULL);
		}
	} else {
		LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
			  ": (%d) Object %p is now locked %d times", GetCurrentThreadId (), obj, nest));
		mon->nest = nest;
	}
}
609 gboolean
610 ves_icall_System_Threading_Monitor_Monitor_try_enter (MonoObject *obj, guint32 ms)
612 gint32 res;
614 do {
615 res = mono_monitor_try_enter_internal (obj, ms, TRUE);
616 if (res == -1)
617 mono_thread_interruption_checkpoint ();
618 } while (res == -1);
620 return res == 1;
/* Managed Monitor.Exit icall: thin wrapper over mono_monitor_exit. */
void
ves_icall_System_Threading_Monitor_Monitor_exit (MonoObject *obj)
{
	mono_monitor_exit (obj);
}
629 gboolean
630 ves_icall_System_Threading_Monitor_Monitor_test_owner (MonoObject *obj)
632 MonoThreadsSync *mon;
634 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
635 ": Testing if %p is owned by thread %d", obj, GetCurrentThreadId()));
637 mon = obj->synchronisation;
638 #ifdef HAVE_MOVING_COLLECTOR
640 LockWord lw;
641 lw.sync = mon;
642 if (lw.lock_word & LOCK_WORD_THIN_HASH)
643 return FALSE;
644 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
645 mon = lw.sync;
647 #endif
648 if (mon == NULL) {
649 return FALSE;
652 if(mon->owner==GetCurrentThreadId ()) {
653 return(TRUE);
656 return(FALSE);
659 gboolean
660 ves_icall_System_Threading_Monitor_Monitor_test_synchronised (MonoObject *obj)
662 MonoThreadsSync *mon;
664 LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION
665 ": (%d) Testing if %p is owned by any thread", GetCurrentThreadId (), obj));
667 mon = obj->synchronisation;
668 #ifdef HAVE_MOVING_COLLECTOR
670 LockWord lw;
671 lw.sync = mon;
672 if (lw.lock_word & LOCK_WORD_THIN_HASH)
673 return FALSE;
674 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
675 mon = lw.sync;
677 #endif
678 if (mon == NULL) {
679 return FALSE;
682 if (mon->owner != 0) {
683 return TRUE;
686 return FALSE;
/* All wait list manipulation in the pulse, pulseall and wait
 * functions happens while the monitor lock is held, so we don't need
 * any extra struct locking
 */

/* Managed Monitor.Pulse icall: wake the first waiter queued on @obj.
 * Raises SynchronizationLockException when the caller doesn't hold the lock.
 */
void
ves_icall_System_Threading_Monitor_Monitor_pulse (MonoObject *obj)
{
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Pulsing %p",
		GetCurrentThreadId (), obj));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		/* thin hash only: object was never locked */
		if (lw.lock_word & LOCK_WORD_THIN_HASH) {
			mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
			return;
		}
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (mon == NULL) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
		return;
	}
	if (mon->owner != GetCurrentThreadId ()) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
		return;
	}

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) %d threads waiting",
		GetCurrentThreadId (), g_slist_length (mon->wait_list)));

	/* signal and dequeue the head waiter's event, if any */
	if (mon->wait_list != NULL) {
		LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
			   ": (%d) signalling and dequeuing handle %p",
			   GetCurrentThreadId (), mon->wait_list->data));

		SetEvent (mon->wait_list->data);
		mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
	}
}
/* Managed Monitor.PulseAll icall: wake every waiter queued on @obj.
 * Raises SynchronizationLockException when the caller doesn't hold the lock.
 */
void
ves_icall_System_Threading_Monitor_Monitor_pulse_all (MonoObject *obj)
{
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION ": (%d) Pulsing all %p",
		GetCurrentThreadId (), obj));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		/* thin hash only: object was never locked */
		if (lw.lock_word & LOCK_WORD_THIN_HASH) {
			mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
			return;
		}
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (mon == NULL) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
		return;
	}
	if (mon->owner != GetCurrentThreadId ()) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
		return;
	}

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) %d threads waiting",
		GetCurrentThreadId (), g_slist_length (mon->wait_list)));

	/* drain the queue, signalling each waiter's event in turn */
	while (mon->wait_list != NULL) {
		LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
			   ": (%d) signalling and dequeuing handle %p",
			   GetCurrentThreadId (), mon->wait_list->data));

		SetEvent (mon->wait_list->data);
		mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
	}
}
/* Managed Monitor.Wait icall: queue a private auto-reset event on @obj's
 * wait list, release the monitor, wait for a Pulse (or timeout @ms), then
 * reacquire the monitor with the saved nest count.
 * Returns TRUE when pulsed, FALSE on timeout/interruption; raises
 * SynchronizationLockException when the caller doesn't hold the lock.
 */
gboolean
ves_icall_System_Threading_Monitor_Monitor_wait (MonoObject *obj, guint32 ms)
{
	MonoThreadsSync *mon;
	HANDLE event;
	guint32 nest;
	guint32 ret;
	gboolean success = FALSE;
	gint32 regain;
	MonoThread *thread = mono_thread_current ();

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
		  ": (%d) Trying to wait for %p with timeout %dms",
		  GetCurrentThreadId (), obj, ms));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	{
		LockWord lw;
		lw.sync = mon;
		/* thin hash only: object was never locked */
		if (lw.lock_word & LOCK_WORD_THIN_HASH) {
			mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
			return FALSE;
		}
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mon = lw.sync;
	}
#endif
	if (mon == NULL) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
		return FALSE;
	}
	if (mon->owner != GetCurrentThreadId ()) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
		return FALSE;
	}

	/* Do this WaitSleepJoin check before creating the event handle */
	mono_thread_current_check_pending_interrupt ();

	event = CreateEvent (NULL, FALSE, FALSE, NULL);
	if (event == NULL) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to set up wait event"));
		return FALSE;
	}

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) queuing handle %p",
		GetCurrentThreadId (), event));

	mono_monitor_enter (thread->synch_lock);
	thread->state |= ThreadState_WaitSleepJoin;
	mono_monitor_exit (thread->synch_lock);

	/* safe without extra locking: we hold the monitor (see comment above
	 * the pulse functions) */
	mon->wait_list = g_slist_append (mon->wait_list, event);

	/* Save the nest count, and release the lock */
	nest = mon->nest;
	mon->nest = 1;
	mono_monitor_exit (obj);

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Unlocked %p lock %p",
		GetCurrentThreadId (), obj, mon));

	/* There's no race between unlocking mon and waiting for the
	 * event, because auto reset events are sticky, and this event
	 * is private to this thread. Therefore even if the event was
	 * signalled before we wait, we still succeed.
	 */
	ret = WaitForSingleObjectEx (event, ms, TRUE);

	/* Reset the thread state fairly early, so we don't have to worry
	 * about the monitor error checking
	 */
	mono_monitor_enter (thread->synch_lock);
	thread->state &= ~ThreadState_WaitSleepJoin;
	mono_monitor_exit (thread->synch_lock);

	if (mono_thread_interruption_requested ()) {
		CloseHandle (event);
		return FALSE;
	}

	/* Regain the lock with the previous nest count */
	do {
		regain = mono_monitor_try_enter_internal (obj, INFINITE, TRUE);
		if (regain == -1)
			mono_thread_interruption_checkpoint ();
	} while (regain == -1);

	if (regain == 0) {
		/* Something went wrong, so throw a
		 * SynchronizationLockException
		 */
		CloseHandle (event);
		mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to regain lock"));
		return FALSE;
	}

	mon->nest = nest;

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Regained %p lock %p",
		GetCurrentThreadId (), obj, mon));

	if (ret == WAIT_TIMEOUT) {
		/* Poll the event again, just in case it was signalled
		 * while we were trying to regain the monitor lock
		 */
		ret = WaitForSingleObjectEx (event, 0, FALSE);
	}

	/* Pulse will have popped our event from the queue if it signalled
	 * us, so we only do it here if the wait timed out.
	 *
	 * This avoids a race condition where the thread holding the
	 * lock can Pulse several times before the WaitForSingleObject
	 * returns. If we popped the queue here then this event might
	 * be signalled more than once, thereby starving another
	 * thread.
	 */

	if (ret == WAIT_OBJECT_0) {
		LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Success",
			GetCurrentThreadId ()));
		success = TRUE;
	} else {
		LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Wait failed, dequeuing handle %p",
			GetCurrentThreadId (), event));
		/* No pulse, so we have to remove ourself from the
		 * wait queue
		 */
		mon->wait_list = g_slist_remove (mon->wait_list, event);
	}
	CloseHandle (event);

	return success;
}