msvcr100: Add _ReentrantPPLLock class implementation.
[wine.git] / dlls / msvcrt / lock.c
blob d159745e855e209dc73df276e400eff9404353be
1 /*
2 * Copyright (c) 2002, TransGaming Technologies Inc.
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with this library; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
19 #include "config.h"
20 #include "wine/port.h"
22 #include <stdarg.h>
24 #include "wine/debug.h"
25 #include "windef.h"
26 #include "winbase.h"
27 #include "winternl.h"
28 #include "wine/heap.h"
29 #include "msvcrt.h"
30 #include "cppexcept.h"
31 #include "mtdll.h"
32 #include "cxx.h"
34 WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);
36 typedef struct
38 BOOL bInit;
39 CRITICAL_SECTION crit;
40 } LOCKTABLEENTRY;
42 static LOCKTABLEENTRY lock_table[ _TOTAL_LOCKS ];
44 static inline void msvcrt_mlock_set_entry_initialized( int locknum, BOOL initialized )
46 lock_table[ locknum ].bInit = initialized;
49 static inline void msvcrt_initialize_mlock( int locknum )
51 InitializeCriticalSection( &(lock_table[ locknum ].crit) );
52 lock_table[ locknum ].crit.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": LOCKTABLEENTRY.crit");
53 msvcrt_mlock_set_entry_initialized( locknum, TRUE );
56 static inline void msvcrt_uninitialize_mlock( int locknum )
58 lock_table[ locknum ].crit.DebugInfo->Spare[0] = 0;
59 DeleteCriticalSection( &(lock_table[ locknum ].crit) );
60 msvcrt_mlock_set_entry_initialized( locknum, FALSE );
63 /**********************************************************************
64 * msvcrt_init_mt_locks (internal)
66 * Initialize the table lock. All other locks will be initialized
67 * upon first use.
70 void msvcrt_init_mt_locks(void)
72 int i;
74 TRACE( "initializing mtlocks\n" );
76 /* Initialize the table */
77 for( i=0; i < _TOTAL_LOCKS; i++ )
79 msvcrt_mlock_set_entry_initialized( i, FALSE );
82 /* Initialize our lock table lock */
83 msvcrt_initialize_mlock( _LOCKTAB_LOCK );
86 /**********************************************************************
87 * _lock (MSVCRT.@)
89 void CDECL _lock( int locknum )
91 TRACE( "(%d)\n", locknum );
93 /* If the lock doesn't exist yet, create it */
94 if( lock_table[ locknum ].bInit == FALSE )
96 /* Lock while we're changing the lock table */
97 _lock( _LOCKTAB_LOCK );
99 /* Check again if we've got a bit of a race on lock creation */
100 if( lock_table[ locknum ].bInit == FALSE )
102 TRACE( ": creating lock #%d\n", locknum );
103 msvcrt_initialize_mlock( locknum );
106 /* Unlock ourselves */
107 _unlock( _LOCKTAB_LOCK );
110 EnterCriticalSection( &(lock_table[ locknum ].crit) );
113 /**********************************************************************
114 * _unlock (MSVCRT.@)
116 * NOTE: There is no error detection to make sure the lock exists and is acquired.
118 void CDECL _unlock( int locknum )
120 TRACE( "(%d)\n", locknum );
122 LeaveCriticalSection( &(lock_table[ locknum ].crit) );
125 #if _MSVCR_VER >= 100
126 typedef enum
128 SPINWAIT_INIT,
129 SPINWAIT_SPIN,
130 SPINWAIT_YIELD,
131 SPINWAIT_DONE
132 } SpinWait_state;
134 typedef void (__cdecl *yield_func)(void);
136 typedef struct
138 ULONG spin;
139 ULONG unknown;
140 SpinWait_state state;
141 yield_func yield_func;
142 } SpinWait;
144 /* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
145 unsigned int __cdecl SpinCount__Value(void)
147 static unsigned int val = -1;
149 TRACE("()\n");
151 if(val == -1) {
152 SYSTEM_INFO si;
154 GetSystemInfo(&si);
155 val = si.dwNumberOfProcessors>1 ? 4000 : 0;
158 return val;
161 /* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
162 /* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
163 DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
164 SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
166 TRACE("(%p %p)\n", this, yf);
168 this->state = SPINWAIT_INIT;
169 this->unknown = 1;
170 this->yield_func = yf;
171 return this;
174 /* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
175 /* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
176 DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
177 SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
179 TRACE("(%p %p)\n", this, yf);
181 this->state = SPINWAIT_INIT;
182 this->unknown = 0;
183 this->yield_func = yf;
184 return this;
187 /* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
188 /* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
189 /* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
190 /* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
191 DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
192 void __thiscall SpinWait_dtor(SpinWait *this)
194 TRACE("(%p)\n", this);
197 /* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
198 /* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
199 /* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
200 /* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
201 DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
202 void __thiscall SpinWait__DoYield(SpinWait *this)
204 TRACE("(%p)\n", this);
206 if(this->unknown)
207 this->yield_func();
210 /* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
211 /* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
212 /* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
213 /* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
214 DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
215 ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
217 TRACE("(%p)\n", this);
218 return 1;
221 /* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
222 /* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
223 /* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
224 /* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
225 DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
226 void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
228 TRACE("(%p %d)\n", this, spin);
230 this->spin = spin;
231 this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
234 /* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
235 /* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
236 /* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
237 /* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
238 DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
239 void __thiscall SpinWait__Reset(SpinWait *this)
241 SpinWait__SetSpinCount(this, SpinCount__Value());
244 /* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
245 /* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
246 /* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
247 /* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
248 DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
249 MSVCRT_bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
251 TRACE("(%p)\n", this);
253 this->spin--;
254 return this->spin > 0;
257 /* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
258 /* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
259 /* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
260 /* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
261 DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
262 MSVCRT_bool __thiscall SpinWait__SpinOnce(SpinWait *this)
264 switch(this->state) {
265 case SPINWAIT_INIT:
266 SpinWait__Reset(this);
267 /* fall through */
268 case SPINWAIT_SPIN:
269 InterlockedDecrement((LONG*)&this->spin);
270 if(!this->spin)
271 this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
272 return TRUE;
273 case SPINWAIT_YIELD:
274 this->state = SPINWAIT_DONE;
275 this->yield_func();
276 return TRUE;
277 default:
278 SpinWait__Reset(this);
279 return FALSE;
283 static HANDLE keyed_event;
285 /* keep in sync with msvcp90/msvcp90.h */
286 typedef struct cs_queue
288 struct cs_queue *next;
289 #if _MSVCR_VER >= 110
290 BOOL free;
291 int unknown;
292 #endif
293 } cs_queue;
295 typedef struct
297 ULONG_PTR unk_thread_id;
298 cs_queue unk_active;
299 #if _MSVCR_VER >= 110
300 void *unknown[2];
301 #else
302 void *unknown[1];
303 #endif
304 cs_queue *head;
305 void *tail;
306 } critical_section;
308 /* ??0critical_section@Concurrency@@QAE@XZ */
309 /* ??0critical_section@Concurrency@@QEAA@XZ */
310 DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
311 critical_section* __thiscall critical_section_ctor(critical_section *this)
313 TRACE("(%p)\n", this);
315 if(!keyed_event) {
316 HANDLE event;
318 NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
319 if(InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
320 NtClose(event);
323 this->unk_thread_id = 0;
324 this->head = this->tail = NULL;
325 return this;
328 /* ??1critical_section@Concurrency@@QAE@XZ */
329 /* ??1critical_section@Concurrency@@QEAA@XZ */
330 DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
331 void __thiscall critical_section_dtor(critical_section *this)
333 TRACE("(%p)\n", this);
336 static void __cdecl spin_wait_yield(void)
338 Sleep(0);
341 static inline void spin_wait_for_next_cs(cs_queue *q)
343 SpinWait sw;
345 if(q->next) return;
347 SpinWait_ctor(&sw, &spin_wait_yield);
348 SpinWait__Reset(&sw);
349 while(!q->next)
350 SpinWait__SpinOnce(&sw);
351 SpinWait_dtor(&sw);
354 static inline void cs_set_head(critical_section *cs, cs_queue *q)
356 cs->unk_thread_id = GetCurrentThreadId();
357 cs->unk_active.next = q->next;
358 cs->head = &cs->unk_active;
361 static inline void cs_lock(critical_section *cs, cs_queue *q)
363 cs_queue *last;
365 if(cs->unk_thread_id == GetCurrentThreadId())
366 throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");
368 memset(q, 0, sizeof(*q));
369 last = InterlockedExchangePointer(&cs->tail, q);
370 if(last) {
371 last->next = q;
372 NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
375 cs_set_head(cs, q);
376 if(InterlockedCompareExchangePointer(&cs->tail, &cs->unk_active, q) != q) {
377 spin_wait_for_next_cs(q);
378 cs->unk_active.next = q->next;
382 /* ?lock@critical_section@Concurrency@@QAEXXZ */
383 /* ?lock@critical_section@Concurrency@@QEAAXXZ */
384 DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
385 void __thiscall critical_section_lock(critical_section *this)
387 cs_queue q;
389 TRACE("(%p)\n", this);
390 cs_lock(this, &q);
393 /* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
394 /* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
395 DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
396 MSVCRT_bool __thiscall critical_section_try_lock(critical_section *this)
398 cs_queue q;
400 TRACE("(%p)\n", this);
402 if(this->unk_thread_id == GetCurrentThreadId())
403 return FALSE;
405 memset(&q, 0, sizeof(q));
406 if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
407 cs_set_head(this, &q);
408 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
409 spin_wait_for_next_cs(&q);
410 this->unk_active.next = q.next;
412 return TRUE;
414 return FALSE;
417 /* ?unlock@critical_section@Concurrency@@QAEXXZ */
418 /* ?unlock@critical_section@Concurrency@@QEAAXXZ */
419 DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
420 void __thiscall critical_section_unlock(critical_section *this)
422 TRACE("(%p)\n", this);
424 this->unk_thread_id = 0;
425 this->head = NULL;
426 if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
427 == &this->unk_active) return;
428 spin_wait_for_next_cs(&this->unk_active);
430 #if _MSVCR_VER >= 110
431 while(1) {
432 cs_queue *next;
434 if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
435 break;
437 next = this->unk_active.next;
438 if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
439 HeapFree(GetProcessHeap(), 0, next);
440 return;
442 spin_wait_for_next_cs(next);
444 this->unk_active.next = next->next;
445 HeapFree(GetProcessHeap(), 0, next);
447 #endif
449 NtReleaseKeyedEvent(keyed_event, this->unk_active.next, 0, NULL);
452 /* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
453 /* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
454 DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
455 critical_section* __thiscall critical_section_native_handle(critical_section *this)
457 TRACE("(%p)\n", this);
458 return this;
461 #if _MSVCR_VER >= 110
462 /* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
463 /* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
464 DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
465 MSVCRT_bool __thiscall critical_section_try_lock_for(
466 critical_section *this, unsigned int timeout)
468 cs_queue *q, *last;
470 TRACE("(%p %d)\n", this, timeout);
472 if(this->unk_thread_id == GetCurrentThreadId())
473 throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");
475 if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
476 return critical_section_try_lock(this);
478 last = InterlockedExchangePointer(&this->tail, q);
479 if(last) {
480 LARGE_INTEGER to;
481 NTSTATUS status;
482 FILETIME ft;
484 last->next = q;
485 GetSystemTimeAsFileTime(&ft);
486 to.QuadPart = ((LONGLONG)ft.dwHighDateTime<<32) +
487 ft.dwLowDateTime + (LONGLONG)timeout*10000;
488 status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
489 if(status == STATUS_TIMEOUT) {
490 if(!InterlockedExchange(&q->free, TRUE))
491 return FALSE;
492 /* A thread has signaled the event and is block waiting. */
493 /* We need to catch the event to wake the thread. */
494 NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
498 cs_set_head(this, q);
499 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
500 spin_wait_for_next_cs(q);
501 this->unk_active.next = q->next;
504 HeapFree(GetProcessHeap(), 0, q);
505 return TRUE;
507 #endif
509 typedef struct
511 critical_section *cs;
512 union {
513 cs_queue q;
514 struct {
515 void *unknown[4];
516 int unknown2[2];
517 } unknown;
518 } lock;
519 } critical_section_scoped_lock;
521 /* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
522 /* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
523 DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
524 critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
525 critical_section_scoped_lock *this, critical_section *cs)
527 TRACE("(%p %p)\n", this, cs);
528 this->cs = cs;
529 cs_lock(this->cs, &this->lock.q);
530 return this;
533 /* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
534 /* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
535 DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
536 void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
538 TRACE("(%p)\n", this);
539 critical_section_unlock(this->cs);
542 typedef struct
544 critical_section cs;
545 } _NonReentrantPPLLock;
547 /* ??0_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
548 /* ??0_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
549 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock_ctor, 4)
550 _NonReentrantPPLLock* __thiscall _NonReentrantPPLLock_ctor(_NonReentrantPPLLock *this)
552 TRACE("(%p)\n", this);
554 critical_section_ctor(&this->cs);
555 return this;
558 /* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
559 /* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
560 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Acquire, 8)
561 void __thiscall _NonReentrantPPLLock__Acquire(_NonReentrantPPLLock *this, cs_queue *q)
563 TRACE("(%p %p)\n", this, q);
564 cs_lock(&this->cs, q);
567 /* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QAEXXZ */
568 /* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QEAAXXZ */
569 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Release, 4)
570 void __thiscall _NonReentrantPPLLock__Release(_NonReentrantPPLLock *this)
572 TRACE("(%p)\n", this);
573 critical_section_unlock(&this->cs);
576 typedef struct
578 critical_section cs;
579 LONG count;
580 LONG owner;
581 } _ReentrantPPLLock;
583 /* ??0_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
584 /* ??0_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
585 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock_ctor, 4)
586 _ReentrantPPLLock* __thiscall _ReentrantPPLLock_ctor(_ReentrantPPLLock *this)
588 TRACE("(%p)\n", this);
590 critical_section_ctor(&this->cs);
591 this->count = 0;
592 this->owner = -1;
593 return this;
596 /* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
597 /* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
598 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Acquire, 8)
599 void __thiscall _ReentrantPPLLock__Acquire(_ReentrantPPLLock *this, cs_queue *q)
601 TRACE("(%p %p)\n", this, q);
603 if(this->owner == GetCurrentThreadId()) {
604 this->count++;
605 return;
608 cs_lock(&this->cs, q);
609 this->count++;
610 this->owner = GetCurrentThreadId();
613 /* ?_Release@_ReentrantPPLLock@details@Concurrency@@QAEXXZ */
614 /* ?_Release@_ReentrantPPLLock@details@Concurrency@@QEAAXXZ */
615 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Release, 4)
616 void __thiscall _ReentrantPPLLock__Release(_ReentrantPPLLock *this)
618 TRACE("(%p)\n", this);
620 this->count--;
621 if(this->count)
622 return;
624 this->owner = -1;
625 critical_section_unlock(&this->cs);
628 /* ?_GetConcurrency@details@Concurrency@@YAIXZ */
629 unsigned int __cdecl _GetConcurrency(void)
631 static unsigned int val = -1;
633 TRACE("()\n");
635 if(val == -1) {
636 SYSTEM_INFO si;
638 GetSystemInfo(&si);
639 val = si.dwNumberOfProcessors;
642 return val;
645 #define EVT_RUNNING (void*)1
646 #define EVT_WAITING NULL
648 struct thread_wait;
649 typedef struct thread_wait_entry
651 struct thread_wait *wait;
652 struct thread_wait_entry *next;
653 struct thread_wait_entry *prev;
654 } thread_wait_entry;
656 typedef struct thread_wait
658 void *signaled;
659 int pending_waits;
660 thread_wait_entry entries[1];
661 } thread_wait;
663 typedef struct
665 thread_wait_entry *waiters;
666 INT_PTR signaled;
667 critical_section cs;
668 } event;
670 static inline PLARGE_INTEGER evt_timeout(PLARGE_INTEGER pTime, unsigned int timeout)
672 if(timeout == COOPERATIVE_TIMEOUT_INFINITE) return NULL;
673 pTime->QuadPart = (ULONGLONG)timeout * -10000;
674 return pTime;
677 static void evt_add_queue(thread_wait_entry **head, thread_wait_entry *entry)
679 entry->next = *head;
680 entry->prev = NULL;
681 if(*head) (*head)->prev = entry;
682 *head = entry;
685 static void evt_remove_queue(thread_wait_entry **head, thread_wait_entry *entry)
687 if(entry == *head)
688 *head = entry->next;
689 else if(entry->prev)
690 entry->prev->next = entry->next;
691 if(entry->next) entry->next->prev = entry->prev;
694 static MSVCRT_size_t evt_end_wait(thread_wait *wait, event **events, int count)
696 MSVCRT_size_t i, ret = COOPERATIVE_WAIT_TIMEOUT;
698 for(i = 0; i < count; i++) {
699 critical_section_lock(&events[i]->cs);
700 if(events[i] == wait->signaled) ret = i;
701 evt_remove_queue(&events[i]->waiters, &wait->entries[i]);
702 critical_section_unlock(&events[i]->cs);
705 return ret;
708 static inline int evt_transition(void **state, void *from, void *to)
710 return InterlockedCompareExchangePointer(state, to, from) == from;
713 static MSVCRT_size_t evt_wait(thread_wait *wait, event **events, int count, MSVCRT_bool wait_all, unsigned int timeout)
715 int i;
716 NTSTATUS status;
717 LARGE_INTEGER ntto;
719 wait->signaled = EVT_RUNNING;
720 wait->pending_waits = wait_all ? count : 1;
721 for(i = 0; i < count; i++) {
722 wait->entries[i].wait = wait;
724 critical_section_lock(&events[i]->cs);
725 evt_add_queue(&events[i]->waiters, &wait->entries[i]);
726 if(events[i]->signaled) {
727 if(!InterlockedDecrement(&wait->pending_waits)) {
728 wait->signaled = events[i];
729 critical_section_unlock(&events[i]->cs);
731 return evt_end_wait(wait, events, i+1);
734 critical_section_unlock(&events[i]->cs);
737 if(!timeout)
738 return evt_end_wait(wait, events, count);
740 if(!evt_transition(&wait->signaled, EVT_RUNNING, EVT_WAITING))
741 return evt_end_wait(wait, events, count);
743 status = NtWaitForKeyedEvent(keyed_event, wait, 0, evt_timeout(&ntto, timeout));
745 if(status && !evt_transition(&wait->signaled, EVT_WAITING, EVT_RUNNING))
746 NtWaitForKeyedEvent(keyed_event, wait, 0, NULL);
748 return evt_end_wait(wait, events, count);
751 /* ??0event@Concurrency@@QAE@XZ */
752 /* ??0event@Concurrency@@QEAA@XZ */
753 DEFINE_THISCALL_WRAPPER(event_ctor, 4)
754 event* __thiscall event_ctor(event *this)
756 TRACE("(%p)\n", this);
758 this->waiters = NULL;
759 this->signaled = FALSE;
760 critical_section_ctor(&this->cs);
762 return this;
765 /* ??1event@Concurrency@@QAE@XZ */
766 /* ??1event@Concurrency@@QEAA@XZ */
767 DEFINE_THISCALL_WRAPPER(event_dtor, 4)
768 void __thiscall event_dtor(event *this)
770 TRACE("(%p)\n", this);
771 critical_section_dtor(&this->cs);
772 if(this->waiters)
773 ERR("there's a wait on destroyed event\n");
776 /* ?reset@event@Concurrency@@QAEXXZ */
777 /* ?reset@event@Concurrency@@QEAAXXZ */
778 DEFINE_THISCALL_WRAPPER(event_reset, 4)
779 void __thiscall event_reset(event *this)
781 thread_wait_entry *entry;
783 TRACE("(%p)\n", this);
785 critical_section_lock(&this->cs);
786 if(this->signaled) {
787 this->signaled = FALSE;
788 for(entry=this->waiters; entry; entry = entry->next)
789 InterlockedIncrement(&entry->wait->pending_waits);
791 critical_section_unlock(&this->cs);
794 /* ?set@event@Concurrency@@QAEXXZ */
795 /* ?set@event@Concurrency@@QEAAXXZ */
796 DEFINE_THISCALL_WRAPPER(event_set, 4)
797 void __thiscall event_set(event *this)
799 thread_wait_entry *wakeup = NULL;
800 thread_wait_entry *entry, *next;
802 TRACE("(%p)\n", this);
804 critical_section_lock(&this->cs);
805 if(!this->signaled) {
806 this->signaled = TRUE;
807 for(entry=this->waiters; entry; entry=next) {
808 next = entry->next;
809 if(!InterlockedDecrement(&entry->wait->pending_waits)) {
810 if(InterlockedExchangePointer(&entry->wait->signaled, this) == EVT_WAITING) {
811 evt_remove_queue(&this->waiters, entry);
812 evt_add_queue(&wakeup, entry);
817 critical_section_unlock(&this->cs);
819 for(entry=wakeup; entry; entry=next) {
820 next = entry->next;
821 entry->next = entry->prev = NULL;
822 NtReleaseKeyedEvent(keyed_event, entry->wait, 0, NULL);
826 /* ?wait@event@Concurrency@@QAEII@Z */
827 /* ?wait@event@Concurrency@@QEAA_KI@Z */
828 DEFINE_THISCALL_WRAPPER(event_wait, 8)
829 MSVCRT_size_t __thiscall event_wait(event *this, unsigned int timeout)
831 thread_wait wait;
832 MSVCRT_size_t signaled;
834 TRACE("(%p %u)\n", this, timeout);
836 critical_section_lock(&this->cs);
837 signaled = this->signaled;
838 critical_section_unlock(&this->cs);
840 if(!timeout) return signaled ? 0 : COOPERATIVE_WAIT_TIMEOUT;
841 return signaled ? 0 : evt_wait(&wait, &this, 1, FALSE, timeout);
844 /* ?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z */
845 /* ?wait_for_multiple@event@Concurrency@@SA_KPEAPEAV12@_K_NI@Z */
846 int __cdecl event_wait_for_multiple(event **events, MSVCRT_size_t count, MSVCRT_bool wait_all, unsigned int timeout)
848 thread_wait *wait;
849 MSVCRT_size_t ret;
851 TRACE("(%p %ld %d %u)\n", events, count, wait_all, timeout);
853 if(count == 0)
854 return 0;
856 wait = heap_alloc(FIELD_OFFSET(thread_wait, entries[count]));
857 if(!wait)
858 throw_exception(EXCEPTION_BAD_ALLOC, 0, "bad allocation");
859 ret = evt_wait(wait, events, count, wait_all, timeout);
860 heap_free(wait);
862 return ret;
864 #endif
866 #if _MSVCR_VER >= 110
867 typedef struct cv_queue {
868 struct cv_queue *next;
869 BOOL expired;
870 } cv_queue;
872 typedef struct {
873 /* cv_queue structure is not binary compatible */
874 cv_queue *queue;
875 critical_section lock;
876 } _Condition_variable;
878 /* ??0_Condition_variable@details@Concurrency@@QAE@XZ */
879 /* ??0_Condition_variable@details@Concurrency@@QEAA@XZ */
880 DEFINE_THISCALL_WRAPPER(_Condition_variable_ctor, 4)
881 _Condition_variable* __thiscall _Condition_variable_ctor(_Condition_variable *this)
883 TRACE("(%p)\n", this);
885 this->queue = NULL;
886 critical_section_ctor(&this->lock);
887 return this;
890 /* ??1_Condition_variable@details@Concurrency@@QAE@XZ */
891 /* ??1_Condition_variable@details@Concurrency@@QEAA@XZ */
892 DEFINE_THISCALL_WRAPPER(_Condition_variable_dtor, 4)
893 void __thiscall _Condition_variable_dtor(_Condition_variable *this)
895 TRACE("(%p)\n", this);
897 while(this->queue) {
898 cv_queue *next = this->queue->next;
899 if(!this->queue->expired)
900 ERR("there's an active wait\n");
901 HeapFree(GetProcessHeap(), 0, this->queue);
902 this->queue = next;
904 critical_section_dtor(&this->lock);
907 /* ?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z */
908 /* ?wait@_Condition_variable@details@Concurrency@@QEAAXAEAVcritical_section@3@@Z */
909 DEFINE_THISCALL_WRAPPER(_Condition_variable_wait, 8)
910 void __thiscall _Condition_variable_wait(_Condition_variable *this, critical_section *cs)
912 cv_queue q;
914 TRACE("(%p, %p)\n", this, cs);
916 critical_section_lock(&this->lock);
917 q.next = this->queue;
918 q.expired = FALSE;
919 this->queue = &q;
920 critical_section_unlock(&this->lock);
922 critical_section_unlock(cs);
923 NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
924 critical_section_lock(cs);
927 /* ?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z */
928 /* ?wait_for@_Condition_variable@details@Concurrency@@QEAA_NAEAVcritical_section@3@I@Z */
929 DEFINE_THISCALL_WRAPPER(_Condition_variable_wait_for, 12)
930 MSVCRT_bool __thiscall _Condition_variable_wait_for(_Condition_variable *this,
931 critical_section *cs, unsigned int timeout)
933 LARGE_INTEGER to;
934 NTSTATUS status;
935 FILETIME ft;
936 cv_queue *q;
938 TRACE("(%p %p %d)\n", this, cs, timeout);
940 if(!(q = HeapAlloc(GetProcessHeap(), 0, sizeof(cv_queue)))) {
941 throw_exception(EXCEPTION_BAD_ALLOC, 0, "bad allocation");
944 critical_section_lock(&this->lock);
945 q->next = this->queue;
946 q->expired = FALSE;
947 this->queue = q;
948 critical_section_unlock(&this->lock);
950 critical_section_unlock(cs);
952 GetSystemTimeAsFileTime(&ft);
953 to.QuadPart = ((LONGLONG)ft.dwHighDateTime << 32) +
954 ft.dwLowDateTime + (LONGLONG)timeout * 10000;
955 status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
956 if(status == STATUS_TIMEOUT) {
957 if(!InterlockedExchange(&q->expired, TRUE)) {
958 critical_section_lock(cs);
959 return FALSE;
961 else
962 NtWaitForKeyedEvent(keyed_event, q, 0, 0);
965 HeapFree(GetProcessHeap(), 0, q);
966 critical_section_lock(cs);
967 return TRUE;
970 /* ?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ */
971 /* ?notify_one@_Condition_variable@details@Concurrency@@QEAAXXZ */
972 DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_one, 4)
973 void __thiscall _Condition_variable_notify_one(_Condition_variable *this)
975 cv_queue *node;
977 TRACE("(%p)\n", this);
979 if(!this->queue)
980 return;
982 while(1) {
983 critical_section_lock(&this->lock);
984 node = this->queue;
985 if(!node) {
986 critical_section_unlock(&this->lock);
987 return;
989 this->queue = node->next;
990 critical_section_unlock(&this->lock);
992 if(!InterlockedExchange(&node->expired, TRUE)) {
993 NtReleaseKeyedEvent(keyed_event, node, 0, NULL);
994 return;
995 } else {
996 HeapFree(GetProcessHeap(), 0, node);
1001 /* ?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ */
1002 /* ?notify_all@_Condition_variable@details@Concurrency@@QEAAXXZ */
1003 DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_all, 4)
1004 void __thiscall _Condition_variable_notify_all(_Condition_variable *this)
1006 cv_queue *ptr;
1008 TRACE("(%p)\n", this);
1010 if(!this->queue)
1011 return;
1013 critical_section_lock(&this->lock);
1014 ptr = this->queue;
1015 this->queue = NULL;
1016 critical_section_unlock(&this->lock);
1018 while(ptr) {
1019 cv_queue *next = ptr->next;
1021 if(!InterlockedExchange(&ptr->expired, TRUE))
1022 NtReleaseKeyedEvent(keyed_event, ptr, 0, NULL);
1023 else
1024 HeapFree(GetProcessHeap(), 0, ptr);
1025 ptr = next;
1028 #endif
1030 #if _MSVCR_VER >= 100
1031 typedef struct rwl_queue
1033 struct rwl_queue *next;
1034 } rwl_queue;
1036 #define WRITER_WAITING 0x80000000
1037 /* FIXME: reader_writer_lock structure is not binary compatible
1038 * it can't exceed 28/56 bytes */
1039 typedef struct
1041 LONG count;
1042 LONG thread_id;
1043 rwl_queue active;
1044 rwl_queue *writer_head;
1045 rwl_queue *writer_tail;
1046 rwl_queue *reader_head;
1047 } reader_writer_lock;
1049 /* ??0reader_writer_lock@Concurrency@@QAE@XZ */
1050 /* ??0reader_writer_lock@Concurrency@@QEAA@XZ */
1051 DEFINE_THISCALL_WRAPPER(reader_writer_lock_ctor, 4)
1052 reader_writer_lock* __thiscall reader_writer_lock_ctor(reader_writer_lock *this)
1054 TRACE("(%p)\n", this);
1056 if (!keyed_event) {
1057 HANDLE event;
1059 NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
1060 if (InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
1061 NtClose(event);
1064 memset(this, 0, sizeof(*this));
1065 return this;
1068 /* ??1reader_writer_lock@Concurrency@@QAE@XZ */
1069 /* ??1reader_writer_lock@Concurrency@@QEAA@XZ */
1070 DEFINE_THISCALL_WRAPPER(reader_writer_lock_dtor, 4)
1071 void __thiscall reader_writer_lock_dtor(reader_writer_lock *this)
1073 TRACE("(%p)\n", this);
1075 if (this->thread_id != 0 || this->count)
1076 WARN("destroying locked reader_writer_lock\n");
1079 static inline void spin_wait_for_next_rwl(rwl_queue *q)
1081 SpinWait sw;
1083 if(q->next) return;
1085 SpinWait_ctor(&sw, &spin_wait_yield);
1086 SpinWait__Reset(&sw);
1087 while(!q->next)
1088 SpinWait__SpinOnce(&sw);
1089 SpinWait_dtor(&sw);
1092 /* Remove when proper InterlockedOr implementation is added to wine */
1093 static LONG InterlockedOr(LONG *d, LONG v)
1095 LONG l;
1096 while (~(l = *d) & v)
1097 if (InterlockedCompareExchange(d, l|v, l) == l) break;
1098 return l;
1101 static LONG InterlockedAnd(LONG *d, LONG v)
1103 LONG l = *d, old;
1104 while ((l & v) != l) {
1105 if((old = InterlockedCompareExchange(d, l&v, l)) == l) break;
1106 l = old;
1108 return l;
1111 /* ?lock@reader_writer_lock@Concurrency@@QAEXXZ */
1112 /* ?lock@reader_writer_lock@Concurrency@@QEAAXXZ */
1113 DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock, 4)
1114 void __thiscall reader_writer_lock_lock(reader_writer_lock *this)
1116 rwl_queue q = { NULL }, *last;
1118 TRACE("(%p)\n", this);
1120 if (this->thread_id == GetCurrentThreadId())
1121 throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");
1123 last = InterlockedExchangePointer((void**)&this->writer_tail, &q);
1124 if (last) {
1125 last->next = &q;
1126 NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
1127 } else {
1128 this->writer_head = &q;
1129 if (InterlockedOr(&this->count, WRITER_WAITING))
1130 NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
1133 this->thread_id = GetCurrentThreadId();
1134 this->writer_head = &this->active;
1135 this->active.next = NULL;
1136 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
1137 spin_wait_for_next_rwl(&q);
1138 this->active.next = q.next;
/* ?lock_read@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?lock_read@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock_read, 4)
/* Acquires the lock shared (as a reader).  Multiple readers may hold the
 * lock concurrently; blocks while a writer is waiting or active.  Throws
 * improper_lock if the calling thread already holds it as a writer. */
void __thiscall reader_writer_lock_lock_read(reader_writer_lock *this)
{
    rwl_queue q;

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId())
        throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked as writer");

    /* Push ourselves onto the LIFO list of arriving readers. */
    do {
        q.next = this->reader_head;
    } while(InterlockedCompareExchangePointer((void**)&this->reader_head, &q, q.next) != q.next);

    if (!q.next) {
        /* We are the first queued reader: try to enter by bumping the
         * reader count, unless a writer is waiting. */
        rwl_queue *head;
        LONG count;

        while (!((count = this->count) & WRITER_WAITING))
            if (InterlockedCompareExchange(&this->count, count+1, count) == count) break;

        if (count & WRITER_WAITING)
            /* A writer got in first; sleep until its unlock wakes us. */
            NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);

        /* Wake every reader that stacked up behind us, giving each one
         * its own reference in the reader count. */
        head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
        while(head && head != &q) {
            rwl_queue *next = head->next;
            InterlockedIncrement(&this->count);
            NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
            head = next;
        }
    } else {
        /* Not the first queued reader: wait for the head reader to wake us. */
        NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    }
}
/* ?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ */
/* ?try_lock@reader_writer_lock@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock, 4)
/* Attempts to acquire the lock exclusively without blocking.  Returns
 * TRUE on success, FALSE if the lock is busy or already held by the
 * calling thread. */
MSVCRT_bool __thiscall reader_writer_lock_try_lock(reader_writer_lock *this)
{
    rwl_queue q = { NULL };

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId())
        return FALSE;

    /* Enqueue only if no other writer is queued at all. */
    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &q, NULL))
        return FALSE;
    this->writer_head = &q;
    /* count == 0 means no readers and no writer: take ownership now. */
    if (!InterlockedCompareExchange(&this->count, WRITER_WAITING, 0)) {
        this->thread_id = GetCurrentThreadId();
        this->writer_head = &this->active;
        this->active.next = NULL;
        if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
            /* A writer queued behind us while switching to the active
             * node; pick up its link. */
            spin_wait_for_next_rwl(&q);
            this->active.next = q.next;
        }
        return TRUE;
    }

    /* Readers hold the lock: back out of the writer queue if we are
     * still its only member. */
    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, &q) == &q)
        return FALSE;
    /* A writer queued behind us meanwhile; wait for its link and make it
     * the queue head. */
    spin_wait_for_next_rwl(&q);
    this->writer_head = q.next;
    if (!InterlockedOr(&this->count, WRITER_WAITING)) {
        /* All readers left in the meantime: we own the lock after all,
         * keeping the queued writer behind the active node. */
        this->thread_id = GetCurrentThreadId();
        this->writer_head = &this->active;
        this->active.next = q.next;
        return TRUE;
    }
    return FALSE;
}
1219 /* ?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ */
1220 /* ?try_lock_read@reader_writer_lock@Concurrency@@QEAA_NXZ */
1221 DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock_read, 4)
1222 MSVCRT_bool __thiscall reader_writer_lock_try_lock_read(reader_writer_lock *this)
1224 LONG count;
1226 TRACE("(%p)\n", this);
1228 while (!((count = this->count) & WRITER_WAITING))
1229 if (InterlockedCompareExchange(&this->count, count+1, count) == count) return TRUE;
1230 return FALSE;
/* ?unlock@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?unlock@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_unlock, 4)
/* Releases the lock, whether held shared or exclusive, and wakes the
 * appropriate waiter(s). */
void __thiscall reader_writer_lock_unlock(reader_writer_lock *this)
{
    LONG count;
    rwl_queue *head, *next;

    TRACE("(%p)\n", this);

    if ((count = this->count) & ~WRITER_WAITING) {
        /* Reader unlock: drop our reference; the last reader out (count
         * fell to just WRITER_WAITING) wakes the queued writer. */
        count = InterlockedDecrement(&this->count);
        if (count != WRITER_WAITING)
            return;
        NtReleaseKeyedEvent(keyed_event, this->writer_head, 0, NULL);
        return;
    }

    /* Writer unlock. */
    this->thread_id = 0;
    next = this->writer_head->next;
    if (next) {
        /* Hand the lock directly to the next queued writer. */
        NtReleaseKeyedEvent(keyed_event, next, 0, NULL);
        return;
    }

    /* No writer queued: clear the writer flag and release all waiting
     * readers, giving each one a reader-count reference. */
    InterlockedAnd(&this->count, ~WRITER_WAITING);
    head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
    while (head) {
        next = head->next;
        InterlockedIncrement(&this->count);
        NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
        head = next;
    }

    /* Retire the active queue node; if a writer raced in behind it,
     * re-set WRITER_WAITING so new readers keep blocking. */
    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, this->writer_head) == this->writer_head)
        return;
    InterlockedOr(&this->count, WRITER_WAITING);
}
/* Scoped-lock helper shared by reader_writer_lock::scoped_lock and
 * scoped_lock_read: remembers the lock acquired at construction so the
 * destructor can release it. */
typedef struct {
    reader_writer_lock *lock;
} reader_writer_lock_scoped_lock;
1275 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
1276 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
1277 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_ctor, 8)
1278 reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_ctor(
1279 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
1281 TRACE("(%p %p)\n", this, lock);
1283 this->lock = lock;
1284 reader_writer_lock_lock(lock);
1285 return this;
1288 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ */
1289 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QEAA@XZ */
1290 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_dtor, 4)
1291 void __thiscall reader_writer_lock_scoped_lock_dtor(reader_writer_lock_scoped_lock *this)
1293 TRACE("(%p)\n", this);
1294 reader_writer_lock_unlock(this->lock);
1297 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
1298 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
1299 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_ctor, 8)
1300 reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_read_ctor(
1301 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
1303 TRACE("(%p %p)\n", this, lock);
1305 this->lock = lock;
1306 reader_writer_lock_lock_read(lock);
1307 return this;
1310 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ */
1311 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@XZ */
1312 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_dtor, 4)
1313 void __thiscall reader_writer_lock_scoped_lock_read_dtor(reader_writer_lock_scoped_lock *this)
1315 TRACE("(%p)\n", this);
1316 reader_writer_lock_unlock(this->lock);
/* Concurrency::details::_ReentrantBlockingLock: thin wrapper around a
 * Win32 CRITICAL_SECTION (recursive by nature). */
typedef struct {
    CRITICAL_SECTION cs;
} _ReentrantBlockingLock;
1323 /* ??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
1324 /* ??0_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
1325 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_ctor, 4)
1326 _ReentrantBlockingLock* __thiscall _ReentrantBlockingLock_ctor(_ReentrantBlockingLock *this)
1328 TRACE("(%p)\n", this);
1330 InitializeCriticalSection(&this->cs);
1331 this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": _ReentrantBlockingLock");
1332 return this;
1335 /* ??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
1336 /* ??1_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
1337 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_dtor, 4)
1338 void __thiscall _ReentrantBlockingLock_dtor(_ReentrantBlockingLock *this)
1340 TRACE("(%p)\n", this);
1342 this->cs.DebugInfo->Spare[0] = 0;
1343 DeleteCriticalSection(&this->cs);
1346 /* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
1347 /* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
1348 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Acquire, 4)
1349 void __thiscall _ReentrantBlockingLock__Acquire(_ReentrantBlockingLock *this)
1351 TRACE("(%p)\n", this);
1352 EnterCriticalSection(&this->cs);
1355 /* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
1356 /* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
1357 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Release, 4)
1358 void __thiscall _ReentrantBlockingLock__Release(_ReentrantBlockingLock *this)
1360 TRACE("(%p)\n", this);
1361 LeaveCriticalSection(&this->cs);
1364 /* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ */
1365 /* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QEAA_NXZ */
1366 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__TryAcquire, 4)
1367 MSVCRT_bool __thiscall _ReentrantBlockingLock__TryAcquire(_ReentrantBlockingLock *this)
1369 TRACE("(%p)\n", this);
1370 return TryEnterCriticalSection(&this->cs);
1372 #endif
1374 #if _MSVCR_VER == 110
1375 static LONG shared_ptr_lock;
1377 void __cdecl _Lock_shared_ptr_spin_lock(void)
1379 LONG l = 0;
1381 while(InterlockedCompareExchange(&shared_ptr_lock, 1, 0) != 0) {
1382 if(l++ == 1000) {
1383 Sleep(0);
1384 l = 0;
/* Releases the global shared_ptr spin lock.
 * NOTE(review): plain (non-interlocked) store with no release barrier —
 * appears to rely on x86-style store ordering; confirm for other archs. */
void __cdecl _Unlock_shared_ptr_spin_lock(void)
{
    shared_ptr_lock = 0;
}
1393 #endif
1395 /**********************************************************************
1396 * msvcrt_free_locks (internal)
1398 * Uninitialize all mt locks. Assume that neither _lock or _unlock will
1399 * be called once we're calling this routine (ie _LOCKTAB_LOCK can be deleted)
1402 void msvcrt_free_locks(void)
1404 int i;
1406 TRACE( ": uninitializing all mtlocks\n" );
1408 /* Uninitialize the table */
1409 for( i=0; i < _TOTAL_LOCKS; i++ )
1411 if( lock_table[ i ].bInit )
1413 msvcrt_uninitialize_mlock( i );
1417 #if _MSVCR_VER >= 100
1418 if(keyed_event)
1419 NtClose(keyed_event);
1420 #endif