wow64: Add thunks for the I/O completion syscalls.
[wine.git] / dlls / msvcrt / lock.c
blob74156aa7db3d9b503e5939d62218deea77b1169c
1 /*
2 * Copyright (c) 2002, TransGaming Technologies Inc.
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with this library; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
19 #include <stdarg.h>
20 #include <stdbool.h>
22 #include "wine/debug.h"
23 #include "windef.h"
24 #include "winbase.h"
25 #include "winternl.h"
26 #include "wine/heap.h"
27 #include "msvcrt.h"
28 #include "cppexcept.h"
29 #include "mtdll.h"
30 #include "cxx.h"
32 WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);
/* One entry per CRT lock id: a lazily-initialized critical section
 * plus a flag recording whether it has been initialized yet. */
typedef struct
{
    BOOL   bInit;            /* TRUE once crit has been initialized */
    CRITICAL_SECTION crit;
} LOCKTABLEENTRY;

/* Table of all CRT locks, indexed by lock number (see mtdll.h). */
static LOCKTABLEENTRY lock_table[ _TOTAL_LOCKS ];
/* Record whether lock table entry 'locknum' has been initialized. */
static inline void msvcrt_mlock_set_entry_initialized( int locknum, BOOL initialized )
{
    lock_table[ locknum ].bInit = initialized;
}
/* Initialize the critical section for lock 'locknum' and mark it ready. */
static inline void msvcrt_initialize_mlock( int locknum )
{
    InitializeCriticalSection( &(lock_table[ locknum ].crit) );
    /* Name the critical section for debugging purposes. */
    lock_table[ locknum ].crit.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": LOCKTABLEENTRY.crit");
    msvcrt_mlock_set_entry_initialized( locknum, TRUE );
}
/* Tear down the critical section for lock 'locknum' and mark it unused. */
static inline void msvcrt_uninitialize_mlock( int locknum )
{
    lock_table[ locknum ].crit.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection( &(lock_table[ locknum ].crit) );
    msvcrt_mlock_set_entry_initialized( locknum, FALSE );
}
/**********************************************************************
 *              msvcrt_init_mt_locks (internal)
 *
 * Initialize the table lock. All other locks will be initialized
 * upon first use.
 */
void msvcrt_init_mt_locks(void)
{
    int i;

    TRACE( "initializing mtlocks\n" );

    /* Initialize the table: mark every entry as not yet created. */
    for( i=0; i < _TOTAL_LOCKS; i++ )
    {
        msvcrt_mlock_set_entry_initialized( i, FALSE );
    }

    /* Initialize our lock table lock; it guards lazy creation in _lock(). */
    msvcrt_initialize_mlock( _LOCKTAB_LOCK );
}
/**********************************************************************
 *              _lock (MSVCRT.@)
 *
 * Acquire CRT lock 'locknum', creating its critical section on first use.
 */
void CDECL _lock( int locknum )
{
    TRACE( "(%d)\n", locknum );

    /* If the lock doesn't exist yet, create it */
    if( lock_table[ locknum ].bInit == FALSE )
    {
        /* Lock while we're changing the lock table */
        _lock( _LOCKTAB_LOCK );

        /* Check again if we've got a bit of a race on lock creation
         * (double-checked init: creation is serialized by _LOCKTAB_LOCK). */
        if( lock_table[ locknum ].bInit == FALSE )
        {
            TRACE( ": creating lock #%d\n", locknum );
            msvcrt_initialize_mlock( locknum );
        }

        /* Unlock ourselves */
        _unlock( _LOCKTAB_LOCK );
    }

    EnterCriticalSection( &(lock_table[ locknum ].crit) );
}
/**********************************************************************
 *              _unlock (MSVCRT.@)
 *
 * NOTE: There is no error detection to make sure the lock exists and is acquired.
 */
void CDECL _unlock( int locknum )
{
    TRACE( "(%d)\n", locknum );

    LeaveCriticalSection( &(lock_table[ locknum ].crit) );
}
123 #if _MSVCR_VER >= 100
/* Progression of a spin-wait: spin first, then yield, then give up. */
typedef enum
{
    SPINWAIT_INIT,   /* not started; _SpinOnce() will reset the counter */
    SPINWAIT_SPIN,   /* busy-spinning, decrementing the spin counter */
    SPINWAIT_YIELD,  /* spin budget exhausted; yield the CPU once */
    SPINWAIT_DONE    /* spinning finished; caller should block instead */
} SpinWait_state;
/* Callback invoked when the spin-wait decides to yield the processor. */
typedef void (__cdecl *yield_func)(void);

typedef struct
{
    ULONG spin;              /* remaining spin iterations */
    ULONG unknown;           /* non-zero: yielding variant (calls yield_func) */
    SpinWait_state state;
    yield_func yield_func;
} SpinWait;
/* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
/* Spin budget: 4000 iterations on SMP machines, 0 on uniprocessor
 * (spinning is pointless when no other CPU can release the lock). */
unsigned int __cdecl SpinCount__Value(void)
{
    /* Cached after the first call; a racy double-init is benign since
     * both threads would compute the same value. */
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors>1 ? 4000 : 0;
    }

    return val;
}
159 /* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
160 /* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
161 DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
162 SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
164 TRACE("(%p %p)\n", this, yf);
166 this->state = SPINWAIT_INIT;
167 this->unknown = 1;
168 this->yield_func = yf;
169 return this;
172 /* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
173 /* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
174 DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
175 SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
177 TRACE("(%p %p)\n", this, yf);
179 this->state = SPINWAIT_INIT;
180 this->unknown = 0;
181 this->yield_func = yf;
182 return this;
/* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
/* Destructor: SpinWait owns no resources, so nothing to release. */
DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
void __thiscall SpinWait_dtor(SpinWait *this)
{
    TRACE("(%p)\n", this);
}
/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
/* Invoke the yield callback, but only for the yielding variant. */
DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
void __thiscall SpinWait__DoYield(SpinWait *this)
{
    TRACE("(%p)\n", this);

    if(this->unknown)
        this->yield_func();
}
/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
/* Number of busy iterations per _SpinOnce() call; always one here. */
DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
{
    TRACE("(%p)\n", this);
    return 1;
}
/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
/* Set the spin budget; a budget of zero skips straight to yielding. */
DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
{
    TRACE("(%p %d)\n", this, spin);

    this->spin = spin;
    this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
}
/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
/* Restart the spin-wait with the machine-dependent default spin budget. */
DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
void __thiscall SpinWait__Reset(SpinWait *this)
{
    SpinWait__SetSpinCount(this, SpinCount__Value());
}
/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
/* Consume one spin iteration; returns TRUE while budget remains.
 * NOTE(review): spin is ULONG, so decrementing from 0 wraps to a huge
 * value and this returns TRUE — presumably callers never call this with
 * an exhausted budget; confirm against native behaviour. */
DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
{
    TRACE("(%p)\n", this);

    this->spin--;
    return this->spin > 0;
}
/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
/* Advance the spin state machine one step.
 * Returns TRUE while the caller should keep spinning/yielding,
 * FALSE once the budget is exhausted (state also resets for reuse). */
DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
bool __thiscall SpinWait__SpinOnce(SpinWait *this)
{
    switch(this->state) {
    case SPINWAIT_INIT:
        SpinWait__Reset(this);
        /* fall through */
    case SPINWAIT_SPIN:
        /* Interlocked because the counter layout is shared with native code;
         * when the budget hits zero, yielding variants yield once more. */
        InterlockedDecrement((LONG*)&this->spin);
        if(!this->spin)
            this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
        return TRUE;
    case SPINWAIT_YIELD:
        this->state = SPINWAIT_DONE;
        this->yield_func();
        return TRUE;
    default:
        /* SPINWAIT_DONE: re-arm for the next use and tell the caller to block. */
        SpinWait__Reset(this);
        return FALSE;
    }
}
/* Process-wide keyed event used to block/wake threads in the Concurrency
 * primitives below; created lazily by the first ctor that needs it. */
static HANDLE keyed_event;

/* keep in sync with msvcp90/msvcp90.h */
typedef struct cs_queue
{
    struct cs_queue *next;   /* next waiter in the lock's queue */
#if _MSVCR_VER >= 110
    BOOL free;               /* set when a timed-out node may be freed */
    int unknown;
#endif
} cs_queue;

typedef struct
{
    ULONG_PTR unk_thread_id; /* id of the owning thread, 0 when unlocked */
    cs_queue unk_active;     /* queue node standing in for the current owner */
#if _MSVCR_VER >= 110
    void *unknown[2];
#else
    void *unknown[1];
#endif
    cs_queue *head;          /* front of the waiter queue */
    void *tail;              /* back of the waiter queue; NULL when unlocked */
} critical_section;
/* ??0critical_section@Concurrency@@QAE@XZ */
/* ??0critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
critical_section* __thiscall critical_section_ctor(critical_section *this)
{
    TRACE("(%p)\n", this);

    /* Lazily create the shared keyed event; the interlocked exchange makes
     * concurrent construction safe — the loser closes its extra handle. */
    if(!keyed_event) {
        HANDLE event;

        NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
        if(InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
            NtClose(event);
    }

    this->unk_thread_id = 0;
    this->head = this->tail = NULL;
    return this;
}
/* ??1critical_section@Concurrency@@QAE@XZ */
/* ??1critical_section@Concurrency@@QEAA@XZ */
/* Destructor: nothing to free — the keyed event is shared process-wide. */
DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
void __thiscall critical_section_dtor(critical_section *this)
{
    TRACE("(%p)\n", this);
}
/* Yield callback for SpinWait: give up the rest of this timeslice. */
static void __cdecl spin_wait_yield(void)
{
    Sleep(0);
}
/* Spin until another thread publishes q->next.  Needed because a waiter
 * swaps itself into 'tail' before linking itself to its predecessor. */
static inline void spin_wait_for_next_cs(cs_queue *q)
{
    SpinWait sw;

    if(q->next) return;

    SpinWait_ctor(&sw, &spin_wait_yield);
    SpinWait__Reset(&sw);
    while(!q->next)
        SpinWait__SpinOnce(&sw);
    SpinWait_dtor(&sw);
}
/* Install the current thread as the lock owner: adopt q's successor into
 * the embedded unk_active node and point head at it. */
static inline void cs_set_head(critical_section *cs, cs_queue *q)
{
    cs->unk_thread_id = GetCurrentThreadId();
    cs->unk_active.next = q->next;
    cs->head = &cs->unk_active;
}
/* Core blocking acquire: enqueue node q at the tail; if a predecessor
 * exists, wait on the keyed event until the predecessor releases us. */
static inline void cs_lock(critical_section *cs, cs_queue *q)
{
    cs_queue *last;

    if(cs->unk_thread_id == GetCurrentThreadId())
        throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");

    memset(q, 0, sizeof(*q));
    last = InterlockedExchangePointer(&cs->tail, q);
    if(last) {
        /* Link behind the previous tail, then block until woken by unlock(). */
        last->next = q;
        NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
    }

    cs_set_head(cs, q);
    /* Replace our stack node with the embedded unk_active node; if another
     * thread enqueued behind us meanwhile, wait for its link to appear. */
    if(InterlockedCompareExchangePointer(&cs->tail, &cs->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        cs->unk_active.next = q->next;
    }
}
/* ?lock@critical_section@Concurrency@@QAEXXZ */
/* ?lock@critical_section@Concurrency@@QEAAXXZ */
/* Blocking acquire; the queue node lives on this thread's stack, which is
 * safe because cs_lock() does not return until the node is unlinked. */
DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
void __thiscall critical_section_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);
    cs_lock(this, &q);
}
/* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
/* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
/* Non-blocking acquire: succeeds only if the lock is currently free
 * (tail == NULL).  Returns FALSE on contention or recursive attempts. */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
bool __thiscall critical_section_try_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);

    if(this->unk_thread_id == GetCurrentThreadId())
        return FALSE;

    memset(&q, 0, sizeof(q));
    if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
        cs_set_head(this, &q);
        /* Swap our stack node for the embedded owner node, as in cs_lock(). */
        if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
            spin_wait_for_next_cs(&q);
            this->unk_active.next = q.next;
        }
        return TRUE;
    }
    return FALSE;
}
/* ?unlock@critical_section@Concurrency@@QAEXXZ */
/* ?unlock@critical_section@Concurrency@@QEAAXXZ */
/* Release the lock.  Fast path: no waiters, CAS tail back to NULL.
 * Slow path: wake the next waiter, skipping (and freeing) any heap nodes
 * whose timed wait already expired (msvcr110+ try_lock_for). */
DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
void __thiscall critical_section_unlock(critical_section *this)
{
    TRACE("(%p)\n", this);

    this->unk_thread_id = 0;
    this->head = NULL;
    if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
            == &this->unk_active) return;
    /* A waiter exists but may not have linked itself yet. */
    spin_wait_for_next_cs(&this->unk_active);

#if _MSVCR_VER >= 110
    while(1) {
        cs_queue *next;

        /* If the next node is not marked free, it's a live waiter: stop. */
        if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
            break;

        next = this->unk_active.next;
        /* Expired node was also the tail: the queue is now empty. */
        if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
            HeapFree(GetProcessHeap(), 0, next);
            return;
        }
        spin_wait_for_next_cs(next);

        this->unk_active.next = next->next;
        HeapFree(GetProcessHeap(), 0, next);
    }
#endif

    /* Hand the lock to the next waiter blocked in cs_lock(). */
    NtReleaseKeyedEvent(keyed_event, this->unk_active.next, 0, NULL);
}
/* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
/* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
/* The "native handle" is simply the critical_section object itself. */
DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
critical_section* __thiscall critical_section_native_handle(critical_section *this)
{
    TRACE("(%p)\n", this);
    return this;
}
#if _MSVCR_VER >= 110
/* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
/* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
/* Acquire with a timeout (milliseconds).  The queue node is heap-allocated
 * because on timeout it must outlive this call: unlock() frees it later
 * after seeing its 'free' flag. */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
bool __thiscall critical_section_try_lock_for(
        critical_section *this, unsigned int timeout)
{
    cs_queue *q, *last;

    TRACE("(%p %d)\n", this, timeout);

    if(this->unk_thread_id == GetCurrentThreadId())
        throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");

    /* Out of memory: degrade to a plain non-blocking attempt. */
    if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
        return critical_section_try_lock(this);

    last = InterlockedExchangePointer(&this->tail, q);
    if(last) {
        LARGE_INTEGER to;
        NTSTATUS status;
        FILETIME ft;

        last->next = q;
        /* Absolute NT deadline = now + timeout ms (100ns units). */
        GetSystemTimeAsFileTime(&ft);
        to.QuadPart = ((LONGLONG)ft.dwHighDateTime<<32) +
            ft.dwLowDateTime + (LONGLONG)timeout*10000;
        status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
        if(status == STATUS_TIMEOUT) {
            /* First to set 'free' wins: unlock() will skip and free the node. */
            if(!InterlockedExchange(&q->free, TRUE))
                return FALSE;
            /* A thread has signaled the event and is block waiting. */
            /* We need to catch the event to wake the thread. */
            NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
        }
    }

    cs_set_head(this, q);
    if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        this->unk_active.next = q->next;
    }

    HeapFree(GetProcessHeap(), 0, q);
    return TRUE;
}
#endif
/* RAII-style lock holder: acquires in the ctor, releases in the dtor.
 * The union matches the native object's size/layout. */
typedef struct
{
    critical_section *cs;
    union {
        cs_queue q;              /* queue node used for the duration of the lock */
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } lock;
} critical_section_scoped_lock;
/* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
/* Acquire 'cs' for the lifetime of the scoped_lock object. */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
        critical_section_scoped_lock *this, critical_section *cs)
{
    TRACE("(%p %p)\n", this, cs);
    this->cs = cs;
    cs_lock(this->cs, &this->lock.q);
    return this;
}
/* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
/* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
/* Release the critical section acquired by the constructor. */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(this->cs);
}
/* Non-reentrant PPL lock: a thin wrapper around critical_section,
 * which already rejects recursive acquisition. */
typedef struct
{
    critical_section cs;
} _NonReentrantPPLLock;
/* ??0_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??0_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock_ctor, 4)
_NonReentrantPPLLock* __thiscall _NonReentrantPPLLock_ctor(_NonReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    critical_section_ctor(&this->cs);
    return this;
}
/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
/* Acquire the lock; 'q' is caller-provided storage for the queue node. */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Acquire, 8)
void __thiscall _NonReentrantPPLLock__Acquire(_NonReentrantPPLLock *this, cs_queue *q)
{
    TRACE("(%p %p)\n", this, q);
    cs_lock(&this->cs, q);
}
/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QEAAXXZ */
/* Release the underlying critical section. */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Release, 4)
void __thiscall _NonReentrantPPLLock__Release(_NonReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(&this->cs);
}
/* RAII holder for _NonReentrantPPLLock; layout mirrors the native object. */
typedef struct
{
    _NonReentrantPPLLock *lock;
    union {
        cs_queue q;              /* queue node used while the lock is held */
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _NonReentrantPPLLock__Scoped_lock;
/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
/* Acquire 'lock' for the lifetime of this scoped object. */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_ctor, 8)
_NonReentrantPPLLock__Scoped_lock* __thiscall _NonReentrantPPLLock__Scoped_lock_ctor(
        _NonReentrantPPLLock__Scoped_lock *this, _NonReentrantPPLLock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    _NonReentrantPPLLock__Acquire(this->lock, &this->wait.q);
    return this;
}
/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
/* Release the lock acquired by the constructor. */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_dtor, 4)
void __thiscall _NonReentrantPPLLock__Scoped_lock_dtor(_NonReentrantPPLLock__Scoped_lock *this)
{
    TRACE("(%p)\n", this);

    _NonReentrantPPLLock__Release(this->lock);
}
/* Reentrant PPL lock: critical_section plus owner/recursion bookkeeping. */
typedef struct
{
    critical_section cs;
    LONG count;              /* recursion depth of the current owner */
    LONG owner;              /* owning thread id, -1 when unowned */
} _ReentrantPPLLock;
616 /* ??0_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
617 /* ??0_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
618 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock_ctor, 4)
619 _ReentrantPPLLock* __thiscall _ReentrantPPLLock_ctor(_ReentrantPPLLock *this)
621 TRACE("(%p)\n", this);
623 critical_section_ctor(&this->cs);
624 this->count = 0;
625 this->owner = -1;
626 return this;
/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
/* Acquire the lock, bumping the recursion count if we already own it. */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Acquire, 8)
void __thiscall _ReentrantPPLLock__Acquire(_ReentrantPPLLock *this, cs_queue *q)
{
    TRACE("(%p %p)\n", this, q);

    /* Reentrant fast path: owner reads its own id, so no race with itself. */
    if(this->owner == GetCurrentThreadId()) {
        this->count++;
        return;
    }

    cs_lock(&this->cs, q);
    this->count++;
    this->owner = GetCurrentThreadId();
}
/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QEAAXXZ */
/* Drop one level of recursion; release the lock when the count hits zero. */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Release, 4)
void __thiscall _ReentrantPPLLock__Release(_ReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    this->count--;
    if(this->count)
        return;

    /* Clear owner before unlocking so a new acquirer sees a clean state. */
    this->owner = -1;
    critical_section_unlock(&this->cs);
}
/* RAII holder for _ReentrantPPLLock; layout mirrors the native object. */
typedef struct
{
    _ReentrantPPLLock *lock;
    union {
        cs_queue q;              /* queue node used while the lock is held */
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _ReentrantPPLLock__Scoped_lock;
/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
/* Acquire 'lock' for the lifetime of this scoped object. */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_ctor, 8)
_ReentrantPPLLock__Scoped_lock* __thiscall _ReentrantPPLLock__Scoped_lock_ctor(
        _ReentrantPPLLock__Scoped_lock *this, _ReentrantPPLLock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    _ReentrantPPLLock__Acquire(this->lock, &this->wait.q);
    return this;
}
/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
/* Release one level of the lock acquired by the constructor. */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_dtor, 4)
void __thiscall _ReentrantPPLLock__Scoped_lock_dtor(_ReentrantPPLLock__Scoped_lock *this)
{
    TRACE("(%p)\n", this);

    _ReentrantPPLLock__Release(this->lock);
}
/* ?_GetConcurrency@details@Concurrency@@YAIXZ */
/* Number of hardware threads available; cached after the first call.
 * A racy double-init is benign — both threads compute the same value. */
unsigned int __cdecl _GetConcurrency(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors;
    }

    return val;
}
/* States of thread_wait.signaled before an event satisfies the wait. */
#define EVT_RUNNING     (void*)1
#define EVT_WAITING     NULL

struct thread_wait;
typedef struct thread_wait_entry
{
    struct thread_wait *wait;            /* the wait operation this entry belongs to */
    struct thread_wait_entry *next;      /* doubly-linked list of waiters on one event */
    struct thread_wait_entry *prev;
} thread_wait_entry;

typedef struct thread_wait
{
    void *signaled;          /* EVT_RUNNING / EVT_WAITING / the event* that fired */
    int pending_waits;       /* events still required before the wait completes */
    thread_wait_entry entries[1];        /* one per waited-on event (variable length) */
} thread_wait;

typedef struct
{
    thread_wait_entry *waiters;          /* pending waits registered on this event */
    INT_PTR signaled;                    /* non-zero when the event is set */
    critical_section cs;                 /* protects waiters and signaled */
} event;
/* Convert a millisecond timeout to a relative NT timeout (negative,
 * 100ns units); returns NULL for an infinite wait. */
static inline PLARGE_INTEGER evt_timeout(PLARGE_INTEGER pTime, unsigned int timeout)
{
    if(timeout == COOPERATIVE_TIMEOUT_INFINITE) return NULL;
    pTime->QuadPart = (ULONGLONG)timeout * -10000;
    return pTime;
}
/* Push 'entry' onto the front of the doubly-linked waiter list. */
static void evt_add_queue(thread_wait_entry **head, thread_wait_entry *entry)
{
    entry->next = *head;
    entry->prev = NULL;
    if(*head) (*head)->prev = entry;
    *head = entry;
}
/* Unlink 'entry' from the waiter list; safe if it was already unlinked
 * (prev == NULL and not the head). */
static void evt_remove_queue(thread_wait_entry **head, thread_wait_entry *entry)
{
    if(entry == *head)
        *head = entry->next;
    else if(entry->prev)
        entry->prev->next = entry->next;
    if(entry->next) entry->next->prev = entry->prev;
}
/* Deregister 'wait' from the first 'count' events and report the index of
 * the event that satisfied it, or COOPERATIVE_WAIT_TIMEOUT if none did. */
static size_t evt_end_wait(thread_wait *wait, event **events, int count)
{
    size_t i, ret = COOPERATIVE_WAIT_TIMEOUT;

    for(i = 0; i < count; i++) {
        critical_section_lock(&events[i]->cs);
        if(events[i] == wait->signaled) ret = i;
        evt_remove_queue(&events[i]->waiters, &wait->entries[i]);
        critical_section_unlock(&events[i]->cs);
    }

    return ret;
}
/* Atomically move *state from 'from' to 'to'; non-zero on success. */
static inline int evt_transition(void **state, void *from, void *to)
{
    return InterlockedCompareExchangePointer(state, to, from) == from;
}
/* Wait on 'count' events (any-one or all, per wait_all) with a millisecond
 * timeout.  Registers an entry on each event, then blocks on the keyed
 * event unless already satisfied.  Returns the satisfying event's index or
 * COOPERATIVE_WAIT_TIMEOUT. */
static size_t evt_wait(thread_wait *wait, event **events, int count, bool wait_all, unsigned int timeout)
{
    int i;
    NTSTATUS status;
    LARGE_INTEGER ntto;

    wait->signaled = EVT_RUNNING;
    /* any-one waits need a single signal; all-waits need one per event */
    wait->pending_waits = wait_all ? count : 1;
    for(i = 0; i < count; i++) {
        wait->entries[i].wait = wait;

        critical_section_lock(&events[i]->cs);
        evt_add_queue(&events[i]->waiters, &wait->entries[i]);
        if(events[i]->signaled) {
            if(!InterlockedDecrement(&wait->pending_waits)) {
                /* Satisfied during registration: unwind only the i+1
                 * entries registered so far. */
                wait->signaled = events[i];
                critical_section_unlock(&events[i]->cs);

                return evt_end_wait(wait, events, i+1);
            }
        }
        critical_section_unlock(&events[i]->cs);
    }

    if(!timeout)
        return evt_end_wait(wait, events, count);

    /* Only block if no event_set() beat us to RUNNING -> WAITING. */
    if(!evt_transition(&wait->signaled, EVT_RUNNING, EVT_WAITING))
        return evt_end_wait(wait, events, count);

    status = NtWaitForKeyedEvent(keyed_event, wait, 0, evt_timeout(&ntto, timeout));

    /* Timed out, but a signaller already claimed us: it is about to release
     * the keyed event, so absorb that release to avoid a stray wakeup. */
    if(status && !evt_transition(&wait->signaled, EVT_WAITING, EVT_RUNNING))
        NtWaitForKeyedEvent(keyed_event, wait, 0, NULL);

    return evt_end_wait(wait, events, count);
}
819 /* ??0event@Concurrency@@QAE@XZ */
820 /* ??0event@Concurrency@@QEAA@XZ */
821 DEFINE_THISCALL_WRAPPER(event_ctor, 4)
822 event* __thiscall event_ctor(event *this)
824 TRACE("(%p)\n", this);
826 this->waiters = NULL;
827 this->signaled = FALSE;
828 critical_section_ctor(&this->cs);
830 return this;
/* ??1event@Concurrency@@QAE@XZ */
/* ??1event@Concurrency@@QEAA@XZ */
/* Destroy the event; waits still registered at this point are a caller bug. */
DEFINE_THISCALL_WRAPPER(event_dtor, 4)
void __thiscall event_dtor(event *this)
{
    TRACE("(%p)\n", this);
    critical_section_dtor(&this->cs);
    if(this->waiters)
        ERR("there's a wait on destroyed event\n");
}
/* ?reset@event@Concurrency@@QAEXXZ */
/* ?reset@event@Concurrency@@QEAAXXZ */
/* Clear the event.  Each registered wait gets its pending count bumped
 * back up, undoing the credit it took when it saw the event signaled. */
DEFINE_THISCALL_WRAPPER(event_reset, 4)
void __thiscall event_reset(event *this)
{
    thread_wait_entry *entry;

    TRACE("(%p)\n", this);

    critical_section_lock(&this->cs);
    if(this->signaled) {
        this->signaled = FALSE;
        for(entry=this->waiters; entry; entry = entry->next)
            InterlockedIncrement(&entry->wait->pending_waits);
    }
    critical_section_unlock(&this->cs);
}
/* ?set@event@Concurrency@@QAEXXZ */
/* ?set@event@Concurrency@@QEAAXXZ */
/* Signal the event.  Waits whose pending count drops to zero and that are
 * actually blocked (EVT_WAITING) are collected under the lock and woken
 * afterwards, so NtReleaseKeyedEvent is never called while holding cs. */
DEFINE_THISCALL_WRAPPER(event_set, 4)
void __thiscall event_set(event *this)
{
    thread_wait_entry *wakeup = NULL;
    thread_wait_entry *entry, *next;

    TRACE("(%p)\n", this);

    critical_section_lock(&this->cs);
    if(!this->signaled) {
        this->signaled = TRUE;
        for(entry=this->waiters; entry; entry=next) {
            next = entry->next;
            if(!InterlockedDecrement(&entry->wait->pending_waits)) {
                /* Claim the wait; only a blocked waiter needs a wakeup. */
                if(InterlockedExchangePointer(&entry->wait->signaled, this) == EVT_WAITING) {
                    evt_remove_queue(&this->waiters, entry);
                    evt_add_queue(&wakeup, entry);
                }
            }
        }
    }
    critical_section_unlock(&this->cs);

    for(entry=wakeup; entry; entry=next) {
        next = entry->next;
        entry->next = entry->prev = NULL;
        NtReleaseKeyedEvent(keyed_event, entry->wait, 0, NULL);
    }
}
/* ?wait@event@Concurrency@@QAEII@Z */
/* ?wait@event@Concurrency@@QEAA_KI@Z */
/* Wait for this event; returns 0 when signaled, COOPERATIVE_WAIT_TIMEOUT
 * otherwise.  A zero timeout is a pure poll. */
DEFINE_THISCALL_WRAPPER(event_wait, 8)
size_t __thiscall event_wait(event *this, unsigned int timeout)
{
    thread_wait wait;
    size_t signaled;

    TRACE("(%p %u)\n", this, timeout);

    critical_section_lock(&this->cs);
    signaled = this->signaled;
    critical_section_unlock(&this->cs);

    if(!timeout) return signaled ? 0 : COOPERATIVE_WAIT_TIMEOUT;
    return signaled ? 0 : evt_wait(&wait, &this, 1, FALSE, timeout);
}
/* ?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z */
/* ?wait_for_multiple@event@Concurrency@@SA_KPEAPEAV12@_K_NI@Z */
/* Wait for any (wait_all == FALSE) or all of 'count' events.
 * NOTE(review): returns int while evt_wait yields size_t — on 64-bit this
 * truncates COOPERATIVE_WAIT_TIMEOUT; verify against the export signature. */
int __cdecl event_wait_for_multiple(event **events, size_t count, bool wait_all, unsigned int timeout)
{
    thread_wait *wait;
    size_t ret;

    TRACE("(%p %Iu %d %u)\n", events, count, wait_all, timeout);

    if(count == 0)
        return 0;

    /* thread_wait has a variable-length entries[] tail: one per event. */
    wait = heap_alloc(FIELD_OFFSET(thread_wait, entries[count]));
    if(!wait)
        throw_exception(EXCEPTION_BAD_ALLOC, 0, "bad allocation");
    ret = evt_wait(wait, events, count, wait_all, timeout);
    heap_free(wait);

    return ret;
}
932 #endif
934 #if _MSVCR_VER >= 110
/* Node in the condition variable's waiter list.  'expired' arbitrates
 * between a timed-out waiter and a notifier: whoever flips it second is
 * responsible for cleanup/wakeup. */
typedef struct cv_queue {
    struct cv_queue *next;
    BOOL expired;
} cv_queue;

typedef struct {
    /* cv_queue structure is not binary compatible */
    cv_queue *queue;             /* LIFO list of blocked waiters */
    critical_section lock;       /* protects queue */
} _Condition_variable;
946 /* ??0_Condition_variable@details@Concurrency@@QAE@XZ */
947 /* ??0_Condition_variable@details@Concurrency@@QEAA@XZ */
948 DEFINE_THISCALL_WRAPPER(_Condition_variable_ctor, 4)
949 _Condition_variable* __thiscall _Condition_variable_ctor(_Condition_variable *this)
951 TRACE("(%p)\n", this);
953 this->queue = NULL;
954 critical_section_ctor(&this->lock);
955 return this;
/* ??1_Condition_variable@details@Concurrency@@QAE@XZ */
/* ??1_Condition_variable@details@Concurrency@@QEAA@XZ */
/* Destroy the condition variable, freeing leftover expired queue nodes.
 * A non-expired node means a thread is still blocked — a caller bug. */
DEFINE_THISCALL_WRAPPER(_Condition_variable_dtor, 4)
void __thiscall _Condition_variable_dtor(_Condition_variable *this)
{
    TRACE("(%p)\n", this);

    while(this->queue) {
        cv_queue *next = this->queue->next;
        if(!this->queue->expired)
            ERR("there's an active wait\n");
        HeapFree(GetProcessHeap(), 0, this->queue);
        this->queue = next;
    }
    critical_section_dtor(&this->lock);
}
/* ?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z */
/* ?wait@_Condition_variable@details@Concurrency@@QEAAXAEAVcritical_section@3@@Z */
/* Untimed wait: enqueue a stack node, release 'cs', block on the keyed
 * event until a notify_* releases us, then reacquire 'cs'.  The node can
 * live on the stack because this call outlives its queue membership. */
DEFINE_THISCALL_WRAPPER(_Condition_variable_wait, 8)
void __thiscall _Condition_variable_wait(_Condition_variable *this, critical_section *cs)
{
    cv_queue q;

    TRACE("(%p, %p)\n", this, cs);

    critical_section_lock(&this->lock);
    q.next = this->queue;
    q.expired = FALSE;
    this->queue = &q;
    critical_section_unlock(&this->lock);

    critical_section_unlock(cs);
    NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    critical_section_lock(cs);
}
/* ?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z */
/* ?wait_for@_Condition_variable@details@Concurrency@@QEAA_NAEAVcritical_section@3@I@Z */
/* Timed wait (milliseconds).  The node is heap-allocated because on timeout
 * it stays queued; ownership passes to notify_*/
/* or the dtor, which free it
 * after seeing 'expired'.  Returns TRUE if notified, FALSE on timeout. */
DEFINE_THISCALL_WRAPPER(_Condition_variable_wait_for, 12)
bool __thiscall _Condition_variable_wait_for(_Condition_variable *this,
        critical_section *cs, unsigned int timeout)
{
    LARGE_INTEGER to;
    NTSTATUS status;
    FILETIME ft;
    cv_queue *q;

    TRACE("(%p %p %d)\n", this, cs, timeout);

    if(!(q = HeapAlloc(GetProcessHeap(), 0, sizeof(cv_queue)))) {
        throw_exception(EXCEPTION_BAD_ALLOC, 0, "bad allocation");
    }

    critical_section_lock(&this->lock);
    q->next = this->queue;
    q->expired = FALSE;
    this->queue = q;
    critical_section_unlock(&this->lock);

    critical_section_unlock(cs);

    /* Absolute NT deadline = now + timeout ms (100ns units). */
    GetSystemTimeAsFileTime(&ft);
    to.QuadPart = ((LONGLONG)ft.dwHighDateTime << 32) +
        ft.dwLowDateTime + (LONGLONG)timeout * 10000;
    status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
    if(status == STATUS_TIMEOUT) {
        /* First to set 'expired' wins; the node is then freed by whoever
         * still holds it in the queue. */
        if(!InterlockedExchange(&q->expired, TRUE)) {
            critical_section_lock(cs);
            return FALSE;
        }
        else
            /* A notifier already claimed us and will release the keyed
             * event; absorb that release before continuing. */
            NtWaitForKeyedEvent(keyed_event, q, 0, 0);
    }

    HeapFree(GetProcessHeap(), 0, q);
    critical_section_lock(cs);
    return TRUE;
}
/* ?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ */
/* ?notify_one@_Condition_variable@details@Concurrency@@QEAAXXZ */
/* Wake a single waiter.  Expired (timed-out) nodes encountered on the way
 * are freed here and the search continues. */
DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_one, 4)
void __thiscall _Condition_variable_notify_one(_Condition_variable *this)
{
    cv_queue *node;

    TRACE("(%p)\n", this);

    /* Unlocked fast-path check; a racing waiter will simply block until
     * the next notify. */
    if(!this->queue)
        return;

    while(1) {
        critical_section_lock(&this->lock);
        node = this->queue;
        if(!node) {
            critical_section_unlock(&this->lock);
            return;
        }
        this->queue = node->next;
        critical_section_unlock(&this->lock);

        if(!InterlockedExchange(&node->expired, TRUE)) {
            /* Live waiter: wake it.  It owns the node from here on. */
            NtReleaseKeyedEvent(keyed_event, node, 0, NULL);
            return;
        } else {
            /* Timed out before we got here: free its heap node and retry. */
            HeapFree(GetProcessHeap(), 0, node);
        }
    }
}
/* ?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ */
/* ?notify_all@_Condition_variable@details@Concurrency@@QEAAXXZ */
/* Wake every waiter: detach the whole queue under the lock, then walk it,
 * releasing live waiters and freeing expired nodes. */
DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_all, 4)
void __thiscall _Condition_variable_notify_all(_Condition_variable *this)
{
    cv_queue *ptr;

    TRACE("(%p)\n", this);

    /* Unlocked fast-path check, as in notify_one(). */
    if(!this->queue)
        return;

    critical_section_lock(&this->lock);
    ptr = this->queue;
    this->queue = NULL;
    critical_section_unlock(&this->lock);

    while(ptr) {
        cv_queue *next = ptr->next;

        if(!InterlockedExchange(&ptr->expired, TRUE))
            NtReleaseKeyedEvent(keyed_event, ptr, 0, NULL);
        else
            HeapFree(GetProcessHeap(), 0, ptr);
        ptr = next;
    }
}
1096 #endif
1098 #if _MSVCR_VER >= 100
/* Queue node for threads waiting on a reader_writer_lock. */
typedef struct rwl_queue
{
    struct rwl_queue *next;
} rwl_queue;

/* Top bit of 'count': set while a writer is queued, blocking new readers. */
#define WRITER_WAITING 0x80000000
/* FIXME: reader_writer_lock structure is not binary compatible
 * it can't exceed 28/56 bytes */
typedef struct
{
    LONG count;              /* active reader count, plus WRITER_WAITING flag */
    LONG thread_id;          /* owning writer's thread id, 0 otherwise */
    rwl_queue active;        /* node standing in for the current owner */
    rwl_queue *writer_head;  /* front of the writer queue */
    rwl_queue *writer_tail;  /* back of the writer queue */
    rwl_queue *reader_head;  /* stack of waiting readers */
} reader_writer_lock;
/* ??0reader_writer_lock@Concurrency@@QAE@XZ */
/* ??0reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_ctor, 4)
reader_writer_lock* __thiscall reader_writer_lock_ctor(reader_writer_lock *this)
{
    TRACE("(%p)\n", this);

    /* Lazily create the shared keyed event (same pattern as
     * critical_section_ctor: the CAS loser closes its extra handle). */
    if (!keyed_event) {
        HANDLE event;

        NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
        if (InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
            NtClose(event);
    }

    memset(this, 0, sizeof(*this));
    return this;
}
/* ??1reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1reader_writer_lock@Concurrency@@QEAA@XZ */
/* Destructor: nothing owned to free; warn if the lock is still held. */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_dtor, 4)
void __thiscall reader_writer_lock_dtor(reader_writer_lock *this)
{
    TRACE("(%p)\n", this);

    if (this->thread_id != 0 || this->count)
        WARN("destroying locked reader_writer_lock\n");
}
1147 static inline void spin_wait_for_next_rwl(rwl_queue *q)
1149 SpinWait sw;
1151 if(q->next) return;
1153 SpinWait_ctor(&sw, &spin_wait_yield);
1154 SpinWait__Reset(&sw);
1155 while(!q->next)
1156 SpinWait__SpinOnce(&sw);
1157 SpinWait_dtor(&sw);
/* ?lock@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?lock@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock, 4)
void __thiscall reader_writer_lock_lock(reader_writer_lock *this)
{
    rwl_queue q = { NULL }, *last;

    TRACE("(%p)\n", this);

    /* The lock is not recursive for writers. */
    if (this->thread_id == GetCurrentThreadId())
        throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");

    /* Append our stack node to the writer queue. */
    last = InterlockedExchangePointer((void**)&this->writer_tail, &q);
    if (last) {
        /* Another writer is ahead of us: link behind it and sleep until
         * it hands the lock over in unlock(). */
        last->next = &q;
        NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    } else {
        this->writer_head = &q;
        /* Mark a writer as waiting; if readers were active (count != 0),
         * sleep until the last reader wakes us. */
        if (InterlockedOr(&this->count, WRITER_WAITING))
            NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    }

    /* We own the lock now; replace the stack node with the embedded
     * 'active' node so the stack frame may go away. */
    this->thread_id = GetCurrentThreadId();
    this->writer_head = &this->active;
    this->active.next = NULL;
    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
        /* A writer enqueued behind us meanwhile: wait for its link to be
         * published and carry it over to the 'active' node. */
        spin_wait_for_next_rwl(&q);
        this->active.next = q.next;
    }
}
/* ?lock_read@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?lock_read@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock_read, 4)
void __thiscall reader_writer_lock_lock_read(reader_writer_lock *this)
{
    rwl_queue q;

    TRACE("(%p)\n", this);

    /* Taking a read lock while holding the write lock would deadlock. */
    if (this->thread_id == GetCurrentThreadId())
        throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked as writer");

    /* Push our stack node onto the LIFO reader list. */
    do {
        q.next = this->reader_head;
    } while(InterlockedCompareExchangePointer((void**)&this->reader_head, &q, q.next) != q.next);

    if (!q.next) {
        /* We are the first queued reader and negotiate for the group. */
        rwl_queue *head;
        LONG count;

        /* Try to take a reader reference unless a writer is waiting. */
        while (!((count = this->count) & WRITER_WAITING))
            if (InterlockedCompareExchange(&this->count, count+1, count) == count) break;

        /* A writer holds or waits for the lock: sleep until unlock()
         * releases the readers (it also bumps count for us then). */
        if (count & WRITER_WAITING)
            NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);

        /* Wake every reader that stacked up behind us, taking one
         * reference per reader. */
        head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
        while(head && head != &q) {
            rwl_queue *next = head->next;
            InterlockedIncrement(&this->count);
            NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
            head = next;
        }
    } else {
        /* Another reader is negotiating; wait for it to wake us. */
        NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    }
}
/* ?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ */
/* ?try_lock@reader_writer_lock@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock, 4)
bool __thiscall reader_writer_lock_try_lock(reader_writer_lock *this)
{
    rwl_queue q = { NULL };

    TRACE("(%p)\n", this);

    /* Recursive write locking is not supported. */
    if (this->thread_id == GetCurrentThreadId())
        return FALSE;

    /* Only proceed if no writer is queued at all. */
    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &q, NULL))
        return FALSE;
    this->writer_head = &q;
    /* Claim the lock if there are no active readers either. */
    if (!InterlockedCompareExchange(&this->count, WRITER_WAITING, 0)) {
        this->thread_id = GetCurrentThreadId();
        this->writer_head = &this->active;
        this->active.next = NULL;
        if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
            /* A writer enqueued behind us; preserve its link. */
            spin_wait_for_next_rwl(&q);
            this->active.next = q.next;
        }
        return TRUE;
    }

    /* Readers are active: try to dequeue ourselves again. If we were still
     * the tail, nobody linked behind us and we can simply give up. */
    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, &q) == &q)
        return FALSE;
    /* A writer queued behind us; it becomes the head. Re-check the reader
     * count — if it dropped to zero meanwhile, we still get the lock. */
    spin_wait_for_next_rwl(&q);
    this->writer_head = q.next;
    if (!InterlockedOr(&this->count, WRITER_WAITING)) {
        this->thread_id = GetCurrentThreadId();
        this->writer_head = &this->active;
        this->active.next = q.next;
        return TRUE;
    }
    return FALSE;
}
1268 /* ?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ */
1269 /* ?try_lock_read@reader_writer_lock@Concurrency@@QEAA_NXZ */
1270 DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock_read, 4)
1271 bool __thiscall reader_writer_lock_try_lock_read(reader_writer_lock *this)
1273 LONG count;
1275 TRACE("(%p)\n", this);
1277 while (!((count = this->count) & WRITER_WAITING))
1278 if (InterlockedCompareExchange(&this->count, count+1, count) == count) return TRUE;
1279 return FALSE;
/* ?unlock@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?unlock@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_unlock, 4)
void __thiscall reader_writer_lock_unlock(reader_writer_lock *this)
{
    LONG count;
    rwl_queue *head, *next;

    TRACE("(%p)\n", this);

    /* Non-zero reader bits mean we are releasing a read lock. */
    if ((count = this->count) & ~WRITER_WAITING) {
        count = InterlockedDecrement(&this->count);
        if (count != WRITER_WAITING)
            return;
        /* We were the last reader and a writer is waiting: wake it. */
        NtReleaseKeyedEvent(keyed_event, this->writer_head, 0, NULL);
        return;
    }

    /* Releasing the write lock. */
    this->thread_id = 0;
    next = this->writer_head->next;
    if (next) {
        /* Hand the lock directly to the next queued writer. */
        NtReleaseKeyedEvent(keyed_event, next, 0, NULL);
        return;
    }

    /* No writer queued: clear the flag and wake all waiting readers,
     * taking one reader reference for each. */
    InterlockedAnd(&this->count, ~WRITER_WAITING);
    head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
    while (head) {
        next = head->next;
        InterlockedIncrement(&this->count);
        NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
        head = next;
    }

    /* Retire our queue node. If the tail moved, a writer enqueued
     * concurrently — restore WRITER_WAITING so it gets woken. */
    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, this->writer_head) == this->writer_head)
        return;
    InterlockedOr(&this->count, WRITER_WAITING);
}
/* RAII-style helper that holds a reader_writer_lock for its lifetime. */
typedef struct {
    reader_writer_lock *lock;  /* lock acquired by the ctor, released by the dtor */
} reader_writer_lock_scoped_lock;
1324 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
1325 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
1326 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_ctor, 8)
1327 reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_ctor(
1328 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
1330 TRACE("(%p %p)\n", this, lock);
1332 this->lock = lock;
1333 reader_writer_lock_lock(lock);
1334 return this;
1337 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ */
1338 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QEAA@XZ */
1339 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_dtor, 4)
1340 void __thiscall reader_writer_lock_scoped_lock_dtor(reader_writer_lock_scoped_lock *this)
1342 TRACE("(%p)\n", this);
1343 reader_writer_lock_unlock(this->lock);
1346 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
1347 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
1348 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_ctor, 8)
1349 reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_read_ctor(
1350 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
1352 TRACE("(%p %p)\n", this, lock);
1354 this->lock = lock;
1355 reader_writer_lock_lock_read(lock);
1356 return this;
1359 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ */
1360 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@XZ */
1361 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_dtor, 4)
1362 void __thiscall reader_writer_lock_scoped_lock_read_dtor(reader_writer_lock_scoped_lock *this)
1364 TRACE("(%p)\n", this);
1365 reader_writer_lock_unlock(this->lock);
/* Recursive blocking lock backed by a Win32 critical section. */
typedef struct {
    CRITICAL_SECTION cs;  /* owns the underlying critical section */
} _ReentrantBlockingLock;
1372 /* ??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
1373 /* ??0_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
1374 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_ctor, 4)
1375 _ReentrantBlockingLock* __thiscall _ReentrantBlockingLock_ctor(_ReentrantBlockingLock *this)
1377 TRACE("(%p)\n", this);
1379 InitializeCriticalSection(&this->cs);
1380 this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": _ReentrantBlockingLock");
1381 return this;
1384 /* ??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
1385 /* ??1_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
1386 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_dtor, 4)
1387 void __thiscall _ReentrantBlockingLock_dtor(_ReentrantBlockingLock *this)
1389 TRACE("(%p)\n", this);
1391 this->cs.DebugInfo->Spare[0] = 0;
1392 DeleteCriticalSection(&this->cs);
1395 /* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
1396 /* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
1397 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Acquire, 4)
1398 void __thiscall _ReentrantBlockingLock__Acquire(_ReentrantBlockingLock *this)
1400 TRACE("(%p)\n", this);
1401 EnterCriticalSection(&this->cs);
1404 /* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
1405 /* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
1406 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Release, 4)
1407 void __thiscall _ReentrantBlockingLock__Release(_ReentrantBlockingLock *this)
1409 TRACE("(%p)\n", this);
1410 LeaveCriticalSection(&this->cs);
1413 /* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ */
1414 /* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QEAA_NXZ */
1415 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__TryAcquire, 4)
1416 bool __thiscall _ReentrantBlockingLock__TryAcquire(_ReentrantBlockingLock *this)
1418 TRACE("(%p)\n", this);
1419 return TryEnterCriticalSection(&this->cs);
1422 /* ?wait@Concurrency@@YAXI@Z */
1423 void __cdecl Concurrency_wait(unsigned int time)
1425 static int once;
1427 if (!once++) FIXME("(%d) stub!\n", time);
1429 Sleep(time);
1431 #endif
1433 #if _MSVCR_VER == 110
1434 static LONG shared_ptr_lock;
1436 void __cdecl _Lock_shared_ptr_spin_lock(void)
1438 LONG l = 0;
1440 while(InterlockedCompareExchange(&shared_ptr_lock, 1, 0) != 0) {
1441 if(l++ == 1000) {
1442 Sleep(0);
1443 l = 0;
1448 void __cdecl _Unlock_shared_ptr_spin_lock(void)
1450 shared_ptr_lock = 0;
1452 #endif
1454 /**********************************************************************
1455 * msvcrt_free_locks (internal)
1457 * Uninitialize all mt locks. Assume that neither _lock or _unlock will
1458 * be called once we're calling this routine (ie _LOCKTAB_LOCK can be deleted)
1461 void msvcrt_free_locks(void)
1463 int i;
1465 TRACE( ": uninitializing all mtlocks\n" );
1467 /* Uninitialize the table */
1468 for( i=0; i < _TOTAL_LOCKS; i++ )
1470 if( lock_table[ i ].bInit )
1472 msvcrt_uninitialize_mlock( i );
1476 #if _MSVCR_VER >= 100
1477 if(keyed_event)
1478 NtClose(keyed_event);
1479 #endif