/* dlls/msvcrt/lock.c */
/*
 * Copyright (c) 2002, TransGaming Technologies Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <stdarg.h>

#include "wine/debug.h"
#include "windef.h"
#include "winbase.h"
#include "winternl.h"
#include "wine/heap.h"
#include "msvcrt.h"
#include "cppexcept.h"
#include "mtdll.h"
#include "cxx.h"

WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);

typedef struct
{
  BOOL             bInit;
  CRITICAL_SECTION crit;
} LOCKTABLEENTRY;

static LOCKTABLEENTRY lock_table[ _TOTAL_LOCKS ];

static inline void msvcrt_mlock_set_entry_initialized( int locknum, BOOL initialized )
{
  lock_table[ locknum ].bInit = initialized;
}

static inline void msvcrt_initialize_mlock( int locknum )
{
  InitializeCriticalSection( &(lock_table[ locknum ].crit) );
  lock_table[ locknum ].crit.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": LOCKTABLEENTRY.crit");
  msvcrt_mlock_set_entry_initialized( locknum, TRUE );
}

static inline void msvcrt_uninitialize_mlock( int locknum )
{
  lock_table[ locknum ].crit.DebugInfo->Spare[0] = 0;
  DeleteCriticalSection( &(lock_table[ locknum ].crit) );
  msvcrt_mlock_set_entry_initialized( locknum, FALSE );
}

/**********************************************************************
 *              msvcrt_init_mt_locks (internal)
 *
 * Initialize the table lock. All other locks will be initialized
 * upon first use.
 *
 */
void msvcrt_init_mt_locks(void)
{
  int i;

  TRACE( "initializing mtlocks\n" );

  /* Initialize the table */
  for( i=0; i < _TOTAL_LOCKS; i++ )
  {
    msvcrt_mlock_set_entry_initialized( i, FALSE );
  }

  /* Initialize our lock table lock */
  msvcrt_initialize_mlock( _LOCKTAB_LOCK );
}

/**********************************************************************
 *              _lock (MSVCRT.@)
 */
void CDECL _lock( int locknum )
{
  TRACE( "(%d)\n", locknum );

  /* If the lock doesn't exist yet, create it */
  if( lock_table[ locknum ].bInit == FALSE )
  {
    /* Lock while we're changing the lock table */
    _lock( _LOCKTAB_LOCK );

    /* Check again if we've got a bit of a race on lock creation */
    if( lock_table[ locknum ].bInit == FALSE )
    {
      TRACE( ": creating lock #%d\n", locknum );
      msvcrt_initialize_mlock( locknum );
    }

    /* Unlock ourselves */
    _unlock( _LOCKTAB_LOCK );
  }

  EnterCriticalSection( &(lock_table[ locknum ].crit) );
}

/**********************************************************************
 *              _unlock (MSVCRT.@)
 *
 * NOTE: There is no error detection to make sure the lock exists and is acquired.
 */
void CDECL _unlock( int locknum )
{
  TRACE( "(%d)\n", locknum );

  LeaveCriticalSection( &(lock_table[ locknum ].crit) );
}
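
/* Everything from here down to msvcrt_free_locks() implements the Concurrency
 * Runtime (ConcRT) synchronization primitives exported from msvcr100 and later;
 * the mangled-name comments above each entry point list the C++ symbols being
 * implemented, and the _MSVCR_VER checks select which of them a given runtime
 * version actually exports. */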
#if _MSVCR_VER >= 100
typedef enum
{
    SPINWAIT_INIT,
    SPINWAIT_SPIN,
    SPINWAIT_YIELD,
    SPINWAIT_DONE
} SpinWait_state;

typedef void (__cdecl *yield_func)(void);

typedef struct
{
    ULONG spin;
    ULONG unknown;
    SpinWait_state state;
    yield_func yield_func;
} SpinWait;
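
/* SpinWait is a small state machine driven by SpinWait__SpinOnce(): INIT loads
 * the spin count, SPIN busy-loops while counting it down, YIELD calls the
 * caller-supplied yield function once, and DONE tells the caller to stop
 * spinning and block instead.  Judging by the constructors and the template
 * parameters in the mangled names, the "unknown" field distinguishes the
 * yielding instantiation (_SpinWait<1>) from the non-yielding one
 * (_SpinWait<0>). */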
/* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
unsigned int __cdecl SpinCount__Value(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors>1 ? 4000 : 0;
    }

    return val;
}

/* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 1;
    this->yield_func = yf;
    return this;
}

/* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 0;
    this->yield_func = yf;
    return this;
}

/* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
void __thiscall SpinWait_dtor(SpinWait *this)
{
    TRACE("(%p)\n", this);
}

/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
void __thiscall SpinWait__DoYield(SpinWait *this)
{
    TRACE("(%p)\n", this);

    if(this->unknown)
        this->yield_func();
}

/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
{
    TRACE("(%p)\n", this);
    return 1;
}

/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
{
    TRACE("(%p %d)\n", this, spin);

    this->spin = spin;
    this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
}

/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
void __thiscall SpinWait__Reset(SpinWait *this)
{
    SpinWait__SetSpinCount(this, SpinCount__Value());
}

/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
MSVCRT_bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
{
    TRACE("(%p)\n", this);

    this->spin--;
    return this->spin > 0;
}

/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
MSVCRT_bool __thiscall SpinWait__SpinOnce(SpinWait *this)
{
    switch(this->state) {
    case SPINWAIT_INIT:
        SpinWait__Reset(this);
        /* fall through */
    case SPINWAIT_SPIN:
        InterlockedDecrement((LONG*)&this->spin);
        if(!this->spin)
            this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
        return TRUE;
    case SPINWAIT_YIELD:
        this->state = SPINWAIT_DONE;
        this->yield_func();
        return TRUE;
    default:
        SpinWait__Reset(this);
        return FALSE;
    }
}
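
/* All of the primitives below park blocked threads on a single NT keyed event:
 * a waiter calls NtWaitForKeyedEvent() with the address of its queue node as
 * the key, and whoever wakes it calls NtReleaseKeyedEvent() with the same key,
 * so one kernel object serves every lock, event and condition variable in this
 * file. */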
static HANDLE keyed_event;

/* keep in sync with msvcp90/msvcp90.h */
typedef struct cs_queue
{
    struct cs_queue *next;
#if _MSVCR_VER >= 110
    BOOL free;
    int unknown;
#endif
} cs_queue;
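
/* critical_section is a queue-based lock: "tail" always points at the most
 * recently arrived waiter, each new waiter links itself behind the previous
 * tail and then blocks on the keyed event keyed by its own cs_queue node, and
 * "unk_active" stands in for the current owner so that unlock can hand the
 * lock to unk_active.next.  The unk_/unknown names mirror a native layout that
 * is only partially understood. */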
typedef struct
{
    ULONG_PTR unk_thread_id;
    cs_queue unk_active;
#if _MSVCR_VER >= 110
    void *unknown[2];
#else
    void *unknown[1];
#endif
    cs_queue *head;
    void *tail;
} critical_section;

/* ??0critical_section@Concurrency@@QAE@XZ */
/* ??0critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
critical_section* __thiscall critical_section_ctor(critical_section *this)
{
    TRACE("(%p)\n", this);

    if(!keyed_event) {
        HANDLE event;

        NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
        if(InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
            NtClose(event);
    }

    this->unk_thread_id = 0;
    this->head = this->tail = NULL;
    return this;
}

/* ??1critical_section@Concurrency@@QAE@XZ */
/* ??1critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
void __thiscall critical_section_dtor(critical_section *this)
{
    TRACE("(%p)\n", this);
}

static void __cdecl spin_wait_yield(void)
{
    Sleep(0);
}

static inline void spin_wait_for_next_cs(cs_queue *q)
{
    SpinWait sw;

    if(q->next) return;

    SpinWait_ctor(&sw, &spin_wait_yield);
    SpinWait__Reset(&sw);
    while(!q->next)
        SpinWait__SpinOnce(&sw);
    SpinWait_dtor(&sw);
}

static inline void cs_set_head(critical_section *cs, cs_queue *q)
{
    cs->unk_thread_id = GetCurrentThreadId();
    cs->unk_active.next = q->next;
    cs->head = &cs->unk_active;
}

static inline void cs_lock(critical_section *cs, cs_queue *q)
{
    cs_queue *last;

    if(cs->unk_thread_id == GetCurrentThreadId())
        throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");

    memset(q, 0, sizeof(*q));
    last = InterlockedExchangePointer(&cs->tail, q);
    if(last) {
        last->next = q;
        NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
    }

    cs_set_head(cs, q);
    if(InterlockedCompareExchangePointer(&cs->tail, &cs->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        cs->unk_active.next = q->next;
    }
}

/* ?lock@critical_section@Concurrency@@QAEXXZ */
/* ?lock@critical_section@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
void __thiscall critical_section_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);
    cs_lock(this, &q);
}

/* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
/* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
MSVCRT_bool __thiscall critical_section_try_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);

    if(this->unk_thread_id == GetCurrentThreadId())
        return FALSE;

    memset(&q, 0, sizeof(q));
    if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
        cs_set_head(this, &q);
        if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
            spin_wait_for_next_cs(&q);
            this->unk_active.next = q.next;
        }
        return TRUE;
    }
    return FALSE;
}
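
/* Unlocking either clears "tail" (no waiter arrived) or wakes the first queued
 * waiter through the keyed event.  On msvcr110+ it must also skip over nodes
 * that were heap-allocated by critical_section_try_lock_for() and whose waiters
 * have already timed out; the interlocked "free" flag decides whether the
 * timed-out waiter or the unlocking thread ends up freeing such a node. */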
/* ?unlock@critical_section@Concurrency@@QAEXXZ */
/* ?unlock@critical_section@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
void __thiscall critical_section_unlock(critical_section *this)
{
    TRACE("(%p)\n", this);

    this->unk_thread_id = 0;
    this->head = NULL;
    if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
            == &this->unk_active) return;
    spin_wait_for_next_cs(&this->unk_active);

#if _MSVCR_VER >= 110
    while(1) {
        cs_queue *next;

        if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
            break;

        next = this->unk_active.next;
        if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
            HeapFree(GetProcessHeap(), 0, next);
            return;
        }
        spin_wait_for_next_cs(next);

        this->unk_active.next = next->next;
        HeapFree(GetProcessHeap(), 0, next);
    }
#endif

    NtReleaseKeyedEvent(keyed_event, this->unk_active.next, 0, NULL);
}

/* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
/* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
critical_section* __thiscall critical_section_native_handle(critical_section *this)
{
    TRACE("(%p)\n", this);
    return this;
}

#if _MSVCR_VER >= 110
/* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
/* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
MSVCRT_bool __thiscall critical_section_try_lock_for(
        critical_section *this, unsigned int timeout)
{
    cs_queue *q, *last;

    TRACE("(%p %d)\n", this, timeout);

    if(this->unk_thread_id == GetCurrentThreadId())
        throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");

    if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
        return critical_section_try_lock(this);

    last = InterlockedExchangePointer(&this->tail, q);
    if(last) {
        LARGE_INTEGER to;
        NTSTATUS status;
        FILETIME ft;

        last->next = q;
        GetSystemTimeAsFileTime(&ft);
        to.QuadPart = ((LONGLONG)ft.dwHighDateTime<<32) +
            ft.dwLowDateTime + (LONGLONG)timeout*10000;
        status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
        if(status == STATUS_TIMEOUT) {
            if(!InterlockedExchange(&q->free, TRUE))
                return FALSE;
            /* A thread has signaled the event and is block waiting. */
            /* We need to catch the event to wake the thread. */
            NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
        }
    }

    cs_set_head(this, q);
    if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        this->unk_active.next = q->next;
    }

    HeapFree(GetProcessHeap(), 0, q);
    return TRUE;
}
#endif

typedef struct
{
    critical_section *cs;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } lock;
} critical_section_scoped_lock;

/* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
        critical_section_scoped_lock *this, critical_section *cs)
{
    TRACE("(%p %p)\n", this, cs);
    this->cs = cs;
    cs_lock(this->cs, &this->lock.q);
    return this;
}

/* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
/* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(this->cs);
}

typedef struct
{
    critical_section cs;
} _NonReentrantPPLLock;

/* ??0_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??0_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock_ctor, 4)
_NonReentrantPPLLock* __thiscall _NonReentrantPPLLock_ctor(_NonReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    critical_section_ctor(&this->cs);
    return this;
}

/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Acquire, 8)
void __thiscall _NonReentrantPPLLock__Acquire(_NonReentrantPPLLock *this, cs_queue *q)
{
    TRACE("(%p %p)\n", this, q);
    cs_lock(&this->cs, q);
}

/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Release, 4)
void __thiscall _NonReentrantPPLLock__Release(_NonReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(&this->cs);
}

typedef struct
{
    _NonReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _NonReentrantPPLLock__Scoped_lock;

/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_ctor, 8)
_NonReentrantPPLLock__Scoped_lock* __thiscall _NonReentrantPPLLock__Scoped_lock_ctor(
        _NonReentrantPPLLock__Scoped_lock *this, _NonReentrantPPLLock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    _NonReentrantPPLLock__Acquire(this->lock, &this->wait.q);
    return this;
}

/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_dtor, 4)
void __thiscall _NonReentrantPPLLock__Scoped_lock_dtor(_NonReentrantPPLLock__Scoped_lock *this)
{
    TRACE("(%p)\n", this);

    _NonReentrantPPLLock__Release(this->lock);
}
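
/* _ReentrantPPLLock layers recursion on top of critical_section: "owner"
 * records the owning thread id and "count" the recursion depth, so nested
 * _Acquire calls from the owning thread only bump the count instead of
 * queuing on the lock again. */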
typedef struct
{
    critical_section cs;
    LONG count;
    LONG owner;
} _ReentrantPPLLock;

/* ??0_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??0_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock_ctor, 4)
_ReentrantPPLLock* __thiscall _ReentrantPPLLock_ctor(_ReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    critical_section_ctor(&this->cs);
    this->count = 0;
    this->owner = -1;
    return this;
}

/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Acquire, 8)
void __thiscall _ReentrantPPLLock__Acquire(_ReentrantPPLLock *this, cs_queue *q)
{
    TRACE("(%p %p)\n", this, q);

    if(this->owner == GetCurrentThreadId()) {
        this->count++;
        return;
    }

    cs_lock(&this->cs, q);
    this->count++;
    this->owner = GetCurrentThreadId();
}

/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Release, 4)
void __thiscall _ReentrantPPLLock__Release(_ReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    this->count--;
    if(this->count)
        return;

    this->owner = -1;
    critical_section_unlock(&this->cs);
}

typedef struct
{
    _ReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _ReentrantPPLLock__Scoped_lock;

/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_ctor, 8)
_ReentrantPPLLock__Scoped_lock* __thiscall _ReentrantPPLLock__Scoped_lock_ctor(
        _ReentrantPPLLock__Scoped_lock *this, _ReentrantPPLLock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    _ReentrantPPLLock__Acquire(this->lock, &this->wait.q);
    return this;
}

/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_dtor, 4)
void __thiscall _ReentrantPPLLock__Scoped_lock_dtor(_ReentrantPPLLock__Scoped_lock *this)
{
    TRACE("(%p)\n", this);

    _ReentrantPPLLock__Release(this->lock);
}

/* ?_GetConcurrency@details@Concurrency@@YAIXZ */
unsigned int __cdecl _GetConcurrency(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors;
    }

    return val;
}
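
/* Waiting on events goes through a thread_wait block carrying one entry per
 * event.  pending_waits counts how many events still have to fire (count for a
 * wait-for-all, 1 for a wait-for-any), and "signaled" doubles as the wait
 * state: EVT_RUNNING while the waiter is still registering, EVT_WAITING once
 * it is parked on the keyed event, or the address of the event that finally
 * satisfied the wait. */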
#define EVT_RUNNING (void*)1
#define EVT_WAITING NULL

struct thread_wait;
typedef struct thread_wait_entry
{
    struct thread_wait *wait;
    struct thread_wait_entry *next;
    struct thread_wait_entry *prev;
} thread_wait_entry;

typedef struct thread_wait
{
    void *signaled;
    int pending_waits;
    thread_wait_entry entries[1];
} thread_wait;

typedef struct
{
    thread_wait_entry *waiters;
    INT_PTR signaled;
    critical_section cs;
} event;

static inline PLARGE_INTEGER evt_timeout(PLARGE_INTEGER pTime, unsigned int timeout)
{
    if(timeout == COOPERATIVE_TIMEOUT_INFINITE) return NULL;
    pTime->QuadPart = (ULONGLONG)timeout * -10000;
    return pTime;
}

static void evt_add_queue(thread_wait_entry **head, thread_wait_entry *entry)
{
    entry->next = *head;
    entry->prev = NULL;
    if(*head) (*head)->prev = entry;
    *head = entry;
}

static void evt_remove_queue(thread_wait_entry **head, thread_wait_entry *entry)
{
    if(entry == *head)
        *head = entry->next;
    else if(entry->prev)
        entry->prev->next = entry->next;
    if(entry->next) entry->next->prev = entry->prev;
}

static MSVCRT_size_t evt_end_wait(thread_wait *wait, event **events, int count)
{
    MSVCRT_size_t i, ret = COOPERATIVE_WAIT_TIMEOUT;

    for(i = 0; i < count; i++) {
        critical_section_lock(&events[i]->cs);
        if(events[i] == wait->signaled) ret = i;
        evt_remove_queue(&events[i]->waiters, &wait->entries[i]);
        critical_section_unlock(&events[i]->cs);
    }

    return ret;
}

static inline int evt_transition(void **state, void *from, void *to)
{
    return InterlockedCompareExchangePointer(state, to, from) == from;
}

static MSVCRT_size_t evt_wait(thread_wait *wait, event **events, int count, MSVCRT_bool wait_all, unsigned int timeout)
{
    int i;
    NTSTATUS status;
    LARGE_INTEGER ntto;

    wait->signaled = EVT_RUNNING;
    wait->pending_waits = wait_all ? count : 1;
    for(i = 0; i < count; i++) {
        wait->entries[i].wait = wait;

        critical_section_lock(&events[i]->cs);
        evt_add_queue(&events[i]->waiters, &wait->entries[i]);
        if(events[i]->signaled) {
            if(!InterlockedDecrement(&wait->pending_waits)) {
                wait->signaled = events[i];
                critical_section_unlock(&events[i]->cs);

                return evt_end_wait(wait, events, i+1);
            }
        }
        critical_section_unlock(&events[i]->cs);
    }

    if(!timeout)
        return evt_end_wait(wait, events, count);

    if(!evt_transition(&wait->signaled, EVT_RUNNING, EVT_WAITING))
        return evt_end_wait(wait, events, count);

    status = NtWaitForKeyedEvent(keyed_event, wait, 0, evt_timeout(&ntto, timeout));

    if(status && !evt_transition(&wait->signaled, EVT_WAITING, EVT_RUNNING))
        NtWaitForKeyedEvent(keyed_event, wait, 0, NULL);

    return evt_end_wait(wait, events, count);
}

/* ??0event@Concurrency@@QAE@XZ */
/* ??0event@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(event_ctor, 4)
event* __thiscall event_ctor(event *this)
{
    TRACE("(%p)\n", this);

    this->waiters = NULL;
    this->signaled = FALSE;
    critical_section_ctor(&this->cs);

    return this;
}

/* ??1event@Concurrency@@QAE@XZ */
/* ??1event@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(event_dtor, 4)
void __thiscall event_dtor(event *this)
{
    TRACE("(%p)\n", this);
    critical_section_dtor(&this->cs);
    if(this->waiters)
        ERR("there's a wait on destroyed event\n");
}

/* ?reset@event@Concurrency@@QAEXXZ */
/* ?reset@event@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(event_reset, 4)
void __thiscall event_reset(event *this)
{
    thread_wait_entry *entry;

    TRACE("(%p)\n", this);

    critical_section_lock(&this->cs);
    if(this->signaled) {
        this->signaled = FALSE;
        for(entry=this->waiters; entry; entry = entry->next)
            InterlockedIncrement(&entry->wait->pending_waits);
    }
    critical_section_unlock(&this->cs);
}

/* ?set@event@Concurrency@@QAEXXZ */
/* ?set@event@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(event_set, 4)
void __thiscall event_set(event *this)
{
    thread_wait_entry *wakeup = NULL;
    thread_wait_entry *entry, *next;

    TRACE("(%p)\n", this);

    critical_section_lock(&this->cs);
    if(!this->signaled) {
        this->signaled = TRUE;
        for(entry=this->waiters; entry; entry=next) {
            next = entry->next;
            if(!InterlockedDecrement(&entry->wait->pending_waits)) {
                if(InterlockedExchangePointer(&entry->wait->signaled, this) == EVT_WAITING) {
                    evt_remove_queue(&this->waiters, entry);
                    evt_add_queue(&wakeup, entry);
                }
            }
        }
    }
    critical_section_unlock(&this->cs);

    for(entry=wakeup; entry; entry=next) {
        next = entry->next;
        entry->next = entry->prev = NULL;
        NtReleaseKeyedEvent(keyed_event, entry->wait, 0, NULL);
    }
}

/* ?wait@event@Concurrency@@QAEII@Z */
/* ?wait@event@Concurrency@@QEAA_KI@Z */
DEFINE_THISCALL_WRAPPER(event_wait, 8)
MSVCRT_size_t __thiscall event_wait(event *this, unsigned int timeout)
{
    thread_wait wait;
    MSVCRT_size_t signaled;

    TRACE("(%p %u)\n", this, timeout);

    critical_section_lock(&this->cs);
    signaled = this->signaled;
    critical_section_unlock(&this->cs);

    if(!timeout) return signaled ? 0 : COOPERATIVE_WAIT_TIMEOUT;
    return signaled ? 0 : evt_wait(&wait, &this, 1, FALSE, timeout);
}

/* ?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z */
/* ?wait_for_multiple@event@Concurrency@@SA_KPEAPEAV12@_K_NI@Z */
MSVCRT_size_t __cdecl event_wait_for_multiple(event **events, MSVCRT_size_t count, MSVCRT_bool wait_all, unsigned int timeout)
{
    thread_wait *wait;
    MSVCRT_size_t ret;

    TRACE("(%p %ld %d %u)\n", events, count, wait_all, timeout);

    if(count == 0)
        return 0;

    wait = heap_alloc(FIELD_OFFSET(thread_wait, entries[count]));
    if(!wait)
        throw_exception(EXCEPTION_BAD_ALLOC, 0, "bad allocation");
    ret = evt_wait(wait, events, count, wait_all, timeout);
    heap_free(wait);

    return ret;
}
#endif
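
/* _Condition_variable keeps a LIFO list of per-waiter cv_queue nodes.  A
 * notifier pops a node and races the waiter on the interlocked "expired" flag:
 * whichever side sets it first wins, so a timed-out waiter is never woken by
 * mistake, and the side that finds the flag already set is the one that frees
 * the node (or, for a timed-out waiter, consumes the wake that was already
 * posted for it). */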
#if _MSVCR_VER >= 110
typedef struct cv_queue {
    struct cv_queue *next;
    BOOL expired;
} cv_queue;

typedef struct {
    /* cv_queue structure is not binary compatible */
    cv_queue *queue;
    critical_section lock;
} _Condition_variable;

/* ??0_Condition_variable@details@Concurrency@@QAE@XZ */
/* ??0_Condition_variable@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_ctor, 4)
_Condition_variable* __thiscall _Condition_variable_ctor(_Condition_variable *this)
{
    TRACE("(%p)\n", this);

    this->queue = NULL;
    critical_section_ctor(&this->lock);
    return this;
}

/* ??1_Condition_variable@details@Concurrency@@QAE@XZ */
/* ??1_Condition_variable@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_dtor, 4)
void __thiscall _Condition_variable_dtor(_Condition_variable *this)
{
    TRACE("(%p)\n", this);

    while(this->queue) {
        cv_queue *next = this->queue->next;
        if(!this->queue->expired)
            ERR("there's an active wait\n");
        HeapFree(GetProcessHeap(), 0, this->queue);
        this->queue = next;
    }
    critical_section_dtor(&this->lock);
}

/* ?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z */
/* ?wait@_Condition_variable@details@Concurrency@@QEAAXAEAVcritical_section@3@@Z */
DEFINE_THISCALL_WRAPPER(_Condition_variable_wait, 8)
void __thiscall _Condition_variable_wait(_Condition_variable *this, critical_section *cs)
{
    cv_queue q;

    TRACE("(%p, %p)\n", this, cs);

    critical_section_lock(&this->lock);
    q.next = this->queue;
    q.expired = FALSE;
    this->queue = &q;
    critical_section_unlock(&this->lock);

    critical_section_unlock(cs);
    NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    critical_section_lock(cs);
}

/* ?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z */
/* ?wait_for@_Condition_variable@details@Concurrency@@QEAA_NAEAVcritical_section@3@I@Z */
DEFINE_THISCALL_WRAPPER(_Condition_variable_wait_for, 12)
MSVCRT_bool __thiscall _Condition_variable_wait_for(_Condition_variable *this,
        critical_section *cs, unsigned int timeout)
{
    LARGE_INTEGER to;
    NTSTATUS status;
    FILETIME ft;
    cv_queue *q;

    TRACE("(%p %p %d)\n", this, cs, timeout);

    if(!(q = HeapAlloc(GetProcessHeap(), 0, sizeof(cv_queue)))) {
        throw_exception(EXCEPTION_BAD_ALLOC, 0, "bad allocation");
    }

    critical_section_lock(&this->lock);
    q->next = this->queue;
    q->expired = FALSE;
    this->queue = q;
    critical_section_unlock(&this->lock);

    critical_section_unlock(cs);

    GetSystemTimeAsFileTime(&ft);
    to.QuadPart = ((LONGLONG)ft.dwHighDateTime << 32) +
        ft.dwLowDateTime + (LONGLONG)timeout * 10000;
    status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
    if(status == STATUS_TIMEOUT) {
        if(!InterlockedExchange(&q->expired, TRUE)) {
            critical_section_lock(cs);
            return FALSE;
        }
        else
            NtWaitForKeyedEvent(keyed_event, q, 0, 0);
    }

    HeapFree(GetProcessHeap(), 0, q);
    critical_section_lock(cs);
    return TRUE;
}

/* ?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ */
/* ?notify_one@_Condition_variable@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_one, 4)
void __thiscall _Condition_variable_notify_one(_Condition_variable *this)
{
    cv_queue *node;

    TRACE("(%p)\n", this);

    if(!this->queue)
        return;

    while(1) {
        critical_section_lock(&this->lock);
        node = this->queue;
        if(!node) {
            critical_section_unlock(&this->lock);
            return;
        }
        this->queue = node->next;
        critical_section_unlock(&this->lock);

        if(!InterlockedExchange(&node->expired, TRUE)) {
            NtReleaseKeyedEvent(keyed_event, node, 0, NULL);
            return;
        } else {
            HeapFree(GetProcessHeap(), 0, node);
        }
    }
}

/* ?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ */
/* ?notify_all@_Condition_variable@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_all, 4)
void __thiscall _Condition_variable_notify_all(_Condition_variable *this)
{
    cv_queue *ptr;

    TRACE("(%p)\n", this);

    if(!this->queue)
        return;

    critical_section_lock(&this->lock);
    ptr = this->queue;
    this->queue = NULL;
    critical_section_unlock(&this->lock);

    while(ptr) {
        cv_queue *next = ptr->next;

        if(!InterlockedExchange(&ptr->expired, TRUE))
            NtReleaseKeyedEvent(keyed_event, ptr, 0, NULL);
        else
            HeapFree(GetProcessHeap(), 0, ptr);
        ptr = next;
    }
}
#endif
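
/* reader_writer_lock packs its state into "count": the low bits hold the
 * number of active readers and the top bit (WRITER_WAITING) is set while a
 * writer owns or is queued for the lock.  Writers queue through
 * writer_tail/writer_head much like critical_section does, while arriving
 * readers push themselves onto the reader_head list and the first of them
 * releases the whole batch once no writer stands in the way. */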
#if _MSVCR_VER >= 100
typedef struct rwl_queue
{
    struct rwl_queue *next;
} rwl_queue;

#define WRITER_WAITING 0x80000000
/* FIXME: reader_writer_lock structure is not binary compatible
 * it can't exceed 28/56 bytes */
typedef struct
{
    LONG count;
    LONG thread_id;
    rwl_queue active;
    rwl_queue *writer_head;
    rwl_queue *writer_tail;
    rwl_queue *reader_head;
} reader_writer_lock;

/* ??0reader_writer_lock@Concurrency@@QAE@XZ */
/* ??0reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_ctor, 4)
reader_writer_lock* __thiscall reader_writer_lock_ctor(reader_writer_lock *this)
{
    TRACE("(%p)\n", this);

    if (!keyed_event) {
        HANDLE event;

        NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
        if (InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
            NtClose(event);
    }

    memset(this, 0, sizeof(*this));
    return this;
}

/* ??1reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_dtor, 4)
void __thiscall reader_writer_lock_dtor(reader_writer_lock *this)
{
    TRACE("(%p)\n", this);

    if (this->thread_id != 0 || this->count)
        WARN("destroying locked reader_writer_lock\n");
}

static inline void spin_wait_for_next_rwl(rwl_queue *q)
{
    SpinWait sw;

    if(q->next) return;

    SpinWait_ctor(&sw, &spin_wait_yield);
    SpinWait__Reset(&sw);
    while(!q->next)
        SpinWait__SpinOnce(&sw);
    SpinWait_dtor(&sw);
}

/* Remove when proper InterlockedOr implementation is added to wine */
static LONG InterlockedOr(LONG *d, LONG v)
{
    LONG l;
    while (~(l = *d) & v)
        if (InterlockedCompareExchange(d, l|v, l) == l) break;
    return l;
}

static LONG InterlockedAnd(LONG *d, LONG v)
{
    LONG l = *d, old;
    while ((l & v) != l) {
        if((old = InterlockedCompareExchange(d, l&v, l)) == l) break;
        l = old;
    }
    return l;
}

/* ?lock@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?lock@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock, 4)
void __thiscall reader_writer_lock_lock(reader_writer_lock *this)
{
    rwl_queue q = { NULL }, *last;

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId())
        throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");

    last = InterlockedExchangePointer((void**)&this->writer_tail, &q);
    if (last) {
        last->next = &q;
        NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    } else {
        this->writer_head = &q;
        if (InterlockedOr(&this->count, WRITER_WAITING))
            NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    }

    this->thread_id = GetCurrentThreadId();
    this->writer_head = &this->active;
    this->active.next = NULL;
    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
        spin_wait_for_next_rwl(&q);
        this->active.next = q.next;
    }
}

/* ?lock_read@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?lock_read@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock_read, 4)
void __thiscall reader_writer_lock_lock_read(reader_writer_lock *this)
{
    rwl_queue q;

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId())
        throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked as writer");

    do {
        q.next = this->reader_head;
    } while(InterlockedCompareExchangePointer((void**)&this->reader_head, &q, q.next) != q.next);

    if (!q.next) {
        rwl_queue *head;
        LONG count;

        while (!((count = this->count) & WRITER_WAITING))
            if (InterlockedCompareExchange(&this->count, count+1, count) == count) break;

        if (count & WRITER_WAITING)
            NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);

        head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
        while(head && head != &q) {
            rwl_queue *next = head->next;
            InterlockedIncrement(&this->count);
            NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
            head = next;
        }
    } else {
        NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    }
}

/* ?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ */
/* ?try_lock@reader_writer_lock@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock, 4)
MSVCRT_bool __thiscall reader_writer_lock_try_lock(reader_writer_lock *this)
{
    rwl_queue q = { NULL };

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId())
        return FALSE;

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &q, NULL))
        return FALSE;
    this->writer_head = &q;
    if (!InterlockedCompareExchange(&this->count, WRITER_WAITING, 0)) {
        this->thread_id = GetCurrentThreadId();
        this->writer_head = &this->active;
        this->active.next = NULL;
        if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
            spin_wait_for_next_rwl(&q);
            this->active.next = q.next;
        }
        return TRUE;
    }

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, &q) == &q)
        return FALSE;
    spin_wait_for_next_rwl(&q);
    this->writer_head = q.next;
    if (!InterlockedOr(&this->count, WRITER_WAITING)) {
        this->thread_id = GetCurrentThreadId();
        this->writer_head = &this->active;
        this->active.next = q.next;
        return TRUE;
    }
    return FALSE;
}

/* ?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ */
/* ?try_lock_read@reader_writer_lock@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock_read, 4)
MSVCRT_bool __thiscall reader_writer_lock_try_lock_read(reader_writer_lock *this)
{
    LONG count;

    TRACE("(%p)\n", this);

    while (!((count = this->count) & WRITER_WAITING))
        if (InterlockedCompareExchange(&this->count, count+1, count) == count) return TRUE;
    return FALSE;
}

/* ?unlock@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?unlock@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_unlock, 4)
void __thiscall reader_writer_lock_unlock(reader_writer_lock *this)
{
    LONG count;
    rwl_queue *head, *next;

    TRACE("(%p)\n", this);

    if ((count = this->count) & ~WRITER_WAITING) {
        count = InterlockedDecrement(&this->count);
        if (count != WRITER_WAITING)
            return;
        NtReleaseKeyedEvent(keyed_event, this->writer_head, 0, NULL);
        return;
    }

    this->thread_id = 0;
    next = this->writer_head->next;
    if (next) {
        NtReleaseKeyedEvent(keyed_event, next, 0, NULL);
        return;
    }
    InterlockedAnd(&this->count, ~WRITER_WAITING);
    head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
    while (head) {
        next = head->next;
        InterlockedIncrement(&this->count);
        NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
        head = next;
    }

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, this->writer_head) == this->writer_head)
        return;
    InterlockedOr(&this->count, WRITER_WAITING);
}

typedef struct {
    reader_writer_lock *lock;
} reader_writer_lock_scoped_lock;

/* ??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_ctor, 8)
reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_ctor(
        reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    reader_writer_lock_lock(lock);
    return this;
}

/* ??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1scoped_lock@reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_dtor, 4)
void __thiscall reader_writer_lock_scoped_lock_dtor(reader_writer_lock_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    reader_writer_lock_unlock(this->lock);
}

/* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_ctor, 8)
reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_read_ctor(
        reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    reader_writer_lock_lock_read(lock);
    return this;
}

/* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_dtor, 4)
void __thiscall reader_writer_lock_scoped_lock_read_dtor(reader_writer_lock_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    reader_writer_lock_unlock(this->lock);
}

typedef struct {
    CRITICAL_SECTION cs;
} _ReentrantBlockingLock;

/* ??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
/* ??0_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_ctor, 4)
_ReentrantBlockingLock* __thiscall _ReentrantBlockingLock_ctor(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);

    InitializeCriticalSection(&this->cs);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": _ReentrantBlockingLock");
    return this;
}

/* ??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
/* ??1_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_dtor, 4)
void __thiscall _ReentrantBlockingLock_dtor(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);
}

/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Acquire, 4)
void __thiscall _ReentrantBlockingLock__Acquire(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    EnterCriticalSection(&this->cs);
}

/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Release, 4)
void __thiscall _ReentrantBlockingLock__Release(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    LeaveCriticalSection(&this->cs);
}

/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ */
/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__TryAcquire, 4)
MSVCRT_bool __thiscall _ReentrantBlockingLock__TryAcquire(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    return TryEnterCriticalSection(&this->cs);
}

/* ?wait@Concurrency@@YAXI@Z */
void __cdecl Concurrency_wait(unsigned int time)
{
    static int once;

    if (!once++) FIXME("(%d) stub!\n", time);

    Sleep(time);
}
#endif

#if _MSVCR_VER == 110
static LONG shared_ptr_lock;
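
/* A single global spin lock, exported only by msvcr110 and presumably backing
 * that runtime's std::shared_ptr machinery: locking spins on an interlocked
 * flag and yields the time slice after every 1000 failed attempts, unlocking
 * simply clears the flag. */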
void __cdecl _Lock_shared_ptr_spin_lock(void)
{
    LONG l = 0;

    while(InterlockedCompareExchange(&shared_ptr_lock, 1, 0) != 0) {
        if(l++ == 1000) {
            Sleep(0);
            l = 0;
        }
    }
}

void __cdecl _Unlock_shared_ptr_spin_lock(void)
{
    shared_ptr_lock = 0;
}
#endif

/**********************************************************************
 *              msvcrt_free_locks (internal)
 *
 * Uninitialize all mt locks. Assume that neither _lock nor _unlock will
 * be called once this routine is running (i.e. _LOCKTAB_LOCK can be deleted).
 *
 */
void msvcrt_free_locks(void)
{
  int i;

  TRACE( ": uninitializing all mtlocks\n" );

  /* Uninitialize the table */
  for( i=0; i < _TOTAL_LOCKS; i++ )
  {
    if( lock_table[ i ].bInit )
    {
      msvcrt_uninitialize_mlock( i );
    }
  }

#if _MSVCR_VER >= 100
  if(keyed_event)
    NtClose(keyed_event);
#endif
}