/*
 * Copyright (c) 2002, TransGaming Technologies Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <stdarg.h>

#include "wine/debug.h"
#include "windef.h"
#include "winbase.h"
#include "winternl.h"
#include "msvcrt.h"
#include "cppexcept.h"
#include "mtdll.h"
#include "cxx.h"

WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);

typedef struct
{
  BOOL             bInit;
  CRITICAL_SECTION crit;
} LOCKTABLEENTRY;

static LOCKTABLEENTRY lock_table[ _TOTAL_LOCKS ];

static inline void msvcrt_mlock_set_entry_initialized( int locknum, BOOL initialized )
{
  lock_table[ locknum ].bInit = initialized;
}

static inline void msvcrt_initialize_mlock( int locknum )
{
  InitializeCriticalSection( &(lock_table[ locknum ].crit) );
  lock_table[ locknum ].crit.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": LOCKTABLEENTRY.crit");
  msvcrt_mlock_set_entry_initialized( locknum, TRUE );
}

static inline void msvcrt_uninitialize_mlock( int locknum )
{
  lock_table[ locknum ].crit.DebugInfo->Spare[0] = 0;
  DeleteCriticalSection( &(lock_table[ locknum ].crit) );
  msvcrt_mlock_set_entry_initialized( locknum, FALSE );
}

/**********************************************************************
 *     msvcrt_init_mt_locks (internal)
 *
 * Initialize the table lock. All other locks will be initialized
 * upon first use.
 *
 */
void msvcrt_init_mt_locks(void)
{
  int i;

  TRACE( "initializing mtlocks\n" );

  /* Initialize the table */
  for( i = 0; i < _TOTAL_LOCKS; i++ )
  {
    msvcrt_mlock_set_entry_initialized( i, FALSE );
  }

  /* Initialize our lock table lock */
  msvcrt_initialize_mlock( _LOCKTAB_LOCK );
}

/**********************************************************************
 *              _lock (MSVCRT.@)
 */
void CDECL _lock( int locknum )
{
  TRACE( "(%d)\n", locknum );

  /* If the lock doesn't exist yet, create it */
  if( lock_table[ locknum ].bInit == FALSE )
  {
    /* Lock while we're changing the lock table */
    _lock( _LOCKTAB_LOCK );

    /* Check again in case another thread created the lock while we waited */
    if( lock_table[ locknum ].bInit == FALSE )
    {
      TRACE( ": creating lock #%d\n", locknum );
      msvcrt_initialize_mlock( locknum );
    }

    /* Unlock ourselves */
    _unlock( _LOCKTAB_LOCK );
  }

  EnterCriticalSection( &(lock_table[ locknum ].crit) );
}

/**********************************************************************
 *              _unlock (MSVCRT.@)
 *
 * NOTE: There is no error detection to make sure the lock exists and is acquired.
 */
void CDECL _unlock( int locknum )
{
  TRACE( "(%d)\n", locknum );

  LeaveCriticalSection( &(lock_table[ locknum ].crit) );
}
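
/*
 * Usage sketch (illustrative comment, not part of the original file): CRT
 * internals are expected to bracket access to shared state with a matching
 * pair of calls, using a lock id from mtdll.h such as _HEAP_LOCK:
 *
 *   _lock( _HEAP_LOCK );
 *   ... touch the shared CRT state ...
 *   _unlock( _HEAP_LOCK );
 *
 * The underlying critical section is created lazily on the first _lock()
 * call for that id, as implemented above.
 */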

#if _MSVCR_VER >= 100
typedef enum
{
    SPINWAIT_INIT,
    SPINWAIT_SPIN,
    SPINWAIT_YIELD,
    SPINWAIT_DONE
} SpinWait_state;

typedef void (__cdecl *yield_func)(void);

typedef struct
{
    ULONG spin;
    ULONG unknown;
    SpinWait_state state;
    yield_func yield_func;
} SpinWait;

/* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
unsigned int __cdecl SpinCount__Value(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors>1 ? 4000 : 0;
    }

    return val;
}

/* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 1;
    this->yield_func = yf;
    return this;
}

/* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 0;
    this->yield_func = yf;
    return this;
}

/* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
void __thiscall SpinWait_dtor(SpinWait *this)
{
    TRACE("(%p)\n", this);
}

/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
void __thiscall SpinWait__DoYield(SpinWait *this)
{
    TRACE("(%p)\n", this);

    if(this->unknown)
        this->yield_func();
}

/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
{
    TRACE("(%p)\n", this);
    return 1;
}

/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
{
    TRACE("(%p %d)\n", this, spin);

    this->spin = spin;
    this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
}

/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
void __thiscall SpinWait__Reset(SpinWait *this)
{
    SpinWait__SetSpinCount(this, SpinCount__Value());
}

/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
MSVCRT_bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
{
    TRACE("(%p)\n", this);

    this->spin--;
    return this->spin > 0;
}

/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
MSVCRT_bool __thiscall SpinWait__SpinOnce(SpinWait *this)
{
    switch(this->state) {
    case SPINWAIT_INIT:
        SpinWait__Reset(this);
        /* fall through */
    case SPINWAIT_SPIN:
#ifdef __i386__
        __asm__ __volatile__( "rep;nop" : : : "memory" );
#else
        __asm__ __volatile__( "" : : : "memory" );
#endif

        this->spin--;
        if(!this->spin)
            this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
        return TRUE;
    case SPINWAIT_YIELD:
        this->state = SPINWAIT_DONE;
        this->yield_func();
        return TRUE;
    default:
        SpinWait__Reset(this);
        return FALSE;
    }
}
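
/*
 * Driver-loop sketch (illustrative comment, not part of the original file):
 * a caller resets the spin counter and then calls _SpinOnce() until the
 * condition it is waiting for becomes true, e.g.
 *
 *     SpinWait sw;
 *     SpinWait_ctor(&sw, &spin_wait_yield);
 *     SpinWait__Reset(&sw);
 *     while(!condition)            // "condition" is a placeholder
 *         SpinWait__SpinOnce(&sw);
 *     SpinWait_dtor(&sw);
 *
 * spin_wait_for_next_cs() below follows exactly this pattern.
 */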

static HANDLE keyed_event;

/* keep in sync with msvcp90/msvcp90.h */
typedef struct cs_queue
{
    struct cs_queue *next;
#if _MSVCR_VER >= 110
    BOOL free;
    int unknown;
#endif
} cs_queue;

typedef struct
{
    ULONG_PTR unk_thread_id;
    cs_queue unk_active;
#if _MSVCR_VER >= 110
    void *unknown[2];
#else
    void *unknown[1];
#endif
    cs_queue *head;
    void *tail;
} critical_section;
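
/*
 * Overview (descriptive comment summarizing the code below, not part of the
 * original file): critical_section keeps a lock-free queue of waiters.
 * Each contender atomically swaps "tail" to point at its own cs_queue node;
 * a thread that finds a previous tail links its node behind it and blocks on
 * the module-wide keyed event, using the node address as the key.  unlock()
 * wakes the next node with NtReleaseKeyedEvent().  unk_active stands in for
 * the current owner, and unk_thread_id only detects recursive locking by the
 * same thread (currently reported with a FIXME instead of an exception).
 */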

/* ??0critical_section@Concurrency@@QAE@XZ */
/* ??0critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
critical_section* __thiscall critical_section_ctor(critical_section *this)
{
    TRACE("(%p)\n", this);

    if(!keyed_event) {
        HANDLE event;

        NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
        if(InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
            NtClose(event);
    }

    this->unk_thread_id = 0;
    this->head = this->tail = NULL;
    return this;
}

/* ??1critical_section@Concurrency@@QAE@XZ */
/* ??1critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
void __thiscall critical_section_dtor(critical_section *this)
{
    TRACE("(%p)\n", this);
}

static void __cdecl spin_wait_yield(void)
{
    Sleep(0);
}

static inline void spin_wait_for_next_cs(cs_queue *q)
{
    SpinWait sw;

    if(q->next) return;

    SpinWait_ctor(&sw, &spin_wait_yield);
    SpinWait__Reset(&sw);
    while(!q->next)
        SpinWait__SpinOnce(&sw);
    SpinWait_dtor(&sw);
}

static inline void cs_set_head(critical_section *cs, cs_queue *q)
{
    cs->unk_thread_id = GetCurrentThreadId();
    cs->unk_active.next = q->next;
    cs->head = &cs->unk_active;
}

/* ?lock@critical_section@Concurrency@@QAEXXZ */
/* ?lock@critical_section@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
void __thiscall critical_section_lock(critical_section *this)
{
    cs_queue q, *last;

    TRACE("(%p)\n", this);

    if(this->unk_thread_id == GetCurrentThreadId()) {
        FIXME("throw exception\n");
        return;
    }

    memset(&q, 0, sizeof(q));
    last = InterlockedExchangePointer(&this->tail, &q);
    if(last) {
        last->next = &q;
        NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    }

    cs_set_head(this, &q);
    if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
        spin_wait_for_next_cs(&q);
        this->unk_active.next = q.next;
    }
}

/* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
/* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
MSVCRT_bool __thiscall critical_section_try_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);

    if(this->unk_thread_id == GetCurrentThreadId()) {
        FIXME("throw exception\n");
        return FALSE;
    }

    memset(&q, 0, sizeof(q));
    if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
        cs_set_head(this, &q);
        if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
            spin_wait_for_next_cs(&q);
            this->unk_active.next = q.next;
        }
        return TRUE;
    }
    return FALSE;
}
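
/*
 * Note (descriptive comment, not part of the original file): try_lock only
 * attempts the uncontended path.  The initial compare-exchange on "tail"
 * succeeds only when no thread owns or waits for the lock; otherwise the
 * function returns FALSE immediately instead of queuing and blocking the way
 * lock() does.
 */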

/* ?unlock@critical_section@Concurrency@@QAEXXZ */
/* ?unlock@critical_section@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
void __thiscall critical_section_unlock(critical_section *this)
{
    TRACE("(%p)\n", this);

    this->unk_thread_id = 0;
    this->head = NULL;
    if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
            == &this->unk_active) return;
    spin_wait_for_next_cs(&this->unk_active);

#if _MSVCR_VER >= 110
    while(1) {
        cs_queue *next;

        if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
            break;

        next = this->unk_active.next;
        if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
            HeapFree(GetProcessHeap(), 0, next);
            return;
        }
        spin_wait_for_next_cs(next);

        this->unk_active.next = next->next;
        HeapFree(GetProcessHeap(), 0, next);
    }
#endif

    NtReleaseKeyedEvent(keyed_event, this->unk_active.next, 0, NULL);
}
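
/*
 * Note (descriptive comment, not part of the original file): if the first
 * compare-exchange in unlock() finds unk_active still at the tail, there are
 * no waiters and the unlock is complete.  Otherwise the owner waits for the
 * next waiter to finish linking itself and hands the lock over through
 * NtReleaseKeyedEvent().  The _MSVCR_VER >= 110 loop additionally unlinks and
 * frees heap-allocated nodes abandoned by timed-out try_lock_for() calls
 * (their "free" flag is already set) before waking the first live waiter.
 */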

/* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
/* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
critical_section* __thiscall critical_section_native_handle(critical_section *this)
{
    TRACE("(%p)\n", this);
    return this;
}

#if _MSVCR_VER >= 110
/* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
/* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
MSVCRT_bool __thiscall critical_section_try_lock_for(
        critical_section *this, unsigned int timeout)
{
    cs_queue *q, *last;

    TRACE("(%p %d)\n", this, timeout);

    if(this->unk_thread_id == GetCurrentThreadId()) {
        FIXME("throw exception\n");
        return FALSE;
    }

    if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
        return critical_section_try_lock(this);

    last = InterlockedExchangePointer(&this->tail, q);
    if(last) {
        LARGE_INTEGER to;
        NTSTATUS status;
        FILETIME ft;

        last->next = q;
        GetSystemTimeAsFileTime(&ft);
        to.QuadPart = ((LONGLONG)ft.dwHighDateTime<<32) +
            ft.dwLowDateTime + (LONGLONG)timeout*10000;
        status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
        if(status == STATUS_TIMEOUT) {
            if(!InterlockedExchange(&q->free, TRUE))
                return FALSE;
            /* A thread has signaled the event and is blocked waiting. */
            /* We need to catch the event to wake that thread.         */
            NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
        }
    }

    cs_set_head(this, q);
    if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        this->unk_active.next = q->next;
    }

    HeapFree(GetProcessHeap(), 0, q);
    return TRUE;
}
#endif
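
/*
 * Note on the timeout handling above (descriptive comment, not part of the
 * original file): the relative timeout in milliseconds is converted to an
 * absolute deadline in 100-nanosecond units (1 ms = 10000 * 100ns) based on
 * the current system time, and a positive (absolute) timeout value is passed
 * to NtWaitForKeyedEvent.
 */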

typedef struct
{
    critical_section *cs;
    void *unknown[3];
} critical_section_scoped_lock;

/* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
        critical_section_scoped_lock *this, critical_section *cs)
{
    TRACE("(%p %p)\n", this, cs);
    this->cs = cs;
    critical_section_lock(this->cs);
    return this;
}

/* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
/* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(this->cs);
}
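
/*
 * Usage sketch (illustrative comment, not part of the original file): C++
 * code built against the Concurrency runtime uses scoped_lock as an RAII
 * guard; in terms of the C implementation above that corresponds to
 *
 *     critical_section_scoped_lock guard;
 *     critical_section_scoped_lock_ctor(&guard, cs);   // takes the lock
 *     ... critical region ...
 *     critical_section_scoped_lock_dtor(&guard);       // releases it
 *
 * where "cs" is a critical_section initialized with critical_section_ctor().
 */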

/* ?_GetConcurrency@details@Concurrency@@YAIXZ */
unsigned int __cdecl _GetConcurrency(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors;
    }

    return val;
}
#endif

/**********************************************************************
 *     msvcrt_free_locks (internal)
 *
 * Uninitialize all mt locks. Assume that neither _lock nor _unlock will
 * be called once this routine is running (i.e. _LOCKTAB_LOCK can be deleted).
 *
 */
void msvcrt_free_locks(void)
{
  int i;

  TRACE( ": uninitializing all mtlocks\n" );

  /* Uninitialize the table */
  for( i = 0; i < _TOTAL_LOCKS; i++ )
  {
    if( lock_table[ i ].bInit )
    {
      msvcrt_uninitialize_mlock( i );
    }
  }

#if _MSVCR_VER >= 100
  if(keyed_event)
    NtClose(keyed_event);
#endif
}