msvcrt: Mark functions that are only called from assembly as hidden.
[wine.git] / dlls / msvcrt / lock.c
1 /*
2 * Copyright (c) 2002, TransGaming Technologies Inc.
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with this library; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
19 #include "config.h"
20 #include "wine/port.h"
22 #include <stdarg.h>
24 #include "wine/debug.h"
25 #include "windef.h"
26 #include "winbase.h"
27 #include "winternl.h"
28 #include "msvcrt.h"
29 #include "cppexcept.h"
30 #include "mtdll.h"
31 #include "cxx.h"
33 WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);
35 typedef struct
37 BOOL bInit;
38 CRITICAL_SECTION crit;
39 } LOCKTABLEENTRY;
41 static LOCKTABLEENTRY lock_table[ _TOTAL_LOCKS ];
43 static inline void msvcrt_mlock_set_entry_initialized( int locknum, BOOL initialized )
45 lock_table[ locknum ].bInit = initialized;
48 static inline void msvcrt_initialize_mlock( int locknum )
50 InitializeCriticalSection( &(lock_table[ locknum ].crit) );
51 lock_table[ locknum ].crit.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": LOCKTABLEENTRY.crit");
52 msvcrt_mlock_set_entry_initialized( locknum, TRUE );
55 static inline void msvcrt_uninitialize_mlock( int locknum )
57 lock_table[ locknum ].crit.DebugInfo->Spare[0] = 0;
58 DeleteCriticalSection( &(lock_table[ locknum ].crit) );
59 msvcrt_mlock_set_entry_initialized( locknum, FALSE );
62 /**********************************************************************
63 * msvcrt_init_mt_locks (internal)
65 * Initialize the table lock. All other locks will be initialized
66 * upon first use.
69 void msvcrt_init_mt_locks(void)
71 int i;
73 TRACE( "initializing mtlocks\n" );
75 /* Initialize the table */
76 for( i=0; i < _TOTAL_LOCKS; i++ )
78 msvcrt_mlock_set_entry_initialized( i, FALSE );
81 /* Initialize our lock table lock */
82 msvcrt_initialize_mlock( _LOCKTAB_LOCK );
85 /**********************************************************************
86 * _lock (MSVCRT.@)
88 void CDECL _lock( int locknum )
90 TRACE( "(%d)\n", locknum );
92 /* If the lock doesn't exist yet, create it */
93 if( lock_table[ locknum ].bInit == FALSE )
95 /* Lock while we're changing the lock table */
96 _lock( _LOCKTAB_LOCK );
98         /* Check again in case another thread created the lock while we waited for _LOCKTAB_LOCK */
99 if( lock_table[ locknum ].bInit == FALSE )
101 TRACE( ": creating lock #%d\n", locknum );
102 msvcrt_initialize_mlock( locknum );
105 /* Unlock ourselves */
106 _unlock( _LOCKTAB_LOCK );
109 EnterCriticalSection( &(lock_table[ locknum ].crit) );
112 /**********************************************************************
113 * _unlock (MSVCRT.@)
115 * NOTE: There is no error detection to make sure the lock exists and is acquired.
117 void CDECL _unlock( int locknum )
119 TRACE( "(%d)\n", locknum );
121 LeaveCriticalSection( &(lock_table[ locknum ].crit) );
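/* Illustrative sketch (not part of the module): the usual pattern for guarding a
 * CRT-global resource with the lock table above, assuming a valid lock id from
 * mtdll.h such as _EXIT_LOCK1. The CRITICAL_SECTION behind the id is created
 * lazily on the first _lock() call. */
static void example_locktab_usage(void)
{
    _lock( _EXIT_LOCK1 );
    /* ... touch the shared CRT state guarded by this lock id ... */
    _unlock( _EXIT_LOCK1 );
}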
124 #if _MSVCR_VER >= 100
125 typedef enum
127 SPINWAIT_INIT,
128 SPINWAIT_SPIN,
129 SPINWAIT_YIELD,
130 SPINWAIT_DONE
131 } SpinWait_state;
133 typedef void (__cdecl *yield_func)(void);
135 typedef struct
137 ULONG spin;
138 ULONG unknown;
139 SpinWait_state state;
140 yield_func yield_func;
141 } SpinWait;
143 /* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
144 unsigned int __cdecl SpinCount__Value(void)
146 static unsigned int val = -1;
148 TRACE("()\n");
150 if(val == -1) {
151 SYSTEM_INFO si;
153 GetSystemInfo(&si);
154 val = si.dwNumberOfProcessors>1 ? 4000 : 0;
157 return val;
160 /* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
161 /* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
162 DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
163 SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
165 TRACE("(%p %p)\n", this, yf);
167 this->state = SPINWAIT_INIT;
168 this->unknown = 1;
169 this->yield_func = yf;
170 return this;
173 /* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
174 /* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
175 DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
176 SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
178 TRACE("(%p %p)\n", this, yf);
180 this->state = SPINWAIT_INIT;
181 this->unknown = 0;
182 this->yield_func = yf;
183 return this;
186 /* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
187 /* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
188 /* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
189 /* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
190 DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
191 void __thiscall SpinWait_dtor(SpinWait *this)
193 TRACE("(%p)\n", this);
196 /* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
197 /* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
198 /* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
199 /* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
200 DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
201 void __thiscall SpinWait__DoYield(SpinWait *this)
203 TRACE("(%p)\n", this);
205 if(this->unknown)
206 this->yield_func();
209 /* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
210 /* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
211 /* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
212 /* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
213 DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
214 ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
216 TRACE("(%p)\n", this);
217 return 1;
220 /* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
221 /* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
222 /* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
223 /* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
224 DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
225 void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
227 TRACE("(%p %d)\n", this, spin);
229 this->spin = spin;
230 this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
233 /* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
234 /* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
235 /* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
236 /* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
237 DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
238 void __thiscall SpinWait__Reset(SpinWait *this)
240 SpinWait__SetSpinCount(this, SpinCount__Value());
243 /* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
244 /* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
245 /* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
246 /* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
247 DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
248 MSVCRT_bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
250 TRACE("(%p)\n", this);
252 this->spin--;
253 return this->spin > 0;
256 /* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
257 /* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
258 /* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
259 /* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
260 DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
261 MSVCRT_bool __thiscall SpinWait__SpinOnce(SpinWait *this)
263 switch(this->state) {
264 case SPINWAIT_INIT:
265 SpinWait__Reset(this);
266 /* fall through */
267 case SPINWAIT_SPIN:
268 InterlockedDecrement((LONG*)&this->spin);
269 if(!this->spin)
270 this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
271 return TRUE;
272 case SPINWAIT_YIELD:
273 this->state = SPINWAIT_DONE;
274 this->yield_func();
275 return TRUE;
276 default:
277 SpinWait__Reset(this);
278 return FALSE;
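/* Illustrative sketch (not part of the module): how the _SpinWait state machine
 * above is typically driven. _SpinOnce() returns TRUE while it is still worth
 * spinning or yielding and FALSE once the caller should give up or block. */
static void __cdecl example_spin_yield(void)
{
    Sleep(0);
}

static void example_spin_until(volatile LONG *flag)
{
    SpinWait sw;

    SpinWait_ctor(&sw, &example_spin_yield);
    SpinWait__Reset(&sw);
    while (!*flag)
    {
        if (!SpinWait__SpinOnce(&sw))
            SpinWait__Reset(&sw);   /* spun out; a real caller would block here instead */
    }
    SpinWait_dtor(&sw);
}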
282 static HANDLE keyed_event;
284 /* keep in sync with msvcp90/msvcp90.h */
285 typedef struct cs_queue
287 struct cs_queue *next;
288 #if _MSVCR_VER >= 110
289 BOOL free;
290 int unknown;
291 #endif
292 } cs_queue;
294 typedef struct
296 ULONG_PTR unk_thread_id;
297 cs_queue unk_active;
298 #if _MSVCR_VER >= 110
299 void *unknown[2];
300 #else
301 void *unknown[1];
302 #endif
303 cs_queue *head;
304 void *tail;
305 } critical_section;
307 /* ??0critical_section@Concurrency@@QAE@XZ */
308 /* ??0critical_section@Concurrency@@QEAA@XZ */
309 DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
310 critical_section* __thiscall critical_section_ctor(critical_section *this)
312 TRACE("(%p)\n", this);
314 if(!keyed_event) {
315 HANDLE event;
317 NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
318 if(InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
319 NtClose(event);
322 this->unk_thread_id = 0;
323 this->head = this->tail = NULL;
324 return this;
327 /* ??1critical_section@Concurrency@@QAE@XZ */
328 /* ??1critical_section@Concurrency@@QEAA@XZ */
329 DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
330 void __thiscall critical_section_dtor(critical_section *this)
332 TRACE("(%p)\n", this);
335 static void __cdecl spin_wait_yield(void)
337 Sleep(0);
340 static inline void spin_wait_for_next_cs(cs_queue *q)
342 SpinWait sw;
344 if(q->next) return;
346 SpinWait_ctor(&sw, &spin_wait_yield);
347 SpinWait__Reset(&sw);
348 while(!q->next)
349 SpinWait__SpinOnce(&sw);
350 SpinWait_dtor(&sw);
353 static inline void cs_set_head(critical_section *cs, cs_queue *q)
355 cs->unk_thread_id = GetCurrentThreadId();
356 cs->unk_active.next = q->next;
357 cs->head = &cs->unk_active;
360 /* ?lock@critical_section@Concurrency@@QAEXXZ */
361 /* ?lock@critical_section@Concurrency@@QEAAXXZ */
362 DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
363 void __thiscall critical_section_lock(critical_section *this)
365 cs_queue q, *last;
367 TRACE("(%p)\n", this);
369 if(this->unk_thread_id == GetCurrentThreadId())
370 throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");
372 memset(&q, 0, sizeof(q));
373 last = InterlockedExchangePointer(&this->tail, &q);
374 if(last) {
375 last->next = &q;
376 NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
379 cs_set_head(this, &q);
380 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
381 spin_wait_for_next_cs(&q);
382 this->unk_active.next = q.next;
386 /* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
387 /* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
388 DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
389 MSVCRT_bool __thiscall critical_section_try_lock(critical_section *this)
391 cs_queue q;
393 TRACE("(%p)\n", this);
395 if(this->unk_thread_id == GetCurrentThreadId())
396 return FALSE;
398 memset(&q, 0, sizeof(q));
399 if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
400 cs_set_head(this, &q);
401 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
402 spin_wait_for_next_cs(&q);
403 this->unk_active.next = q.next;
405 return TRUE;
407 return FALSE;
410 /* ?unlock@critical_section@Concurrency@@QAEXXZ */
411 /* ?unlock@critical_section@Concurrency@@QEAAXXZ */
412 DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
413 void __thiscall critical_section_unlock(critical_section *this)
415 TRACE("(%p)\n", this);
417 this->unk_thread_id = 0;
418 this->head = NULL;
419 if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
420 == &this->unk_active) return;
421 spin_wait_for_next_cs(&this->unk_active);
423 #if _MSVCR_VER >= 110
424 while(1) {
425 cs_queue *next;
427 if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
428 break;
430 next = this->unk_active.next;
431 if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
432 HeapFree(GetProcessHeap(), 0, next);
433 return;
435 spin_wait_for_next_cs(next);
437 this->unk_active.next = next->next;
438 HeapFree(GetProcessHeap(), 0, next);
440 #endif
442 NtReleaseKeyedEvent(keyed_event, this->unk_active.next, 0, NULL);
445 /* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
446 /* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
447 DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
448 critical_section* __thiscall critical_section_native_handle(critical_section *this)
450 TRACE("(%p)\n", this);
451 return this;
454 #if _MSVCR_VER >= 110
455 /* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
456 /* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
457 DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
458 MSVCRT_bool __thiscall critical_section_try_lock_for(
459 critical_section *this, unsigned int timeout)
461 cs_queue *q, *last;
463 TRACE("(%p %d)\n", this, timeout);
465 if(this->unk_thread_id == GetCurrentThreadId())
466 throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");
468 if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
469 return critical_section_try_lock(this);
471 last = InterlockedExchangePointer(&this->tail, q);
472 if(last) {
473 LARGE_INTEGER to;
474 NTSTATUS status;
475 FILETIME ft;
477 last->next = q;
478 GetSystemTimeAsFileTime(&ft);
479 to.QuadPart = ((LONGLONG)ft.dwHighDateTime<<32) +
480 ft.dwLowDateTime + (LONGLONG)timeout*10000;
481 status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
482 if(status == STATUS_TIMEOUT) {
483 if(!InterlockedExchange(&q->free, TRUE))
484 return FALSE;
485                 /* The unlocking thread has already signaled our wait and is blocked in NtReleaseKeyedEvent. */
486                 /* Catch the event here to let it continue; the lock is ours after all. */
487 NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
491 cs_set_head(this, q);
492 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
493 spin_wait_for_next_cs(q);
494 this->unk_active.next = q->next;
497 HeapFree(GetProcessHeap(), 0, q);
498 return TRUE;
500 #endif
502 typedef struct
504 critical_section *cs;
505 void *unknown[4];
506 int unknown2[2];
507 } critical_section_scoped_lock;
509 /* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
510 /* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
511 DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
512 critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
513 critical_section_scoped_lock *this, critical_section *cs)
515 TRACE("(%p %p)\n", this, cs);
516 this->cs = cs;
517 critical_section_lock(this->cs);
518 return this;
521 /* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
522 /* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
523 DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
524 void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
526 TRACE("(%p)\n", this);
527 critical_section_unlock(this->cs);
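/* Illustrative sketch (not part of the module): how the Concurrency::critical_section
 * implementation above is exercised, calling the __thiscall implementations directly
 * the same way this file does internally. */
static void example_critical_section_usage(void)
{
    critical_section cs;
    critical_section_scoped_lock lock;

    critical_section_ctor(&cs);

    critical_section_lock(&cs);                     /* queue-based lock built on the keyed event */
    /* ... exclusive work ... */
    critical_section_unlock(&cs);

    if (critical_section_try_lock(&cs))             /* non-blocking attempt */
        critical_section_unlock(&cs);

    critical_section_scoped_lock_ctor(&lock, &cs);  /* RAII-style helper: locks in the ctor */
    /* ... exclusive work ... */
    critical_section_scoped_lock_dtor(&lock);       /* unlocks in the dtor */

    critical_section_dtor(&cs);
}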
530 /* ?_GetConcurrency@details@Concurrency@@YAIXZ */
531 unsigned int __cdecl _GetConcurrency(void)
533 static unsigned int val = -1;
535 TRACE("()\n");
537 if(val == -1) {
538 SYSTEM_INFO si;
540 GetSystemInfo(&si);
541 val = si.dwNumberOfProcessors;
544 return val;
547 #define EVT_RUNNING (void*)1
548 #define EVT_WAITING NULL
550 struct thread_wait;
551 typedef struct thread_wait_entry
553 struct thread_wait *wait;
554 struct thread_wait_entry *next;
555 struct thread_wait_entry *prev;
556 } thread_wait_entry;
558 typedef struct thread_wait
560 void *signaled;
561 int pending_waits;
562 thread_wait_entry entries[1];
563 } thread_wait;
565 typedef struct
567 thread_wait_entry *waiters;
568 INT_PTR signaled;
569 critical_section cs;
570 } event;
572 static inline PLARGE_INTEGER evt_timeout(PLARGE_INTEGER pTime, unsigned int timeout)
574 if(timeout == COOPERATIVE_TIMEOUT_INFINITE) return NULL;
575 pTime->QuadPart = (ULONGLONG)timeout * -10000;
576 return pTime;
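/* For example, a 1500 ms cooperative timeout becomes a relative NT timeout of
 * 1500 * -10000 = -15000000, i.e. 1.5 s expressed in negative 100 ns units,
 * while COOPERATIVE_TIMEOUT_INFINITE maps to a NULL timeout pointer. */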
579 static void evt_add_queue(thread_wait_entry **head, thread_wait_entry *entry)
581 entry->next = *head;
582 entry->prev = NULL;
583 if(*head) (*head)->prev = entry;
584 *head = entry;
587 static void evt_remove_queue(thread_wait_entry **head, thread_wait_entry *entry)
589 if(entry == *head)
590 *head = entry->next;
591 else if(entry->prev)
592 entry->prev->next = entry->next;
593 if(entry->next) entry->next->prev = entry->prev;
596 static MSVCRT_size_t evt_end_wait(thread_wait *wait, event **events, int count)
598 MSVCRT_size_t i, ret = COOPERATIVE_WAIT_TIMEOUT;
600 for(i = 0; i < count; i++) {
601 critical_section_lock(&events[i]->cs);
602 if(events[i] == wait->signaled) ret = i;
603 evt_remove_queue(&events[i]->waiters, &wait->entries[i]);
604 critical_section_unlock(&events[i]->cs);
607 return ret;
610 static inline int evt_transition(void **state, void *from, void *to)
612 return InterlockedCompareExchangePointer(state, to, from) == from;
615 static MSVCRT_size_t evt_wait(thread_wait *wait, event **events, int count, MSVCRT_bool wait_all, unsigned int timeout)
617 int i;
618 NTSTATUS status;
619 LARGE_INTEGER ntto;
621 wait->signaled = EVT_RUNNING;
622 wait->pending_waits = wait_all ? count : 1;
623 for(i = 0; i < count; i++) {
624 wait->entries[i].wait = wait;
626 critical_section_lock(&events[i]->cs);
627 evt_add_queue(&events[i]->waiters, &wait->entries[i]);
628 if(events[i]->signaled) {
629 if(!InterlockedDecrement(&wait->pending_waits)) {
630 wait->signaled = events[i];
631 critical_section_unlock(&events[i]->cs);
633 return evt_end_wait(wait, events, i+1);
636 critical_section_unlock(&events[i]->cs);
639 if(!timeout)
640 return evt_end_wait(wait, events, count);
642 if(!evt_transition(&wait->signaled, EVT_RUNNING, EVT_WAITING))
643 return evt_end_wait(wait, events, count);
645 status = NtWaitForKeyedEvent(keyed_event, wait, 0, evt_timeout(&ntto, timeout));
647 if(status && !evt_transition(&wait->signaled, EVT_WAITING, EVT_RUNNING))
648 NtWaitForKeyedEvent(keyed_event, wait, 0, NULL);
650 return evt_end_wait(wait, events, count);
653 /* ??0event@Concurrency@@QAE@XZ */
654 /* ??0event@Concurrency@@QEAA@XZ */
655 DEFINE_THISCALL_WRAPPER(event_ctor, 4)
656 event* __thiscall event_ctor(event *this)
658 TRACE("(%p)\n", this);
660 this->waiters = NULL;
661 this->signaled = FALSE;
662 critical_section_ctor(&this->cs);
664 return this;
667 /* ??1event@Concurrency@@QAE@XZ */
668 /* ??1event@Concurrency@@QEAA@XZ */
669 DEFINE_THISCALL_WRAPPER(event_dtor, 4)
670 void __thiscall event_dtor(event *this)
672 TRACE("(%p)\n", this);
673 critical_section_dtor(&this->cs);
674 if(this->waiters)
675 ERR("there's a wait on destroyed event\n");
678 /* ?reset@event@Concurrency@@QAEXXZ */
679 /* ?reset@event@Concurrency@@QEAAXXZ */
680 DEFINE_THISCALL_WRAPPER(event_reset, 4)
681 void __thiscall event_reset(event *this)
683 thread_wait_entry *entry;
685 TRACE("(%p)\n", this);
687 critical_section_lock(&this->cs);
688 if(this->signaled) {
689 this->signaled = FALSE;
690 for(entry=this->waiters; entry; entry = entry->next)
691 InterlockedIncrement(&entry->wait->pending_waits);
693 critical_section_unlock(&this->cs);
696 /* ?set@event@Concurrency@@QAEXXZ */
697 /* ?set@event@Concurrency@@QEAAXXZ */
698 DEFINE_THISCALL_WRAPPER(event_set, 4)
699 void __thiscall event_set(event *this)
701 thread_wait_entry *wakeup = NULL;
702 thread_wait_entry *entry, *next;
704 TRACE("(%p)\n", this);
706 critical_section_lock(&this->cs);
707 if(!this->signaled) {
708 this->signaled = TRUE;
709 for(entry=this->waiters; entry; entry=next) {
710 next = entry->next;
711 if(!InterlockedDecrement(&entry->wait->pending_waits)) {
712 if(InterlockedExchangePointer(&entry->wait->signaled, this) == EVT_WAITING) {
713 evt_remove_queue(&this->waiters, entry);
714 evt_add_queue(&wakeup, entry);
719 critical_section_unlock(&this->cs);
721 for(entry=wakeup; entry; entry=next) {
722 next = entry->next;
723 entry->next = entry->prev = NULL;
724 NtReleaseKeyedEvent(keyed_event, entry->wait, 0, NULL);
728 /* ?wait@event@Concurrency@@QAEII@Z */
729 /* ?wait@event@Concurrency@@QEAA_KI@Z */
730 DEFINE_THISCALL_WRAPPER(event_wait, 8)
731 MSVCRT_size_t __thiscall event_wait(event *this, unsigned int timeout)
733 thread_wait wait;
734 MSVCRT_size_t signaled;
736 TRACE("(%p %u)\n", this, timeout);
738 critical_section_lock(&this->cs);
739 signaled = this->signaled;
740 critical_section_unlock(&this->cs);
742 if(!timeout) return signaled ? 0 : COOPERATIVE_WAIT_TIMEOUT;
743 return signaled ? 0 : evt_wait(&wait, &this, 1, FALSE, timeout);
746 /* ?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z */
747 /* ?wait_for_multiple@event@Concurrency@@SA_KPEAPEAV12@_K_NI@Z */
748 int __cdecl event_wait_for_multiple(event **events, MSVCRT_size_t count, MSVCRT_bool wait_all, unsigned int timeout)
750 thread_wait *wait;
751 MSVCRT_size_t ret;
753 TRACE("(%p %ld %d %u)\n", events, count, wait_all, timeout);
755 if(count == 0)
756 return 0;
758 wait = heap_alloc(FIELD_OFFSET(thread_wait, entries[count]));
759 if(!wait)
760 throw_exception(EXCEPTION_BAD_ALLOC, 0, "bad allocation");
761 ret = evt_wait(wait, events, count, wait_all, timeout);
762 heap_free(wait);
764 return ret;
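/* Illustrative sketch (not part of the module): typical use of the Concurrency::event
 * implementation above. event_wait() returns 0 when the event is signaled and
 * COOPERATIVE_WAIT_TIMEOUT when the timeout expires. */
static void example_event_usage(void)
{
    event ev1, ev2;
    event *both[2];

    event_ctor(&ev1);
    event_ctor(&ev2);

    event_set(&ev1);                             /* wake current and future waiters */
    if (event_wait(&ev1, 0) == 0)                /* poll: already signaled, so returns 0 */
        event_reset(&ev1);

    both[0] = &ev1;
    both[1] = &ev2;
    event_set(&ev1);
    event_set(&ev2);
    event_wait_for_multiple(both, 2, TRUE, 100); /* wait for all of them, up to 100 ms */

    event_dtor(&ev2);
    event_dtor(&ev1);
}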
766 #endif
768 #if _MSVCR_VER >= 110
769 typedef struct cv_queue {
770 struct cv_queue *next;
771 BOOL expired;
772 } cv_queue;
774 typedef struct {
775 /* cv_queue structure is not binary compatible */
776 cv_queue *queue;
777 critical_section lock;
778 } _Condition_variable;
780 /* ??0_Condition_variable@details@Concurrency@@QAE@XZ */
781 /* ??0_Condition_variable@details@Concurrency@@QEAA@XZ */
782 DEFINE_THISCALL_WRAPPER(_Condition_variable_ctor, 4)
783 _Condition_variable* __thiscall _Condition_variable_ctor(_Condition_variable *this)
785 TRACE("(%p)\n", this);
787 this->queue = NULL;
788 critical_section_ctor(&this->lock);
789 return this;
792 /* ??1_Condition_variable@details@Concurrency@@QAE@XZ */
793 /* ??1_Condition_variable@details@Concurrency@@QEAA@XZ */
794 DEFINE_THISCALL_WRAPPER(_Condition_variable_dtor, 4)
795 void __thiscall _Condition_variable_dtor(_Condition_variable *this)
797 TRACE("(%p)\n", this);
799 while(this->queue) {
800 cv_queue *next = this->queue->next;
801 if(!this->queue->expired)
802 ERR("there's an active wait\n");
803 HeapFree(GetProcessHeap(), 0, this->queue);
804 this->queue = next;
806 critical_section_dtor(&this->lock);
809 /* ?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z */
810 /* ?wait@_Condition_variable@details@Concurrency@@QEAAXAEAVcritical_section@3@@Z */
811 DEFINE_THISCALL_WRAPPER(_Condition_variable_wait, 8)
812 void __thiscall _Condition_variable_wait(_Condition_variable *this, critical_section *cs)
814 cv_queue q;
816 TRACE("(%p, %p)\n", this, cs);
818 critical_section_lock(&this->lock);
819 q.next = this->queue;
820 q.expired = FALSE;
821 this->queue = &q;
822 critical_section_unlock(&this->lock);
824 critical_section_unlock(cs);
825 NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
826 critical_section_lock(cs);
829 /* ?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z */
830 /* ?wait_for@_Condition_variable@details@Concurrency@@QEAA_NAEAVcritical_section@3@I@Z */
831 DEFINE_THISCALL_WRAPPER(_Condition_variable_wait_for, 12)
832 MSVCRT_bool __thiscall _Condition_variable_wait_for(_Condition_variable *this,
833 critical_section *cs, unsigned int timeout)
835 LARGE_INTEGER to;
836 NTSTATUS status;
837 FILETIME ft;
838 cv_queue *q;
840 TRACE("(%p %p %d)\n", this, cs, timeout);
842 if(!(q = HeapAlloc(GetProcessHeap(), 0, sizeof(cv_queue)))) {
843 throw_exception(EXCEPTION_BAD_ALLOC, 0, "bad allocation");
846 critical_section_lock(&this->lock);
847 q->next = this->queue;
848 q->expired = FALSE;
849 this->queue = q;
850 critical_section_unlock(&this->lock);
852 critical_section_unlock(cs);
854 GetSystemTimeAsFileTime(&ft);
855 to.QuadPart = ((LONGLONG)ft.dwHighDateTime << 32) +
856 ft.dwLowDateTime + (LONGLONG)timeout * 10000;
857 status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
858 if(status == STATUS_TIMEOUT) {
859 if(!InterlockedExchange(&q->expired, TRUE)) {
860 critical_section_lock(cs);
861 return FALSE;
863 else
864 NtWaitForKeyedEvent(keyed_event, q, 0, 0);
867 HeapFree(GetProcessHeap(), 0, q);
868 critical_section_lock(cs);
869 return TRUE;
872 /* ?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ */
873 /* ?notify_one@_Condition_variable@details@Concurrency@@QEAAXXZ */
874 DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_one, 4)
875 void __thiscall _Condition_variable_notify_one(_Condition_variable *this)
877 cv_queue *node;
879 TRACE("(%p)\n", this);
881 if(!this->queue)
882 return;
884 while(1) {
885 critical_section_lock(&this->lock);
886 node = this->queue;
887 if(!node) {
888 critical_section_unlock(&this->lock);
889 return;
891 this->queue = node->next;
892 critical_section_unlock(&this->lock);
894 if(!InterlockedExchange(&node->expired, TRUE)) {
895 NtReleaseKeyedEvent(keyed_event, node, 0, NULL);
896 return;
897 } else {
898 HeapFree(GetProcessHeap(), 0, node);
903 /* ?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ */
904 /* ?notify_all@_Condition_variable@details@Concurrency@@QEAAXXZ */
905 DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_all, 4)
906 void __thiscall _Condition_variable_notify_all(_Condition_variable *this)
908 cv_queue *ptr;
910 TRACE("(%p)\n", this);
912 if(!this->queue)
913 return;
915 critical_section_lock(&this->lock);
916 ptr = this->queue;
917 this->queue = NULL;
918 critical_section_unlock(&this->lock);
920 while(ptr) {
921 cv_queue *next = ptr->next;
923 if(!InterlockedExchange(&ptr->expired, TRUE))
924 NtReleaseKeyedEvent(keyed_event, ptr, 0, NULL);
925 else
926 HeapFree(GetProcessHeap(), 0, ptr);
927 ptr = next;
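/* Illustrative sketch (not part of the module): the usual condition-variable pattern
 * with the _Condition_variable implementation above, assuming both objects have been
 * initialized as shown in example_cv_init(). The predicate is re-checked after every
 * wake-up, as with any condition variable. */
static critical_section example_cv_cs;
static _Condition_variable example_cv;
static int example_cv_ready;

static void example_cv_init(void)
{
    critical_section_ctor(&example_cv_cs);
    _Condition_variable_ctor(&example_cv);
}

static void example_cv_consumer(void)
{
    critical_section_lock(&example_cv_cs);
    while (!example_cv_ready)
        _Condition_variable_wait(&example_cv, &example_cv_cs);  /* releases cs while blocked */
    critical_section_unlock(&example_cv_cs);
}

static void example_cv_producer(void)
{
    critical_section_lock(&example_cv_cs);
    example_cv_ready = 1;
    critical_section_unlock(&example_cv_cs);
    _Condition_variable_notify_one(&example_cv);
}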
930 #endif
932 #if _MSVCR_VER >= 100
933 typedef struct rwl_queue
935 struct rwl_queue *next;
936 } rwl_queue;
938 #define WRITER_WAITING 0x80000000
939 /* FIXME: reader_writer_lock structure is not binary compatible;
940  * it can't exceed 28/56 bytes (32-bit/64-bit) */
941 typedef struct
943 LONG count;
944 LONG thread_id;
945 rwl_queue active;
946 rwl_queue *writer_head;
947 rwl_queue *writer_tail;
948 rwl_queue *reader_head;
949 } reader_writer_lock;
951 /* ??0reader_writer_lock@Concurrency@@QAE@XZ */
952 /* ??0reader_writer_lock@Concurrency@@QEAA@XZ */
953 DEFINE_THISCALL_WRAPPER(reader_writer_lock_ctor, 4)
954 reader_writer_lock* __thiscall reader_writer_lock_ctor(reader_writer_lock *this)
956 TRACE("(%p)\n", this);
958 if (!keyed_event) {
959 HANDLE event;
961 NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
962 if (InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
963 NtClose(event);
966 memset(this, 0, sizeof(*this));
967 return this;
970 /* ??1reader_writer_lock@Concurrency@@QAE@XZ */
971 /* ??1reader_writer_lock@Concurrency@@QEAA@XZ */
972 DEFINE_THISCALL_WRAPPER(reader_writer_lock_dtor, 4)
973 void __thiscall reader_writer_lock_dtor(reader_writer_lock *this)
975 TRACE("(%p)\n", this);
977 if (this->thread_id != 0 || this->count)
978 WARN("destroying locked reader_writer_lock\n");
981 static inline void spin_wait_for_next_rwl(rwl_queue *q)
983 SpinWait sw;
985 if(q->next) return;
987 SpinWait_ctor(&sw, &spin_wait_yield);
988 SpinWait__Reset(&sw);
989 while(!q->next)
990 SpinWait__SpinOnce(&sw);
991 SpinWait_dtor(&sw);
994 /* Remove when proper InterlockedOr implementation is added to wine */
995 static LONG InterlockedOr(LONG *d, LONG v)
997 LONG l;
998 while (~(l = *d) & v)
999 if (InterlockedCompareExchange(d, l|v, l) == l) break;
1000 return l;
1003 static LONG InterlockedAnd(LONG *d, LONG v)
1005 LONG l = *d, old;
1006 while ((l & v) != l) {
1007 if((old = InterlockedCompareExchange(d, l&v, l)) == l) break;
1008 l = old;
1010 return l;
1013 /* ?lock@reader_writer_lock@Concurrency@@QAEXXZ */
1014 /* ?lock@reader_writer_lock@Concurrency@@QEAAXXZ */
1015 DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock, 4)
1016 void __thiscall reader_writer_lock_lock(reader_writer_lock *this)
1018 rwl_queue q = { NULL }, *last;
1020 TRACE("(%p)\n", this);
1022 if (this->thread_id == GetCurrentThreadId())
1023 throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");
1025 last = InterlockedExchangePointer((void**)&this->writer_tail, &q);
1026 if (last) {
1027 last->next = &q;
1028 NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
1029 } else {
1030 this->writer_head = &q;
1031 if (InterlockedOr(&this->count, WRITER_WAITING))
1032 NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
1035 this->thread_id = GetCurrentThreadId();
1036 this->writer_head = &this->active;
1037 this->active.next = NULL;
1038 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
1039 spin_wait_for_next_rwl(&q);
1040 this->active.next = q.next;
1044 /* ?lock_read@reader_writer_lock@Concurrency@@QAEXXZ */
1045 /* ?lock_read@reader_writer_lock@Concurrency@@QEAAXXZ */
1046 DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock_read, 4)
1047 void __thiscall reader_writer_lock_lock_read(reader_writer_lock *this)
1049 rwl_queue q;
1051 TRACE("(%p)\n", this);
1053 if (this->thread_id == GetCurrentThreadId())
1054 throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked as writer");
1056 do {
1057 q.next = this->reader_head;
1058 } while(InterlockedCompareExchangePointer((void**)&this->reader_head, &q, q.next) != q.next);
1060 if (!q.next) {
1061 rwl_queue *head;
1062 LONG count;
1064 while (!((count = this->count) & WRITER_WAITING))
1065 if (InterlockedCompareExchange(&this->count, count+1, count) == count) break;
1067 if (count & WRITER_WAITING)
1068 NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
1070 head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
1071 while(head && head != &q) {
1072 rwl_queue *next = head->next;
1073 InterlockedIncrement(&this->count);
1074 NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
1075 head = next;
1077 } else {
1078 NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
1082 /* ?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ */
1083 /* ?try_lock@reader_writer_lock@Concurrency@@QEAA_NXZ */
1084 DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock, 4)
1085 MSVCRT_bool __thiscall reader_writer_lock_try_lock(reader_writer_lock *this)
1087 rwl_queue q = { NULL };
1089 TRACE("(%p)\n", this);
1091 if (this->thread_id == GetCurrentThreadId())
1092 return FALSE;
1094 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &q, NULL))
1095 return FALSE;
1096 this->writer_head = &q;
1097 if (!InterlockedCompareExchange(&this->count, WRITER_WAITING, 0)) {
1098 this->thread_id = GetCurrentThreadId();
1099 this->writer_head = &this->active;
1100 this->active.next = NULL;
1101 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
1102 spin_wait_for_next_rwl(&q);
1103 this->active.next = q.next;
1105 return TRUE;
1108 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, &q) == &q)
1109 return FALSE;
1110 spin_wait_for_next_rwl(&q);
1111 this->writer_head = q.next;
1112 if (!InterlockedOr(&this->count, WRITER_WAITING)) {
1113 this->thread_id = GetCurrentThreadId();
1114 this->writer_head = &this->active;
1115 this->active.next = q.next;
1116 return TRUE;
1118 return FALSE;
1121 /* ?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ */
1122 /* ?try_lock_read@reader_writer_lock@Concurrency@@QEAA_NXZ */
1123 DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock_read, 4)
1124 MSVCRT_bool __thiscall reader_writer_lock_try_lock_read(reader_writer_lock *this)
1126 LONG count;
1128 TRACE("(%p)\n", this);
1130 while (!((count = this->count) & WRITER_WAITING))
1131 if (InterlockedCompareExchange(&this->count, count+1, count) == count) return TRUE;
1132 return FALSE;
1135 /* ?unlock@reader_writer_lock@Concurrency@@QAEXXZ */
1136 /* ?unlock@reader_writer_lock@Concurrency@@QEAAXXZ */
1137 DEFINE_THISCALL_WRAPPER(reader_writer_lock_unlock, 4)
1138 void __thiscall reader_writer_lock_unlock(reader_writer_lock *this)
1140 LONG count;
1141 rwl_queue *head, *next;
1143 TRACE("(%p)\n", this);
1145 if ((count = this->count) & ~WRITER_WAITING) {
1146 count = InterlockedDecrement(&this->count);
1147 if (count != WRITER_WAITING)
1148 return;
1149 NtReleaseKeyedEvent(keyed_event, this->writer_head, 0, NULL);
1150 return;
1153 this->thread_id = 0;
1154 next = this->writer_head->next;
1155 if (next) {
1156 NtReleaseKeyedEvent(keyed_event, next, 0, NULL);
1157 return;
1159 InterlockedAnd(&this->count, ~WRITER_WAITING);
1160 head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
1161 while (head) {
1162 next = head->next;
1163 InterlockedIncrement(&this->count);
1164 NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
1165 head = next;
1168 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, this->writer_head) == this->writer_head)
1169 return;
1170 InterlockedOr(&this->count, WRITER_WAITING);
1173 typedef struct {
1174 reader_writer_lock *lock;
1175 } reader_writer_lock_scoped_lock;
1177 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
1178 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
1179 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_ctor, 8)
1180 reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_ctor(
1181 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
1183 TRACE("(%p %p)\n", this, lock);
1185 this->lock = lock;
1186 reader_writer_lock_lock(lock);
1187 return this;
1190 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ */
1191 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QEAA@XZ */
1192 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_dtor, 4)
1193 void __thiscall reader_writer_lock_scoped_lock_dtor(reader_writer_lock_scoped_lock *this)
1195 TRACE("(%p)\n", this);
1196 reader_writer_lock_unlock(this->lock);
1199 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
1200 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
1201 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_ctor, 8)
1202 reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_read_ctor(
1203 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
1205 TRACE("(%p %p)\n", this, lock);
1207 this->lock = lock;
1208 reader_writer_lock_lock_read(lock);
1209 return this;
1212 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ */
1213 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@XZ */
1214 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_dtor, 4)
1215 void __thiscall reader_writer_lock_scoped_lock_read_dtor(reader_writer_lock_scoped_lock *this)
1217 TRACE("(%p)\n", this);
1218 reader_writer_lock_unlock(this->lock);
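/* Illustrative sketch (not part of the module): how the reader_writer_lock above is
 * used; unlock() releases whichever of the two modes the calling thread holds. */
static void example_rwl_usage(void)
{
    reader_writer_lock rwl;

    reader_writer_lock_ctor(&rwl);

    reader_writer_lock_lock_read(&rwl);           /* shared mode, multiple readers allowed */
    /* ... read shared state ... */
    reader_writer_lock_unlock(&rwl);

    reader_writer_lock_lock(&rwl);                /* exclusive (writer) mode */
    /* ... modify shared state ... */
    reader_writer_lock_unlock(&rwl);

    if (reader_writer_lock_try_lock_read(&rwl))   /* non-blocking shared attempt */
        reader_writer_lock_unlock(&rwl);

    reader_writer_lock_dtor(&rwl);
}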
1221 typedef struct {
1222 CRITICAL_SECTION cs;
1223 } _ReentrantBlockingLock;
1225 /* ??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
1226 /* ??0_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
1227 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_ctor, 4)
1228 _ReentrantBlockingLock* __thiscall _ReentrantBlockingLock_ctor(_ReentrantBlockingLock *this)
1230 TRACE("(%p)\n", this);
1232 InitializeCriticalSection(&this->cs);
1233 this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": _ReentrantBlockingLock");
1234 return this;
1237 /* ??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
1238 /* ??1_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
1239 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_dtor, 4)
1240 void __thiscall _ReentrantBlockingLock_dtor(_ReentrantBlockingLock *this)
1242 TRACE("(%p)\n", this);
1244 this->cs.DebugInfo->Spare[0] = 0;
1245 DeleteCriticalSection(&this->cs);
1248 /* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
1249 /* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
1250 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Acquire, 4)
1251 void __thiscall _ReentrantBlockingLock__Acquire(_ReentrantBlockingLock *this)
1253 TRACE("(%p)\n", this);
1254 EnterCriticalSection(&this->cs);
1257 /* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
1258 /* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
1259 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Release, 4)
1260 void __thiscall _ReentrantBlockingLock__Release(_ReentrantBlockingLock *this)
1262 TRACE("(%p)\n", this);
1263 LeaveCriticalSection(&this->cs);
1266 /* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ */
1267 /* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QEAA_NXZ */
1268 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__TryAcquire, 4)
1269 MSVCRT_bool __thiscall _ReentrantBlockingLock__TryAcquire(_ReentrantBlockingLock *this)
1271 TRACE("(%p)\n", this);
1272 return TryEnterCriticalSection(&this->cs);
1274 #endif
1276 #if _MSVCR_VER == 110
1277 static LONG shared_ptr_lock;
1279 void __cdecl _Lock_shared_ptr_spin_lock(void)
1281 LONG l = 0;
1283 while(InterlockedCompareExchange(&shared_ptr_lock, 1, 0) != 0) {
1284 if(l++ == 1000) {
1285 Sleep(0);
1286 l = 0;
1291 void __cdecl _Unlock_shared_ptr_spin_lock(void)
1293 shared_ptr_lock = 0;
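/* Illustrative sketch (not part of the module): the two exports above implement a
 * single global spin lock; every _Lock call must be matched by an _Unlock on the
 * same thread, and the protected section should stay short. */
static void example_shared_ptr_lock_usage(void)
{
    _Lock_shared_ptr_spin_lock();
    /* ... short critical section ... */
    _Unlock_shared_ptr_spin_lock();
}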
1295 #endif
1297 /**********************************************************************
1298 * msvcrt_free_locks (internal)
1300  * Uninitialize all mt locks. Assume that neither _lock nor _unlock will
1301  * be called once we are in this routine (i.e. _LOCKTAB_LOCK can be deleted).
1304 void msvcrt_free_locks(void)
1306 int i;
1308 TRACE( ": uninitializing all mtlocks\n" );
1310 /* Uninitialize the table */
1311 for( i=0; i < _TOTAL_LOCKS; i++ )
1313 if( lock_table[ i ].bInit )
1315 msvcrt_uninitialize_mlock( i );
1319 #if _MSVCR_VER >= 100
1320 if(keyed_event)
1321 NtClose(keyed_event);
1322 #endif