/*
 * Concurrency namespace implementation
 *
 * Copyright 2017 Piotr Caban
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include <stdarg.h>
#include <stdbool.h>

#include "windef.h"
#include "winternl.h"
#include "wine/debug.h"
#include "msvcrt.h"
#include "cxx.h"

#if _MSVCR_VER >= 100

WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);

typedef exception cexception;
CREATE_EXCEPTION_OBJECT(cexception)

static LONG context_id = -1;
static LONG scheduler_id = -1;

typedef enum {
    SchedulerKind,
    MaxConcurrency,
    MinConcurrency,
    TargetOversubscriptionFactor,
    LocalContextCacheSize,
    ContextStackSize,
    ContextPriority,
    SchedulingProtocol,
    DynamicProgressFeedback,
    WinRTInitialization,
    last_policy_id
} PolicyElementKey;

typedef struct {
    struct _policy_container {
        unsigned int policies[last_policy_id];
    } *policy_container;
} SchedulerPolicy;

typedef struct {
    const vtable_ptr *vtable;
} Context;
#define call_Context_GetId(this) CALL_VTBL_FUNC(this, 0, \
        unsigned int, (const Context*), (this))
#define call_Context_GetVirtualProcessorId(this) CALL_VTBL_FUNC(this, 4, \
        unsigned int, (const Context*), (this))
#define call_Context_GetScheduleGroupId(this) CALL_VTBL_FUNC(this, 8, \
        unsigned int, (const Context*), (this))
#define call_Context_dtor(this, flags) CALL_VTBL_FUNC(this, 20, \
        Context*, (Context*, unsigned int), (this, flags))

typedef struct {
    Context *context;
} _Context;
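
/* Header prepended to every block handed out by Concurrency_Alloc below:
 * while a block is cached it sits on a per-context free list through the
 * "free" arm, and while it is in use only the bucket index in the "alloc"
 * arm is kept so Concurrency_Free knows which cache list to return it to. */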
union allocator_cache_entry {
    struct _free {
        int depth;
        union allocator_cache_entry *next;
    } free;
    struct _alloc {
        int bucket;
        char mem[1];
    } alloc;
};

struct scheduler_list {
    struct Scheduler *scheduler;
    struct scheduler_list *next;
};

typedef struct {
    Context context;
    struct scheduler_list scheduler;
    unsigned int id;
    union allocator_cache_entry *allocator_cache[8];
} ExternalContextBase;
extern const vtable_ptr ExternalContextBase_vtable;
static void ExternalContextBase_ctor(ExternalContextBase*);

typedef struct Scheduler {
    const vtable_ptr *vtable;
} Scheduler;
#define call_Scheduler_Id(this) CALL_VTBL_FUNC(this, 4, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetNumberOfVirtualProcessors(this) CALL_VTBL_FUNC(this, 8, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetPolicy(this,policy) CALL_VTBL_FUNC(this, 12, \
        SchedulerPolicy*, (Scheduler*,SchedulerPolicy*), (this,policy))
#define call_Scheduler_Reference(this) CALL_VTBL_FUNC(this, 16, unsigned int, (Scheduler*), (this))
#define call_Scheduler_Release(this) CALL_VTBL_FUNC(this, 20, unsigned int, (Scheduler*), (this))
#define call_Scheduler_RegisterShutdownEvent(this,event) CALL_VTBL_FUNC(this, 24, void, (Scheduler*,HANDLE), (this,event))
#define call_Scheduler_Attach(this) CALL_VTBL_FUNC(this, 28, void, (Scheduler*), (this))
#if _MSVCR_VER > 100
#define call_Scheduler_CreateScheduleGroup_loc(this,placement) CALL_VTBL_FUNC(this, 32, \
        /*ScheduleGroup*/void*, (Scheduler*,/*location*/void*), (this,placement))
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 36, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask_loc(this,proc,data,placement) CALL_VTBL_FUNC(this, 40, \
        void, (Scheduler*,void (__cdecl*)(void*),void*,/*location*/void*), (this,proc,data,placement))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 44, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#define call_Scheduler_IsAvailableLocation(this,placement) CALL_VTBL_FUNC(this, 48, \
        bool, (Scheduler*,const /*location*/void*), (this,placement))
#else
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 32, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 36, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#endif

typedef struct {
    Scheduler scheduler;
    LONG ref;
    unsigned int id;
    unsigned int virt_proc_no;
    SchedulerPolicy policy;
    int shutdown_count;
    int shutdown_size;
    HANDLE *shutdown_events;
    CRITICAL_SECTION cs;
} ThreadScheduler;
extern const vtable_ptr ThreadScheduler_vtable;

typedef struct {
    Scheduler *scheduler;
} _Scheduler;

typedef struct {
    char empty;
} _CurrentScheduler;

typedef enum
{
    SPINWAIT_INIT,
    SPINWAIT_SPIN,
    SPINWAIT_YIELD,
    SPINWAIT_DONE
} SpinWait_state;

typedef void (__cdecl *yield_func)(void);

typedef struct
{
    ULONG spin;
    ULONG unknown;
    SpinWait_state state;
    yield_func yield_func;
} SpinWait;

/* keep in sync with msvcp90/msvcp90.h */
typedef struct cs_queue
{
    struct cs_queue *next;
#if _MSVCR_VER >= 110
    LONG free;
    int unknown;
#endif
} cs_queue;

typedef struct
{
    ULONG_PTR unk_thread_id;
    cs_queue unk_active;
#if _MSVCR_VER >= 110
    void *unknown[2];
#else
    void *unknown[1];
#endif
    cs_queue *head;
    void *tail;
} critical_section;

typedef struct
{
    critical_section *cs;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } lock;
} critical_section_scoped_lock;

typedef struct
{
    critical_section cs;
} _NonReentrantPPLLock;

typedef struct
{
    _NonReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _NonReentrantPPLLock__Scoped_lock;

typedef struct
{
    critical_section cs;
    LONG count;
    LONG owner;
} _ReentrantPPLLock;

typedef struct
{
    _ReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _ReentrantPPLLock__Scoped_lock;

#define EVT_RUNNING (void*)1
#define EVT_WAITING NULL

struct thread_wait;
typedef struct thread_wait_entry
{
    struct thread_wait *wait;
    struct thread_wait_entry *next;
    struct thread_wait_entry *prev;
} thread_wait_entry;

typedef struct thread_wait
{
    void *signaled;
    LONG pending_waits;
    thread_wait_entry entries[1];
} thread_wait;

typedef struct
{
    thread_wait_entry *waiters;
    INT_PTR signaled;
    critical_section cs;
} event;

#if _MSVCR_VER >= 110
#define CV_WAKE (void*)1
typedef struct cv_queue {
    struct cv_queue *next;
    LONG expired;
} cv_queue;

typedef struct {
    /* cv_queue structure is not binary compatible */
    cv_queue *queue;
    critical_section lock;
} _Condition_variable;
#endif

typedef struct rwl_queue
{
    struct rwl_queue *next;
} rwl_queue;

#define WRITER_WAITING 0x80000000
/* FIXME: reader_writer_lock structure is not binary compatible
 * it can't exceed 28/56 bytes */
typedef struct
{
    LONG count;
    LONG thread_id;
    rwl_queue active;
    rwl_queue *writer_head;
    rwl_queue *writer_tail;
    rwl_queue *reader_head;
} reader_writer_lock;

typedef struct {
    reader_writer_lock *lock;
} reader_writer_lock_scoped_lock;

typedef struct {
    CRITICAL_SECTION cs;
} _ReentrantBlockingLock;

#define TICKSPERMSEC 10000
typedef struct {
    const vtable_ptr *vtable;
    TP_TIMER *timer;
    unsigned int elapse;
    bool repeat;
} _Timer;
extern const vtable_ptr _Timer_vtable;
#define call__Timer_callback(this) CALL_VTBL_FUNC(this, 4, void, (_Timer*), (this))

typedef exception improper_lock;
extern const vtable_ptr improper_lock_vtable;

typedef exception improper_scheduler_attach;
extern const vtable_ptr improper_scheduler_attach_vtable;

typedef exception improper_scheduler_detach;
extern const vtable_ptr improper_scheduler_detach_vtable;

typedef exception invalid_scheduler_policy_key;
extern const vtable_ptr invalid_scheduler_policy_key_vtable;

typedef exception invalid_scheduler_policy_thread_specification;
extern const vtable_ptr invalid_scheduler_policy_thread_specification_vtable;

typedef exception invalid_scheduler_policy_value;
extern const vtable_ptr invalid_scheduler_policy_value_vtable;

typedef struct {
    exception e;
    HRESULT hr;
} scheduler_resource_allocation_error;
extern const vtable_ptr scheduler_resource_allocation_error_vtable;

enum ConcRT_EventType
{
    CONCRT_EVENT_GENERIC,
    CONCRT_EVENT_START,
    CONCRT_EVENT_END,
    CONCRT_EVENT_BLOCK,
    CONCRT_EVENT_UNBLOCK,
    CONCRT_EVENT_YIELD,
    CONCRT_EVENT_ATTACH,
    CONCRT_EVENT_DETACH
};

static DWORD context_tls_index = TLS_OUT_OF_INDEXES;
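
/* Critical section initialized at compile time, following the usual Wine
 * idiom: the static CRITICAL_SECTION_DEBUG entry names the section for
 * debugging, and the -1 in the initializer below is the LockCount value of
 * an unlocked section. */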
static CRITICAL_SECTION default_scheduler_cs;
static CRITICAL_SECTION_DEBUG default_scheduler_cs_debug =
{
    0, 0, &default_scheduler_cs,
    { &default_scheduler_cs_debug.ProcessLocksList, &default_scheduler_cs_debug.ProcessLocksList },
      0, 0, { (DWORD_PTR)(__FILE__ ": default_scheduler_cs") }
};
static CRITICAL_SECTION default_scheduler_cs = { &default_scheduler_cs_debug, -1, 0, 0, 0, 0 };
static SchedulerPolicy default_scheduler_policy;
static ThreadScheduler *default_scheduler;

static HANDLE keyed_event;

static void create_default_scheduler(void);

/* ??0improper_lock@Concurrency@@QAE@PBD@Z */
/* ??0improper_lock@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_lock_ctor_str, 8)
improper_lock* __thiscall improper_lock_ctor_str(improper_lock *this, const char *str)
{
    TRACE("(%p %p)\n", this, str);
    return __exception_ctor(this, str, &improper_lock_vtable);
}

/* ??0improper_lock@Concurrency@@QAE@XZ */
/* ??0improper_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_lock_ctor, 4)
improper_lock* __thiscall improper_lock_ctor(improper_lock *this)
{
    return improper_lock_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_lock_copy_ctor,8)
improper_lock * __thiscall improper_lock_copy_ctor(improper_lock *this, const improper_lock *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    return __exception_copy_ctor(this, rhs, &improper_lock_vtable);
}

/* ??0improper_scheduler_attach@Concurrency@@QAE@PBD@Z */
/* ??0improper_scheduler_attach@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor_str, 8)
improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor_str(
        improper_scheduler_attach *this, const char *str)
{
    TRACE("(%p %p)\n", this, str);
    return __exception_ctor(this, str, &improper_scheduler_attach_vtable);
}

/* ??0improper_scheduler_attach@Concurrency@@QAE@XZ */
/* ??0improper_scheduler_attach@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor, 4)
improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor(
        improper_scheduler_attach *this)
{
    return improper_scheduler_attach_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_copy_ctor,8)
improper_scheduler_attach * __thiscall improper_scheduler_attach_copy_ctor(
        improper_scheduler_attach * _this, const improper_scheduler_attach * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &improper_scheduler_attach_vtable);
}

/* ??0improper_scheduler_detach@Concurrency@@QAE@PBD@Z */
/* ??0improper_scheduler_detach@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor_str, 8)
improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor_str(
        improper_scheduler_detach *this, const char *str)
{
    TRACE("(%p %p)\n", this, str);
    return __exception_ctor(this, str, &improper_scheduler_detach_vtable);
}

/* ??0improper_scheduler_detach@Concurrency@@QAE@XZ */
/* ??0improper_scheduler_detach@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor, 4)
improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor(
        improper_scheduler_detach *this)
{
    return improper_scheduler_detach_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_copy_ctor,8)
improper_scheduler_detach * __thiscall improper_scheduler_detach_copy_ctor(
        improper_scheduler_detach * _this, const improper_scheduler_detach * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &improper_scheduler_detach_vtable);
}

/* ??0invalid_scheduler_policy_key@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor_str, 8)
invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor_str(
        invalid_scheduler_policy_key *this, const char *str)
{
    TRACE("(%p %p)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_key_vtable);
}

/* ??0invalid_scheduler_policy_key@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor, 4)
invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor(
        invalid_scheduler_policy_key *this)
{
    return invalid_scheduler_policy_key_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_copy_ctor,8)
invalid_scheduler_policy_key * __thiscall invalid_scheduler_policy_key_copy_ctor(
        invalid_scheduler_policy_key * _this, const invalid_scheduler_policy_key * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_key_vtable);
}

/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor_str, 8)
invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor_str(
        invalid_scheduler_policy_thread_specification *this, const char *str)
{
    TRACE("(%p %p)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_thread_specification_vtable);
}

/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor, 4)
invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor(
        invalid_scheduler_policy_thread_specification *this)
{
    return invalid_scheduler_policy_thread_specification_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_copy_ctor,8)
invalid_scheduler_policy_thread_specification * __thiscall invalid_scheduler_policy_thread_specification_copy_ctor(
        invalid_scheduler_policy_thread_specification * _this, const invalid_scheduler_policy_thread_specification * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_thread_specification_vtable);
}

/* ??0invalid_scheduler_policy_value@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor_str, 8)
invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor_str(
        invalid_scheduler_policy_value *this, const char *str)
{
    TRACE("(%p %p)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_value_vtable);
}

/* ??0invalid_scheduler_policy_value@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor, 4)
invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor(
        invalid_scheduler_policy_value *this)
{
    return invalid_scheduler_policy_value_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_copy_ctor,8)
invalid_scheduler_policy_value * __thiscall invalid_scheduler_policy_value_copy_ctor(
        invalid_scheduler_policy_value * _this, const invalid_scheduler_policy_value * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_value_vtable);
}

/* ??0scheduler_resource_allocation_error@Concurrency@@QAE@PBDJ@Z */
/* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@PEBDJ@Z */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor_name, 12)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor_name(
        scheduler_resource_allocation_error *this, const char *name, HRESULT hr)
{
    TRACE("(%p %s %lx)\n", this, wine_dbgstr_a(name), hr);
    __exception_ctor(&this->e, name, &scheduler_resource_allocation_error_vtable);
    this->hr = hr;
    return this;
}

/* ??0scheduler_resource_allocation_error@Concurrency@@QAE@J@Z */
/* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@J@Z */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor, 8)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor(
        scheduler_resource_allocation_error *this, HRESULT hr)
{
    return scheduler_resource_allocation_error_ctor_name(this, NULL, hr);
}

DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_copy_ctor,8)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_copy_ctor(
        scheduler_resource_allocation_error *this,
        const scheduler_resource_allocation_error *rhs)
{
    TRACE("(%p,%p)\n", this, rhs);

    if (!rhs->e.do_free)
        memcpy(this, rhs, sizeof(*this));
    else
        scheduler_resource_allocation_error_ctor_name(this, rhs->e.name, rhs->hr);
    return this;
}

/* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QBEJXZ */
/* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QEBAJXZ */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_get_error_code, 4)
HRESULT __thiscall scheduler_resource_allocation_error_get_error_code(
        const scheduler_resource_allocation_error *this)
{
    TRACE("(%p)\n", this);
    return this->hr;
}

DEFINE_RTTI_DATA1(improper_lock, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_lock@Concurrency@@")
DEFINE_RTTI_DATA1(improper_scheduler_attach, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_scheduler_attach@Concurrency@@")
DEFINE_RTTI_DATA1(improper_scheduler_detach, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_scheduler_detach@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_key, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_key@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_thread_specification, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_thread_specification@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_value, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_value@Concurrency@@")
DEFINE_RTTI_DATA1(scheduler_resource_allocation_error, 0, &cexception_rtti_base_descriptor,
        ".?AVscheduler_resource_allocation_error@Concurrency@@")

DEFINE_CXX_DATA1(improper_lock, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(improper_scheduler_attach, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(improper_scheduler_detach, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_key, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_thread_specification, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_value, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(scheduler_resource_allocation_error, &cexception_cxx_type_info, cexception_dtor)

__ASM_BLOCK_BEGIN(concurrency_exception_vtables)
    __ASM_VTABLE(improper_lock,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(improper_scheduler_attach,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(improper_scheduler_detach,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_key,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_thread_specification,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_value,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(scheduler_resource_allocation_error,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
__ASM_BLOCK_END

static Context* try_get_current_context(void)
{
    if (context_tls_index == TLS_OUT_OF_INDEXES)
        return NULL;
    return TlsGetValue(context_tls_index);
}

static BOOL WINAPI init_context_tls_index(INIT_ONCE *once, void *param, void **context)
{
    context_tls_index = TlsAlloc();
    return context_tls_index != TLS_OUT_OF_INDEXES;
}
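
/* Returns the calling thread's context, creating an ExternalContextBase on
 * first use: the TLS slot is allocated once per process, and a NULL slot
 * value means this thread has not touched the concurrency runtime yet. */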
static Context* get_current_context(void)
{
    static INIT_ONCE init_once = INIT_ONCE_STATIC_INIT;
    Context *ret;

    if(!InitOnceExecuteOnce(&init_once, init_context_tls_index, NULL, NULL))
    {
        scheduler_resource_allocation_error e;
        scheduler_resource_allocation_error_ctor_name(&e, NULL,
                HRESULT_FROM_WIN32(GetLastError()));
        _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
    }

    ret = TlsGetValue(context_tls_index);
    if (!ret) {
        ExternalContextBase *context = operator_new(sizeof(ExternalContextBase));
        ExternalContextBase_ctor(context);
        TlsSetValue(context_tls_index, context);
        ret = &context->context;
    }
    return ret;
}

static Scheduler* try_get_current_scheduler(void)
{
    ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();

    if (!context)
        return NULL;

    if (context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return NULL;
    }
    return context->scheduler.scheduler;
}

static Scheduler* get_current_scheduler(void)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    if (context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return NULL;
    }
    return context->scheduler.scheduler;
}

/* ?CurrentContext@Context@Concurrency@@SAPAV12@XZ */
/* ?CurrentContext@Context@Concurrency@@SAPEAV12@XZ */
Context* __cdecl Context_CurrentContext(void)
{
    TRACE("()\n");
    return get_current_context();
}

/* ?Id@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_Id(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetId(ctx) : -1;
}

/* ?Block@Context@Concurrency@@SAXXZ */
void __cdecl Context_Block(void)
{
    FIXME("()\n");
}

/* ?Yield@Context@Concurrency@@SAXXZ */
/* ?_Yield@_Context@details@Concurrency@@SAXXZ */
void __cdecl Context_Yield(void)
{
    FIXME("()\n");
}

/* ?_SpinYield@Context@Concurrency@@SAXXZ */
void __cdecl Context__SpinYield(void)
{
    FIXME("()\n");
}

/* ?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ */
bool __cdecl Context_IsCurrentTaskCollectionCanceling(void)
{
    FIXME("()\n");
    return FALSE;
}

/* ?Oversubscribe@Context@Concurrency@@SAX_N@Z */
void __cdecl Context_Oversubscribe(bool begin)
{
    FIXME("(%x)\n", begin);
}

/* ?ScheduleGroupId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_ScheduleGroupId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetScheduleGroupId(ctx) : -1;
}

/* ?VirtualProcessorId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_VirtualProcessorId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetVirtualProcessorId(ctx) : -1;
}

#if _MSVCR_VER > 100
/* ?_CurrentContext@_Context@details@Concurrency@@SA?AV123@XZ */
_Context *__cdecl _Context__CurrentContext(_Context *ret)
{
    TRACE("(%p)\n", ret);
    ret->context = Context_CurrentContext();
    return ret;
}
#endif

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetId, 4)
unsigned int __thiscall ExternalContextBase_GetId(const ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetVirtualProcessorId, 4)
unsigned int __thiscall ExternalContextBase_GetVirtualProcessorId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetScheduleGroupId, 4)
unsigned int __thiscall ExternalContextBase_GetScheduleGroupId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Unblock, 4)
void __thiscall ExternalContextBase_Unblock(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_IsSynchronouslyBlocked, 4)
bool __thiscall ExternalContextBase_IsSynchronouslyBlocked(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return FALSE;
}

static void ExternalContextBase_dtor(ExternalContextBase *this)
{
    struct scheduler_list *scheduler_cur, *scheduler_next;
    union allocator_cache_entry *next, *cur;
    int i;

    /* TODO: move the allocator cache to scheduler so it can be reused */
    for(i=0; i<ARRAY_SIZE(this->allocator_cache); i++) {
        for(cur = this->allocator_cache[i]; cur; cur=next) {
            next = cur->free.next;
            operator_delete(cur);
        }
    }

    if (this->scheduler.scheduler) {
        call_Scheduler_Release(this->scheduler.scheduler);

        for(scheduler_cur=this->scheduler.next; scheduler_cur; scheduler_cur=scheduler_next) {
            scheduler_next = scheduler_cur->next;
            call_Scheduler_Release(scheduler_cur->scheduler);
            operator_delete(scheduler_cur);
        }
    }
}
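
/* MSVC vector deleting destructor convention: bit 0 of "flags" means free
 * the object's memory, bit 1 means "this" is an array allocated with vector
 * new, with the element count stored in the INT_PTR preceding the first
 * object. */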
DEFINE_THISCALL_WRAPPER(ExternalContextBase_vector_dtor, 8)
Context* __thiscall ExternalContextBase_vector_dtor(ExternalContextBase *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ExternalContextBase_dtor(this+i);
        operator_delete(ptr);
    } else {
        ExternalContextBase_dtor(this);
        if(flags & 1)
            operator_delete(this);
    }

    return &this->context;
}

static void ExternalContextBase_ctor(ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);

    memset(this, 0, sizeof(*this));
    this->context.vtable = &ExternalContextBase_vtable;
    this->id = InterlockedIncrement(&context_id);

    create_default_scheduler();
    this->scheduler.scheduler = &default_scheduler->scheduler;
    call_Scheduler_Reference(&default_scheduler->scheduler);
}
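
/* The allocator below is a per-context size-bucketed cache: bucket i serves
 * blocks of 1 << (i+4) bytes, i.e. 16 bytes up to 2 KiB.  For example, a
 * 100-byte request grows by the alloc.mem header offset to 104 bytes and is
 * then served from bucket 3 (128 bytes); anything past the largest bucket
 * falls back to operator new and is tagged with bucket -1. */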
/* ?Alloc@Concurrency@@YAPAXI@Z */
/* ?Alloc@Concurrency@@YAPEAX_K@Z */
void * CDECL Concurrency_Alloc(size_t size)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    union allocator_cache_entry *p;

    size += FIELD_OFFSET(union allocator_cache_entry, alloc.mem);
    if (size < sizeof(*p))
        size = sizeof(*p);

    if (context->context.vtable != &ExternalContextBase_vtable) {
        p = operator_new(size);
        p->alloc.bucket = -1;
    }else {
        int i;

        C_ASSERT(sizeof(union allocator_cache_entry) <= 1 << 4);
        for(i=0; i<ARRAY_SIZE(context->allocator_cache); i++)
            if (1 << (i+4) >= size) break;

        if(i==ARRAY_SIZE(context->allocator_cache)) {
            p = operator_new(size);
            p->alloc.bucket = -1;
        }else if (context->allocator_cache[i]) {
            p = context->allocator_cache[i];
            context->allocator_cache[i] = p->free.next;
            p->alloc.bucket = i;
        }else {
            p = operator_new(1 << (i+4));
            p->alloc.bucket = i;
        }
    }

    TRACE("(%Iu) returning %p\n", size, p->alloc.mem);
    return p->alloc.mem;
}
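
/* Freeing is the mirror image: blocks tagged with a valid bucket go back on
 * the owning context's free list as long as that list's depth stays below
 * 20; bucket -1 blocks (oversized, or allocated under a foreign context) go
 * straight back to operator delete. */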
/* ?Free@Concurrency@@YAXPAX@Z */
/* ?Free@Concurrency@@YAXPEAX@Z */
void CDECL Concurrency_Free(void* mem)
{
    union allocator_cache_entry *p = (union allocator_cache_entry*)((char*)mem-FIELD_OFFSET(union allocator_cache_entry, alloc.mem));
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    int bucket = p->alloc.bucket;

    TRACE("(%p)\n", mem);

    if (context->context.vtable != &ExternalContextBase_vtable) {
        operator_delete(p);
    }else {
        if(bucket >= 0 && bucket < ARRAY_SIZE(context->allocator_cache) &&
            (!context->allocator_cache[bucket] || context->allocator_cache[bucket]->free.depth < 20)) {
            p->free.next = context->allocator_cache[bucket];
            p->free.depth = p->free.next ? p->free.next->free.depth+1 : 0;
            context->allocator_cache[bucket] = p;
        }else {
            operator_delete(p);
        }
    }
}

/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z */
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QEAAIW4PolicyElementKey@2@I@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetPolicyValue, 12)
unsigned int __thiscall SchedulerPolicy_SetPolicyValue(SchedulerPolicy *this,
        PolicyElementKey policy, unsigned int val)
{
    unsigned int ret;

    TRACE("(%p %d %d)\n", this, policy, val);

    if (policy == MinConcurrency) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "MinConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    if (policy == MaxConcurrency) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "MaxConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    if (policy >= last_policy_id) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }

    switch(policy) {
    case SchedulerKind:
        if (val) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "SchedulerKind");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case TargetOversubscriptionFactor:
        if (!val) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "TargetOversubscriptionFactor");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case ContextPriority:
        if (((int)val < -7 /* THREAD_PRIORITY_REALTIME_LOWEST */
                || val > 6 /* THREAD_PRIORITY_REALTIME_HIGHEST */)
                && val != THREAD_PRIORITY_IDLE && val != THREAD_PRIORITY_TIME_CRITICAL
                && val != INHERIT_THREAD_PRIORITY) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "ContextPriority");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case SchedulingProtocol:
    case DynamicProgressFeedback:
    case WinRTInitialization:
        if (val != 0 && val != 1) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "SchedulingProtocol");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    default:
        break;
    }

    ret = this->policy_container->policies[policy];
    this->policy_container->policies[policy] = val;
    return ret;
}

/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z */
/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QEAAXII@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetConcurrencyLimits, 12)
void __thiscall SchedulerPolicy_SetConcurrencyLimits(SchedulerPolicy *this,
        unsigned int min_concurrency, unsigned int max_concurrency)
{
    TRACE("(%p %d %d)\n", this, min_concurrency, max_concurrency);

    if (min_concurrency > max_concurrency) {
        invalid_scheduler_policy_thread_specification e;
        invalid_scheduler_policy_thread_specification_ctor_str(&e, NULL);
        _CxxThrowException(&e, &invalid_scheduler_policy_thread_specification_exception_type);
    }
    if (!max_concurrency) {
        invalid_scheduler_policy_value e;
        invalid_scheduler_policy_value_ctor_str(&e, "MaxConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
    }

    this->policy_container->policies[MinConcurrency] = min_concurrency;
    this->policy_container->policies[MaxConcurrency] = max_concurrency;
}

/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z */
/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QEBAIW4PolicyElementKey@2@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_GetPolicyValue, 8)
unsigned int __thiscall SchedulerPolicy_GetPolicyValue(
        const SchedulerPolicy *this, PolicyElementKey policy)
{
    TRACE("(%p %d)\n", this, policy);

    if (policy >= last_policy_id) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    return this->policy_container->policies[policy];
}

/* ??0SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_ctor, 4)
SchedulerPolicy* __thiscall SchedulerPolicy_ctor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);

    this->policy_container = operator_new(sizeof(*this->policy_container));
    /* TODO: default values can probably be affected by CurrentScheduler */
    this->policy_container->policies[SchedulerKind] = 0;
    this->policy_container->policies[MaxConcurrency] = -1;
    this->policy_container->policies[MinConcurrency] = 1;
    this->policy_container->policies[TargetOversubscriptionFactor] = 1;
    this->policy_container->policies[LocalContextCacheSize] = 8;
    this->policy_container->policies[ContextStackSize] = 0;
    this->policy_container->policies[ContextPriority] = THREAD_PRIORITY_NORMAL;
    this->policy_container->policies[SchedulingProtocol] = 0;
    this->policy_container->policies[DynamicProgressFeedback] = 1;
    return this;
}
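
/* Variadic constructor: "n" is the number of (key, value) pairs that follow.
 * For example, the native C++ declaration
 *     SchedulerPolicy policy(2, MinConcurrency, 1, MaxConcurrency, 4);
 * reaches this function as
 *     SchedulerPolicy_ctor_policies(&policy, 2, MinConcurrency, 1, MaxConcurrency, 4);
 * MinConcurrency/MaxConcurrency are collected separately so that
 * SetConcurrencyLimits can validate them as a pair at the end. */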
/* ??0SchedulerPolicy@Concurrency@@QAA@IZZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@_KZZ */
/* TODO: don't leak policy_container on exception */
SchedulerPolicy* WINAPIV SchedulerPolicy_ctor_policies(
        SchedulerPolicy *this, size_t n, ...)
{
    unsigned int min_concurrency, max_concurrency;
    va_list valist;
    size_t i;

    TRACE("(%p %Iu)\n", this, n);

    SchedulerPolicy_ctor(this);
    min_concurrency = this->policy_container->policies[MinConcurrency];
    max_concurrency = this->policy_container->policies[MaxConcurrency];

    va_start(valist, n);
    for(i=0; i<n; i++) {
        PolicyElementKey policy = va_arg(valist, PolicyElementKey);
        unsigned int val = va_arg(valist, unsigned int);

        if(policy == MinConcurrency)
            min_concurrency = val;
        else if(policy == MaxConcurrency)
            max_concurrency = val;
        else
            SchedulerPolicy_SetPolicyValue(this, policy, val);
    }
    va_end(valist);

    SchedulerPolicy_SetConcurrencyLimits(this, min_concurrency, max_concurrency);
    return this;
}

/* ??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z */
/* ??4SchedulerPolicy@Concurrency@@QEAAAEAV01@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_op_assign, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_op_assign(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    memcpy(this->policy_container->policies, rhs->policy_container->policies,
            sizeof(this->policy_container->policies));
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z */
/* ??0SchedulerPolicy@Concurrency@@QEAA@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_copy_ctor, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_copy_ctor(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    SchedulerPolicy_ctor(this);
    return SchedulerPolicy_op_assign(this, rhs);
}

/* ??1SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??1SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_dtor, 4)
void __thiscall SchedulerPolicy_dtor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);
    operator_delete(this->policy_container);
}

static void ThreadScheduler_dtor(ThreadScheduler *this)
{
    int i;

    if(this->ref != 0) WARN("ref = %ld\n", this->ref);
    SchedulerPolicy_dtor(&this->policy);

    for(i=0; i<this->shutdown_count; i++)
        SetEvent(this->shutdown_events[i]);
    operator_delete(this->shutdown_events);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Id, 4)
unsigned int __thiscall ThreadScheduler_Id(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetNumberOfVirtualProcessors, 4)
unsigned int __thiscall ThreadScheduler_GetNumberOfVirtualProcessors(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->virt_proc_no;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetPolicy, 8)
SchedulerPolicy* __thiscall ThreadScheduler_GetPolicy(
        const ThreadScheduler *this, SchedulerPolicy *ret)
{
    TRACE("(%p %p)\n", this, ret);
    return SchedulerPolicy_copy_ctor(ret, &this->policy);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Reference, 4)
unsigned int __thiscall ThreadScheduler_Reference(ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return InterlockedIncrement(&this->ref);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Release, 4)
unsigned int __thiscall ThreadScheduler_Release(ThreadScheduler *this)
{
    unsigned int ret = InterlockedDecrement(&this->ref);

    TRACE("(%p)\n", this);

    if(!ret) {
        ThreadScheduler_dtor(this);
        operator_delete(this);
    }
    return ret;
}
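
/* The shutdown event array below is reallocated with doubled capacity
 * (1, 2, 4, ...) under the scheduler's critical section on every
 * registration; all registered events are signaled from ThreadScheduler_dtor
 * once the last reference is released. */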
DEFINE_THISCALL_WRAPPER(ThreadScheduler_RegisterShutdownEvent, 8)
void __thiscall ThreadScheduler_RegisterShutdownEvent(ThreadScheduler *this, HANDLE event)
{
    HANDLE *shutdown_events;
    int size;

    TRACE("(%p %p)\n", this, event);

    EnterCriticalSection(&this->cs);

    size = this->shutdown_size ? this->shutdown_size * 2 : 1;
    shutdown_events = operator_new(size * sizeof(*shutdown_events));
    memcpy(shutdown_events, this->shutdown_events,
            this->shutdown_count * sizeof(*shutdown_events));
    operator_delete(this->shutdown_events);
    this->shutdown_size = size;
    this->shutdown_events = shutdown_events;
    this->shutdown_events[this->shutdown_count++] = event;

    LeaveCriticalSection(&this->cs);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Attach, 4)
void __thiscall ThreadScheduler_Attach(ThreadScheduler *this)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    TRACE("(%p)\n", this);

    if(context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(context->scheduler.scheduler == &this->scheduler) {
        improper_scheduler_attach e;
        improper_scheduler_attach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_attach_exception_type);
    }

    if(context->scheduler.scheduler) {
        struct scheduler_list *l = operator_new(sizeof(*l));
        *l = context->scheduler;
        context->scheduler.next = l;
    }
    context->scheduler.scheduler = &this->scheduler;
    ThreadScheduler_Reference(this);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup_loc, 8)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup_loc(
        ThreadScheduler *this, /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup, 4)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup(ThreadScheduler *this)
{
    FIXME("(%p) stub\n", this);
    return NULL;
}

typedef struct
{
    void (__cdecl *proc)(void*);
    void *data;
} schedule_task_arg;

static void WINAPI schedule_task_proc(PTP_CALLBACK_INSTANCE instance, void *context, PTP_WORK work)
{
    schedule_task_arg arg;

    arg = *(schedule_task_arg*)context;
    operator_delete(context);
    arg.proc(arg.data);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask_loc, 16)
void __thiscall ThreadScheduler_ScheduleTask_loc(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data, /*location*/void *placement)
{
    schedule_task_arg *arg;
    TP_WORK *work;

    FIXME("(%p %p %p %p) stub\n", this, proc, data, placement);

    arg = operator_new(sizeof(*arg));
    arg->proc = proc;
    arg->data = data;

    work = CreateThreadpoolWork(schedule_task_proc, arg, NULL);
    if(!work) {
        scheduler_resource_allocation_error e;

        operator_delete(arg);
        scheduler_resource_allocation_error_ctor_name(&e, NULL,
                HRESULT_FROM_WIN32(GetLastError()));
        _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
    }
    SubmitThreadpoolWork(work);
    CloseThreadpoolWork(work);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask, 12)
void __thiscall ThreadScheduler_ScheduleTask(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data)
{
    FIXME("(%p %p %p) stub\n", this, proc, data);
    ThreadScheduler_ScheduleTask_loc(this, proc, data, NULL);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_IsAvailableLocation, 8)
bool __thiscall ThreadScheduler_IsAvailableLocation(
        const ThreadScheduler *this, const /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return FALSE;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_vector_dtor, 8)
Scheduler* __thiscall ThreadScheduler_vector_dtor(ThreadScheduler *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ThreadScheduler_dtor(this+i);
        operator_delete(ptr);
    } else {
        ThreadScheduler_dtor(this);
        if(flags & 1)
            operator_delete(this);
    }

    return &this->scheduler;
}

static ThreadScheduler* ThreadScheduler_ctor(ThreadScheduler *this,
        const SchedulerPolicy *policy)
{
    SYSTEM_INFO si;

    TRACE("(%p)->()\n", this);

    this->scheduler.vtable = &ThreadScheduler_vtable;
    this->ref = 1;
    this->id = InterlockedIncrement(&scheduler_id);
    SchedulerPolicy_copy_ctor(&this->policy, policy);

    GetSystemInfo(&si);
    this->virt_proc_no = SchedulerPolicy_GetPolicyValue(&this->policy, MaxConcurrency);
    if(this->virt_proc_no > si.dwNumberOfProcessors)
        this->virt_proc_no = si.dwNumberOfProcessors;

    this->shutdown_count = this->shutdown_size = 0;
    this->shutdown_events = NULL;

    InitializeCriticalSection(&this->cs);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": ThreadScheduler");
    return this;
}

/* ?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z */
/* ?Create@Scheduler@Concurrency@@SAPEAV12@AEBVSchedulerPolicy@2@@Z */
Scheduler* __cdecl Scheduler_Create(const SchedulerPolicy *policy)
{
    ThreadScheduler *ret;

    TRACE("(%p)\n", policy);

    ret = operator_new(sizeof(*ret));
    return &ThreadScheduler_ctor(ret, policy)->scheduler;
}

/* ?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ */
void __cdecl Scheduler_ResetDefaultSchedulerPolicy(void)
{
    TRACE("()\n");

    EnterCriticalSection(&default_scheduler_cs);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    SchedulerPolicy_ctor(&default_scheduler_policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl Scheduler_SetDefaultSchedulerPolicy(const SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler_policy.policy_container)
        SchedulerPolicy_copy_ctor(&default_scheduler_policy, policy);
    else
        SchedulerPolicy_op_assign(&default_scheduler_policy, policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Create@CurrentScheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?Create@CurrentScheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl CurrentScheduler_Create(const SchedulerPolicy *policy)
{
    Scheduler *scheduler;

    TRACE("(%p)\n", policy);

    scheduler = Scheduler_Create(policy);
    call_Scheduler_Attach(scheduler);
}

/* ?Detach@CurrentScheduler@Concurrency@@SAXXZ */
void __cdecl CurrentScheduler_Detach(void)
{
    ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();

    TRACE("()\n");

    if(!context) {
        improper_scheduler_detach e;
        improper_scheduler_detach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
    }

    if(context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(!context->scheduler.next) {
        improper_scheduler_detach e;
        improper_scheduler_detach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
    }

    call_Scheduler_Release(context->scheduler.scheduler);
    if(!context->scheduler.next) {
        context->scheduler.scheduler = NULL;
    }else {
        struct scheduler_list *entry = context->scheduler.next;
        context->scheduler.scheduler = entry->scheduler;
        context->scheduler.next = entry->next;
        operator_delete(entry);
    }
}
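
/* Double-checked creation of the process-wide default scheduler: a quick
 * unlocked check first, then a re-check under default_scheduler_cs before
 * constructing and publishing the instance. */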
static void create_default_scheduler(void)
{
    if(default_scheduler)
        return;

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler) {
        ThreadScheduler *scheduler;

        if(!default_scheduler_policy.policy_container)
            SchedulerPolicy_ctor(&default_scheduler_policy);

        scheduler = operator_new(sizeof(*scheduler));
        ThreadScheduler_ctor(scheduler, &default_scheduler_policy);
        default_scheduler = scheduler;
    }
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Get@CurrentScheduler@Concurrency@@SAPAVScheduler@2@XZ */
/* ?Get@CurrentScheduler@Concurrency@@SAPEAVScheduler@2@XZ */
Scheduler* __cdecl CurrentScheduler_Get(void)
{
    TRACE("()\n");
    return get_current_scheduler();
}

#if _MSVCR_VER > 100
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@AAVlocation@2@@Z */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@AEAVlocation@2@@Z */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup_loc(/*location*/void *placement)
{
    TRACE("(%p)\n", placement);
    return call_Scheduler_CreateScheduleGroup_loc(get_current_scheduler(), placement);
}
#endif

/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@XZ */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@XZ */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup(void)
{
    TRACE("()\n");
    return call_Scheduler_CreateScheduleGroup(get_current_scheduler());
}

/* ?GetNumberOfVirtualProcessors@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_GetNumberOfVirtualProcessors(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_GetNumberOfVirtualProcessors(scheduler);
}

/* ?GetPolicy@CurrentScheduler@Concurrency@@SA?AVSchedulerPolicy@2@XZ */
SchedulerPolicy* __cdecl CurrentScheduler_GetPolicy(SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);
    return call_Scheduler_GetPolicy(get_current_scheduler(), policy);
}

/* ?Id@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_Id(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_Id(scheduler);
}

#if _MSVCR_VER > 100
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NABVlocation@2@@Z */
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NAEBVlocation@2@@Z */
bool __cdecl CurrentScheduler_IsAvailableLocation(const /*location*/void *placement)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("(%p)\n", placement);

    if(!scheduler)
        return FALSE;
    return call_Scheduler_IsAvailableLocation(scheduler, placement);
}
#endif

/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPAX@Z */
/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPEAX@Z */
void __cdecl CurrentScheduler_RegisterShutdownEvent(HANDLE event)
{
    TRACE("(%p)\n", event);
    call_Scheduler_RegisterShutdownEvent(get_current_scheduler(), event);
}

#if _MSVCR_VER > 100
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0AAVlocation@2@@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0AEAVlocation@2@@Z */
void __cdecl CurrentScheduler_ScheduleTask_loc(void (__cdecl *proc)(void*),
        void *data, /*location*/void *placement)
{
    TRACE("(%p %p %p)\n", proc, data, placement);
    call_Scheduler_ScheduleTask_loc(get_current_scheduler(), proc, data, placement);
}
#endif

/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0@Z */
void __cdecl CurrentScheduler_ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    call_Scheduler_ScheduleTask(get_current_scheduler(), proc, data);
}

/* ??0_Scheduler@details@Concurrency@@QAE@PAVScheduler@2@@Z */
/* ??0_Scheduler@details@Concurrency@@QEAA@PEAVScheduler@2@@Z */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor_sched, 8)
_Scheduler* __thiscall _Scheduler_ctor_sched(_Scheduler *this, Scheduler *scheduler)
{
    TRACE("(%p %p)\n", this, scheduler);

    this->scheduler = scheduler;
    return this;
}

/* ??_F_Scheduler@details@Concurrency@@QAEXXZ */
/* ??_F_Scheduler@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor, 4)
_Scheduler* __thiscall _Scheduler_ctor(_Scheduler *this)
{
    return _Scheduler_ctor_sched(this, NULL);
}

/* ?_GetScheduler@_Scheduler@details@Concurrency@@QAEPAVScheduler@3@XZ */
/* ?_GetScheduler@_Scheduler@details@Concurrency@@QEAAPEAVScheduler@3@XZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__GetScheduler, 4)
Scheduler* __thiscall _Scheduler__GetScheduler(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return this->scheduler;
}

/* ?_Reference@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Reference@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Reference, 4)
unsigned int __thiscall _Scheduler__Reference(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Reference(this->scheduler);
}

/* ?_Release@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Release@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Release, 4)
unsigned int __thiscall _Scheduler__Release(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Release(this->scheduler);
}

/* ?_Get@_CurrentScheduler@details@Concurrency@@SA?AV_Scheduler@23@XZ */
_Scheduler* __cdecl _CurrentScheduler__Get(_Scheduler *ret)
{
    TRACE("()\n");
    return _Scheduler_ctor_sched(ret, get_current_scheduler());
}

/* ?_GetNumberOfVirtualProcessors@_CurrentScheduler@details@Concurrency@@SAIXZ */
unsigned int __cdecl _CurrentScheduler__GetNumberOfVirtualProcessors(void)
{
    TRACE("()\n");
    get_current_scheduler();
    return CurrentScheduler_GetNumberOfVirtualProcessors();
}

/* ?_Id@_CurrentScheduler@details@Concurrency@@SAIXZ */
unsigned int __cdecl _CurrentScheduler__Id(void)
{
    TRACE("()\n");
    get_current_scheduler();
    return CurrentScheduler_Id();
}

/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPEAX@Z0@Z */
void __cdecl _CurrentScheduler__ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    CurrentScheduler_ScheduleTask(proc, data);
}

/* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
unsigned int __cdecl SpinCount__Value(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors>1 ? 4000 : 0;
    }

    return val;
}

/* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 1;
    this->yield_func = yf;
    return this;
}

/* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 0;
    this->yield_func = yf;
    return this;
}

/* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
void __thiscall SpinWait_dtor(SpinWait *this)
{
    TRACE("(%p)\n", this);
}

/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
void __thiscall SpinWait__DoYield(SpinWait *this)
{
    TRACE("(%p)\n", this);

    if(this->unknown)
        this->yield_func();
}

/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
{
    TRACE("(%p)\n", this);
    return 1;
}

/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
{
    TRACE("(%p %d)\n", this, spin);

    this->spin = spin;
    this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
}

/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
void __thiscall SpinWait__Reset(SpinWait *this)
{
    SpinWait__SetSpinCount(this, SpinCount__Value());
}

/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
{
    TRACE("(%p)\n", this);

    this->spin--;
    return this->spin > 0;
}
1718 /* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
1719 /* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
1720 /* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
1721 /* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
1722 DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
1723 bool __thiscall SpinWait__SpinOnce(SpinWait *this)
1725 switch(this->state) {
1726 case SPINWAIT_INIT:
1727 SpinWait__Reset(this);
1728 /* fall through */
1729 case SPINWAIT_SPIN:
1730 InterlockedDecrement((LONG*)&this->spin);
1731 if(!this->spin)
1732 this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
1733 return TRUE;
1734 case SPINWAIT_YIELD:
1735 this->state = SPINWAIT_DONE;
1736 this->yield_func();
1737 return TRUE;
1738 default:
1739 SpinWait__Reset(this);
1740 return FALSE;
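/* A _SpinWait object walks a small state machine:
 *   SPINWAIT_INIT -> SPINWAIT_SPIN  (busy-loop 'spin' iterations)
 *                 -> SPINWAIT_YIELD (call yield_func once; only reached when
 *                                    built by SpinWait_ctor_yield, or when the
 *                                    spin count was set to zero)
 *                 -> SPINWAIT_DONE  (_SpinOnce returns FALSE and re-arms).
 * Typical callers, e.g. spin_wait_for_next_cs() below, keep invoking
 * SpinWait__SpinOnce() until it returns FALSE or the condition they are
 * polling becomes true. */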
/* ??0critical_section@Concurrency@@QAE@XZ */
/* ??0critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
critical_section* __thiscall critical_section_ctor(critical_section *this)
{
    TRACE("(%p)\n", this);

    if(!keyed_event) {
        HANDLE event;

        NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
        if(InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
            NtClose(event);
    }

    this->unk_thread_id = 0;
    this->head = this->tail = NULL;
    return this;
}

/* ??1critical_section@Concurrency@@QAE@XZ */
/* ??1critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
void __thiscall critical_section_dtor(critical_section *this)
{
    TRACE("(%p)\n", this);
}

static void __cdecl spin_wait_yield(void)
{
    Sleep(0);
}

static inline void spin_wait_for_next_cs(cs_queue *q)
{
    SpinWait sw;

    if(q->next) return;

    SpinWait_ctor(&sw, &spin_wait_yield);
    SpinWait__Reset(&sw);
    while(!q->next)
        SpinWait__SpinOnce(&sw);
    SpinWait_dtor(&sw);
}
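/* cs_lock() and critical_section_unlock() swap the tail pointer before the
 * predecessor's ->next link is published, so a queue node can briefly be
 * reachable as tail while its ->next is still NULL.  The linking store is
 * imminent on another thread, which is why a short spin (with Sleep(0)
 * yields) is enough to pick it up. */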
static inline void cs_set_head(critical_section *cs, cs_queue *q)
{
    cs->unk_thread_id = GetCurrentThreadId();
    cs->unk_active.next = q->next;
    cs->head = &cs->unk_active;
}

static inline void cs_lock(critical_section *cs, cs_queue *q)
{
    cs_queue *last;

    if(cs->unk_thread_id == GetCurrentThreadId()) {
        improper_lock e;
        improper_lock_ctor_str(&e, "Already locked");
        _CxxThrowException(&e, &improper_lock_exception_type);
    }

    memset(q, 0, sizeof(*q));
    last = InterlockedExchangePointer(&cs->tail, q);
    if(last) {
        last->next = q;
        NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
    }

    cs_set_head(cs, q);
    if(InterlockedCompareExchangePointer(&cs->tail, &cs->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        cs->unk_active.next = q->next;
    }
}
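/* The critical section is a keyed-event based queue lock.  Appending our
 * node with an atomic tail exchange tells us in one step whether the lock
 * was free: a non-NULL previous tail means it is held, so we link ourselves
 * behind it and sleep on the keyed event with our node as the key until the
 * owner releases us.  Once the lock is ours, the caller-provided node is
 * swapped out for the embedded unk_active entry, so the (typically
 * stack-allocated) node can safely go out of scope. */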
/* ?lock@critical_section@Concurrency@@QAEXXZ */
/* ?lock@critical_section@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
void __thiscall critical_section_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);
    cs_lock(this, &q);
}

/* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
/* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
bool __thiscall critical_section_try_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);

    if(this->unk_thread_id == GetCurrentThreadId())
        return FALSE;

    memset(&q, 0, sizeof(q));
    if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
        cs_set_head(this, &q);
        if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
            spin_wait_for_next_cs(&q);
            this->unk_active.next = q.next;
        }
        return TRUE;
    }
    return FALSE;
}

/* ?unlock@critical_section@Concurrency@@QAEXXZ */
/* ?unlock@critical_section@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
void __thiscall critical_section_unlock(critical_section *this)
{
    TRACE("(%p)\n", this);

    this->unk_thread_id = 0;
    this->head = NULL;
    if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
            == &this->unk_active) return;
    spin_wait_for_next_cs(&this->unk_active);

#if _MSVCR_VER >= 110
    while(1) {
        cs_queue *next;

        if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
            break;

        next = this->unk_active.next;
        if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
            HeapFree(GetProcessHeap(), 0, next);
            return;
        }
        spin_wait_for_next_cs(next);

        this->unk_active.next = next->next;
        HeapFree(GetProcessHeap(), 0, next);
    }
#endif

    NtReleaseKeyedEvent(keyed_event, this->unk_active.next, 0, NULL);
}
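/* Unlock either clears the tail (no waiter was queued) or wakes the first
 * queued node through the keyed event.  The _MSVCR_VER >= 110 loop
 * cooperates with critical_section_try_lock_for(): each heap-allocated
 * waiter node carries a 'free' flag that resolves the race between a
 * timing-out waiter and the releasing owner.  If the waiter set the flag
 * first, the node is dead and is reaped here; if the owner sets it first,
 * the waiter is still alive and must consume the pending keyed-event wake
 * before it can return. */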
/* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
/* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
critical_section* __thiscall critical_section_native_handle(critical_section *this)
{
    TRACE("(%p)\n", this);
    return this;
}

#if _MSVCR_VER >= 110
/* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
/* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
bool __thiscall critical_section_try_lock_for(
        critical_section *this, unsigned int timeout)
{
    cs_queue *q, *last;

    TRACE("(%p %d)\n", this, timeout);

    if(this->unk_thread_id == GetCurrentThreadId()) {
        improper_lock e;
        improper_lock_ctor_str(&e, "Already locked");
        _CxxThrowException(&e, &improper_lock_exception_type);
    }

    if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
        return critical_section_try_lock(this);

    last = InterlockedExchangePointer(&this->tail, q);
    if(last) {
        LARGE_INTEGER to;
        NTSTATUS status;
        FILETIME ft;

        last->next = q;
        GetSystemTimeAsFileTime(&ft);
        to.QuadPart = ((LONGLONG)ft.dwHighDateTime<<32) +
            ft.dwLowDateTime + (LONGLONG)timeout*10000;
        status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
        if(status == STATUS_TIMEOUT) {
            if(!InterlockedExchange(&q->free, TRUE))
                return FALSE;
            /* A thread has signaled the event and is blocked waiting. */
            /* We need to catch the event to wake the thread. */
            NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
        }
    }

    cs_set_head(this, q);
    if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        this->unk_active.next = q->next;
    }

    HeapFree(GetProcessHeap(), 0, q);
    return TRUE;
}
#endif
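/* NtWaitForKeyedEvent takes an NT-style timeout: a positive QuadPart is an
 * absolute time in 100ns units since 1601, which is why the deadline above
 * is computed from GetSystemTimeAsFileTime() plus timeout*10000 (ms ->
 * 100ns ticks).  evt_timeout() further down uses the negative, i.e.
 * relative, form instead. */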
/* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
        critical_section_scoped_lock *this, critical_section *cs)
{
    TRACE("(%p %p)\n", this, cs);
    this->cs = cs;
    cs_lock(this->cs, &this->lock.q);
    return this;
}

/* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
/* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(this->cs);
}

/* ??0_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??0_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock_ctor, 4)
_NonReentrantPPLLock* __thiscall _NonReentrantPPLLock_ctor(_NonReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    critical_section_ctor(&this->cs);
    return this;
}

/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Acquire, 8)
void __thiscall _NonReentrantPPLLock__Acquire(_NonReentrantPPLLock *this, cs_queue *q)
{
    TRACE("(%p %p)\n", this, q);
    cs_lock(&this->cs, q);
}

/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Release, 4)
void __thiscall _NonReentrantPPLLock__Release(_NonReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(&this->cs);
}

/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_ctor, 8)
_NonReentrantPPLLock__Scoped_lock* __thiscall _NonReentrantPPLLock__Scoped_lock_ctor(
        _NonReentrantPPLLock__Scoped_lock *this, _NonReentrantPPLLock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    _NonReentrantPPLLock__Acquire(this->lock, &this->wait.q);
    return this;
}

/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_dtor, 4)
void __thiscall _NonReentrantPPLLock__Scoped_lock_dtor(_NonReentrantPPLLock__Scoped_lock *this)
{
    TRACE("(%p)\n", this);

    _NonReentrantPPLLock__Release(this->lock);
}

/* ??0_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??0_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock_ctor, 4)
_ReentrantPPLLock* __thiscall _ReentrantPPLLock_ctor(_ReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    critical_section_ctor(&this->cs);
    this->count = 0;
    this->owner = -1;
    return this;
}

/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Acquire, 8)
void __thiscall _ReentrantPPLLock__Acquire(_ReentrantPPLLock *this, cs_queue *q)
{
    TRACE("(%p %p)\n", this, q);

    if(this->owner == GetCurrentThreadId()) {
        this->count++;
        return;
    }

    cs_lock(&this->cs, q);
    this->count++;
    this->owner = GetCurrentThreadId();
}

/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Release, 4)
void __thiscall _ReentrantPPLLock__Release(_ReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    this->count--;
    if(this->count)
        return;

    this->owner = -1;
    critical_section_unlock(&this->cs);
}

/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_ctor, 8)
_ReentrantPPLLock__Scoped_lock* __thiscall _ReentrantPPLLock__Scoped_lock_ctor(
        _ReentrantPPLLock__Scoped_lock *this, _ReentrantPPLLock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    _ReentrantPPLLock__Acquire(this->lock, &this->wait.q);
    return this;
}

/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_dtor, 4)
void __thiscall _ReentrantPPLLock__Scoped_lock_dtor(_ReentrantPPLLock__Scoped_lock *this)
{
    TRACE("(%p)\n", this);

    _ReentrantPPLLock__Release(this->lock);
}

/* ?_GetConcurrency@details@Concurrency@@YAIXZ */
unsigned int __cdecl _GetConcurrency(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors;
    }

    return val;
}

static inline PLARGE_INTEGER evt_timeout(PLARGE_INTEGER pTime, unsigned int timeout)
{
    if(timeout == COOPERATIVE_TIMEOUT_INFINITE) return NULL;
    pTime->QuadPart = (ULONGLONG)timeout * -10000;
    return pTime;
}
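/* A negative QuadPart is an NT relative timeout in 100ns units, so
 * timeout * -10000 means "this many milliseconds from now";
 * COOPERATIVE_TIMEOUT_INFINITE maps to a NULL pointer, i.e. wait
 * forever. */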
static void evt_add_queue(thread_wait_entry **head, thread_wait_entry *entry)
{
    entry->next = *head;
    entry->prev = NULL;
    if(*head) (*head)->prev = entry;
    *head = entry;
}

static void evt_remove_queue(thread_wait_entry **head, thread_wait_entry *entry)
{
    if(entry == *head)
        *head = entry->next;
    else if(entry->prev)
        entry->prev->next = entry->next;
    if(entry->next) entry->next->prev = entry->prev;
}

static size_t evt_end_wait(thread_wait *wait, event **events, int count)
{
    size_t i, ret = COOPERATIVE_WAIT_TIMEOUT;

    for(i = 0; i < count; i++) {
        critical_section_lock(&events[i]->cs);
        if(events[i] == wait->signaled) ret = i;
        evt_remove_queue(&events[i]->waiters, &wait->entries[i]);
        critical_section_unlock(&events[i]->cs);
    }

    return ret;
}

static inline int evt_transition(void **state, void *from, void *to)
{
    return InterlockedCompareExchangePointer(state, to, from) == from;
}

static size_t evt_wait(thread_wait *wait, event **events, int count, bool wait_all, unsigned int timeout)
{
    int i;
    NTSTATUS status;
    LARGE_INTEGER ntto;

    wait->signaled = EVT_RUNNING;
    wait->pending_waits = wait_all ? count : 1;
    for(i = 0; i < count; i++) {
        wait->entries[i].wait = wait;

        critical_section_lock(&events[i]->cs);
        evt_add_queue(&events[i]->waiters, &wait->entries[i]);
        if(events[i]->signaled) {
            if(!InterlockedDecrement(&wait->pending_waits)) {
                wait->signaled = events[i];
                critical_section_unlock(&events[i]->cs);

                return evt_end_wait(wait, events, i+1);
            }
        }
        critical_section_unlock(&events[i]->cs);
    }

    if(!timeout)
        return evt_end_wait(wait, events, count);

    if(!evt_transition(&wait->signaled, EVT_RUNNING, EVT_WAITING))
        return evt_end_wait(wait, events, count);

    status = NtWaitForKeyedEvent(keyed_event, wait, 0, evt_timeout(&ntto, timeout));

    if(status && !evt_transition(&wait->signaled, EVT_WAITING, EVT_RUNNING))
        NtWaitForKeyedEvent(keyed_event, wait, 0, NULL);

    return evt_end_wait(wait, events, count);
}
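/* evt_wait() queues one thread_wait_entry per event.  pending_waits starts
 * at 'count' for wait-for-all and at 1 for wait-for-any; event_set()
 * decrements it, and only the decrement that reaches zero claims the wait
 * by exchanging wait->signaled.  The EVT_RUNNING -> EVT_WAITING transition
 * decides whether a setter must issue a keyed-event wake: if the waiter
 * fails that transition, it was signaled before it ever went to sleep, and
 * if a late wake is already in flight (status != 0 but the back-transition
 * fails), the waiter consumes it to keep the keyed event balanced. */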
/* ??0event@Concurrency@@QAE@XZ */
/* ??0event@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(event_ctor, 4)
event* __thiscall event_ctor(event *this)
{
    TRACE("(%p)\n", this);

    this->waiters = NULL;
    this->signaled = FALSE;
    critical_section_ctor(&this->cs);

    return this;
}

/* ??1event@Concurrency@@QAE@XZ */
/* ??1event@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(event_dtor, 4)
void __thiscall event_dtor(event *this)
{
    TRACE("(%p)\n", this);
    critical_section_dtor(&this->cs);
    if(this->waiters)
        ERR("there's a wait on destroyed event\n");
}

/* ?reset@event@Concurrency@@QAEXXZ */
/* ?reset@event@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(event_reset, 4)
void __thiscall event_reset(event *this)
{
    thread_wait_entry *entry;

    TRACE("(%p)\n", this);

    critical_section_lock(&this->cs);
    if(this->signaled) {
        this->signaled = FALSE;
        for(entry=this->waiters; entry; entry = entry->next)
            InterlockedIncrement(&entry->wait->pending_waits);
    }
    critical_section_unlock(&this->cs);
}

/* ?set@event@Concurrency@@QAEXXZ */
/* ?set@event@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(event_set, 4)
void __thiscall event_set(event *this)
{
    thread_wait_entry *wakeup = NULL;
    thread_wait_entry *entry, *next;

    TRACE("(%p)\n", this);

    critical_section_lock(&this->cs);
    if(!this->signaled) {
        this->signaled = TRUE;
        for(entry=this->waiters; entry; entry=next) {
            next = entry->next;
            if(!InterlockedDecrement(&entry->wait->pending_waits)) {
                if(InterlockedExchangePointer(&entry->wait->signaled, this) == EVT_WAITING) {
                    evt_remove_queue(&this->waiters, entry);
                    evt_add_queue(&wakeup, entry);
                }
            }
        }
    }
    critical_section_unlock(&this->cs);

    for(entry=wakeup; entry; entry=next) {
        next = entry->next;
        entry->next = entry->prev = NULL;
        NtReleaseKeyedEvent(keyed_event, entry->wait, 0, NULL);
    }
}

/* ?wait@event@Concurrency@@QAEII@Z */
/* ?wait@event@Concurrency@@QEAA_KI@Z */
DEFINE_THISCALL_WRAPPER(event_wait, 8)
size_t __thiscall event_wait(event *this, unsigned int timeout)
{
    thread_wait wait;
    size_t signaled;

    TRACE("(%p %u)\n", this, timeout);

    critical_section_lock(&this->cs);
    signaled = this->signaled;
    critical_section_unlock(&this->cs);

    if(!timeout) return signaled ? 0 : COOPERATIVE_WAIT_TIMEOUT;
    return signaled ? 0 : evt_wait(&wait, &this, 1, FALSE, timeout);
}
/* ?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z */
/* ?wait_for_multiple@event@Concurrency@@SA_KPEAPEAV12@_K_NI@Z */
size_t __cdecl event_wait_for_multiple(event **events, size_t count, bool wait_all, unsigned int timeout)
{
    thread_wait *wait;
    size_t ret;

    TRACE("(%p %Iu %d %u)\n", events, count, wait_all, timeout);

    if(count == 0)
        return 0;

    wait = operator_new(FIELD_OFFSET(thread_wait, entries[count]));
    ret = evt_wait(wait, events, count, wait_all, timeout);
    operator_delete(wait);

    return ret;
}
#if _MSVCR_VER >= 110

/* ??0_Condition_variable@details@Concurrency@@QAE@XZ */
/* ??0_Condition_variable@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_ctor, 4)
_Condition_variable* __thiscall _Condition_variable_ctor(_Condition_variable *this)
{
    TRACE("(%p)\n", this);

    this->queue = NULL;
    critical_section_ctor(&this->lock);
    return this;
}

/* ??1_Condition_variable@details@Concurrency@@QAE@XZ */
/* ??1_Condition_variable@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_dtor, 4)
void __thiscall _Condition_variable_dtor(_Condition_variable *this)
{
    TRACE("(%p)\n", this);

    while(this->queue) {
        cv_queue *next = this->queue->next;
        if(!this->queue->expired)
            ERR("there's an active wait\n");
        HeapFree(GetProcessHeap(), 0, this->queue);
        this->queue = next;
    }
    critical_section_dtor(&this->lock);
}

/* ?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z */
/* ?wait@_Condition_variable@details@Concurrency@@QEAAXAEAVcritical_section@3@@Z */
DEFINE_THISCALL_WRAPPER(_Condition_variable_wait, 8)
void __thiscall _Condition_variable_wait(_Condition_variable *this, critical_section *cs)
{
    cv_queue q, *next;

    TRACE("(%p, %p)\n", this, cs);

    critical_section_lock(&this->lock);
    q.next = this->queue;
    q.expired = FALSE;
    next = q.next;
    this->queue = &q;
    critical_section_unlock(&this->lock);

    critical_section_unlock(cs);
    while (q.next != CV_WAKE)
        RtlWaitOnAddress(&q.next, &next, sizeof(next), NULL);
    critical_section_lock(cs);
}
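/* Waiters push a node onto this->queue and block with RtlWaitOnAddress()
 * on their own 'next' field.  A notifier rewrites that field to the
 * CV_WAKE sentinel before calling RtlWakeAddressSingle(), so a spurious
 * wake just re-checks the field and goes back to sleep.  As with any
 * condition variable, the caller holds 'cs' around the wait and is
 * expected to re-check its predicate after the call returns. */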
/* ?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z */
/* ?wait_for@_Condition_variable@details@Concurrency@@QEAA_NAEAVcritical_section@3@I@Z */
DEFINE_THISCALL_WRAPPER(_Condition_variable_wait_for, 12)
bool __thiscall _Condition_variable_wait_for(_Condition_variable *this,
        critical_section *cs, unsigned int timeout)
{
    LARGE_INTEGER to;
    NTSTATUS status;
    FILETIME ft;
    cv_queue *q, *next;

    TRACE("(%p %p %d)\n", this, cs, timeout);

    q = operator_new(sizeof(cv_queue));
    critical_section_lock(&this->lock);
    q->next = this->queue;
    q->expired = FALSE;
    next = q->next;
    this->queue = q;
    critical_section_unlock(&this->lock);

    critical_section_unlock(cs);

    GetSystemTimeAsFileTime(&ft);
    to.QuadPart = ((LONGLONG)ft.dwHighDateTime << 32) +
        ft.dwLowDateTime + (LONGLONG)timeout * 10000;
    while (q->next != CV_WAKE) {
        status = RtlWaitOnAddress(&q->next, &next, sizeof(next), &to);
        if(status == STATUS_TIMEOUT) {
            if(!InterlockedExchange(&q->expired, TRUE)) {
                critical_section_lock(cs);
                return FALSE;
            }
            break;
        }
    }

    operator_delete(q);
    critical_section_lock(cs);
    return TRUE;
}

/* ?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ */
/* ?notify_one@_Condition_variable@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_one, 4)
void __thiscall _Condition_variable_notify_one(_Condition_variable *this)
{
    cv_queue *node;

    TRACE("(%p)\n", this);

    if(!this->queue)
        return;

    while(1) {
        critical_section_lock(&this->lock);
        node = this->queue;
        if(!node) {
            critical_section_unlock(&this->lock);
            return;
        }
        this->queue = node->next;
        critical_section_unlock(&this->lock);

        node->next = CV_WAKE;
        if(!InterlockedExchange(&node->expired, TRUE)) {
            RtlWakeAddressSingle(&node->next);
            return;
        } else {
            HeapFree(GetProcessHeap(), 0, node);
        }
    }
}
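/* notify_one pops one node at a time and races its 'expired' flag against
 * a timing-out wait_for() waiter: winning the exchange means the waiter is
 * still blocked and gets woken, losing it means the waiter already left,
 * so the orphaned heap node is freed and the loop tries the next one. */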
/* ?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ */
/* ?notify_all@_Condition_variable@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_all, 4)
void __thiscall _Condition_variable_notify_all(_Condition_variable *this)
{
    cv_queue *ptr;

    TRACE("(%p)\n", this);

    if(!this->queue)
        return;

    critical_section_lock(&this->lock);
    ptr = this->queue;
    this->queue = NULL;
    critical_section_unlock(&this->lock);

    while(ptr) {
        cv_queue *next = ptr->next;

        ptr->next = CV_WAKE;
        if(!InterlockedExchange(&ptr->expired, TRUE))
            RtlWakeAddressSingle(&ptr->next);
        else
            HeapFree(GetProcessHeap(), 0, ptr);
        ptr = next;
    }
}
#endif

/* ??0reader_writer_lock@Concurrency@@QAE@XZ */
/* ??0reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_ctor, 4)
reader_writer_lock* __thiscall reader_writer_lock_ctor(reader_writer_lock *this)
{
    TRACE("(%p)\n", this);

    if (!keyed_event) {
        HANDLE event;

        NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
        if (InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
            NtClose(event);
    }

    memset(this, 0, sizeof(*this));
    return this;
}

/* ??1reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_dtor, 4)
void __thiscall reader_writer_lock_dtor(reader_writer_lock *this)
{
    TRACE("(%p)\n", this);

    if (this->thread_id != 0 || this->count)
        WARN("destroying locked reader_writer_lock\n");
}

static inline void spin_wait_for_next_rwl(rwl_queue *q)
{
    SpinWait sw;

    if(q->next) return;

    SpinWait_ctor(&sw, &spin_wait_yield);
    SpinWait__Reset(&sw);
    while(!q->next)
        SpinWait__SpinOnce(&sw);
    SpinWait_dtor(&sw);
}

/* ?lock@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?lock@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock, 4)
void __thiscall reader_writer_lock_lock(reader_writer_lock *this)
{
    rwl_queue q = { NULL }, *last;

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId()) {
        improper_lock e;
        improper_lock_ctor_str(&e, "Already locked");
        _CxxThrowException(&e, &improper_lock_exception_type);
    }

    last = InterlockedExchangePointer((void**)&this->writer_tail, &q);
    if (last) {
        last->next = &q;
        NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    } else {
        this->writer_head = &q;
        if (InterlockedOr(&this->count, WRITER_WAITING))
            NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    }

    this->thread_id = GetCurrentThreadId();
    this->writer_head = &this->active;
    this->active.next = NULL;
    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
        spin_wait_for_next_rwl(&q);
        this->active.next = q.next;
    }
}

/* ?lock_read@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?lock_read@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock_read, 4)
void __thiscall reader_writer_lock_lock_read(reader_writer_lock *this)
{
    rwl_queue q;

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId()) {
        improper_lock e;
        improper_lock_ctor_str(&e, "Already locked as writer");
        _CxxThrowException(&e, &improper_lock_exception_type);
    }

    do {
        q.next = this->reader_head;
    } while(InterlockedCompareExchangePointer((void**)&this->reader_head, &q, q.next) != q.next);

    if (!q.next) {
        rwl_queue *head;
        LONG count;

        while (!((count = this->count) & WRITER_WAITING))
            if (InterlockedCompareExchange(&this->count, count+1, count) == count) break;

        if (count & WRITER_WAITING)
            NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);

        head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
        while(head && head != &q) {
            rwl_queue *next = head->next;
            InterlockedIncrement(&this->count);
            NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
            head = next;
        }
    } else {
        NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    }
}
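/* The low bits of 'count' hold the number of active readers and
 * WRITER_WAITING is a flag bit on top.  An arriving reader pushes itself
 * onto the reader_head list; only the thread that lands first in that
 * list (q.next == NULL) negotiates with writers, then drains the list,
 * bumping 'count' and waking each queued reader through the keyed event.
 * Readers that arrive later simply sleep until that drain releases
 * them. */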
/* ?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ */
/* ?try_lock@reader_writer_lock@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock, 4)
bool __thiscall reader_writer_lock_try_lock(reader_writer_lock *this)
{
    rwl_queue q = { NULL };

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId())
        return FALSE;

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &q, NULL))
        return FALSE;
    this->writer_head = &q;
    if (!InterlockedCompareExchange(&this->count, WRITER_WAITING, 0)) {
        this->thread_id = GetCurrentThreadId();
        this->writer_head = &this->active;
        this->active.next = NULL;
        if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
            spin_wait_for_next_rwl(&q);
            this->active.next = q.next;
        }
        return TRUE;
    }

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, &q) == &q)
        return FALSE;
    spin_wait_for_next_rwl(&q);
    this->writer_head = q.next;
    if (!InterlockedOr(&this->count, WRITER_WAITING)) {
        this->thread_id = GetCurrentThreadId();
        this->writer_head = &this->active;
        this->active.next = q.next;
        return TRUE;
    }
    return FALSE;
}

/* ?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ */
/* ?try_lock_read@reader_writer_lock@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock_read, 4)
bool __thiscall reader_writer_lock_try_lock_read(reader_writer_lock *this)
{
    LONG count;

    TRACE("(%p)\n", this);

    while (!((count = this->count) & WRITER_WAITING))
        if (InterlockedCompareExchange(&this->count, count+1, count) == count) return TRUE;
    return FALSE;
}

/* ?unlock@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?unlock@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_unlock, 4)
void __thiscall reader_writer_lock_unlock(reader_writer_lock *this)
{
    LONG count;
    rwl_queue *head, *next;

    TRACE("(%p)\n", this);

    if ((count = this->count) & ~WRITER_WAITING) {
        count = InterlockedDecrement(&this->count);
        if (count != WRITER_WAITING)
            return;
        NtReleaseKeyedEvent(keyed_event, this->writer_head, 0, NULL);
        return;
    }

    this->thread_id = 0;
    next = this->writer_head->next;
    if (next) {
        NtReleaseKeyedEvent(keyed_event, next, 0, NULL);
        return;
    }
    InterlockedAnd(&this->count, ~WRITER_WAITING);
    head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
    while (head) {
        next = head->next;
        InterlockedIncrement(&this->count);
        NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
        head = next;
    }

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, this->writer_head) == this->writer_head)
        return;
    InterlockedOr(&this->count, WRITER_WAITING);
}
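/* unlock tells the two ownership modes apart through 'count': a non-zero
 * reader count (the bits below WRITER_WAITING) means the lock is held for
 * reading, and the last reader out hands it to the waiting writer_head if
 * WRITER_WAITING is set.  A writer passes the lock directly to the next
 * queued writer when there is one; otherwise it wakes the whole reader
 * list and retires its queue node, re-setting WRITER_WAITING if another
 * writer slipped onto the tail in the meantime. */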
/* ??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_ctor, 8)
reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_ctor(
        reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    reader_writer_lock_lock(lock);
    return this;
}

/* ??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1scoped_lock@reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_dtor, 4)
void __thiscall reader_writer_lock_scoped_lock_dtor(reader_writer_lock_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    reader_writer_lock_unlock(this->lock);
}

/* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_ctor, 8)
reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_read_ctor(
        reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    reader_writer_lock_lock_read(lock);
    return this;
}

/* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_dtor, 4)
void __thiscall reader_writer_lock_scoped_lock_read_dtor(reader_writer_lock_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    reader_writer_lock_unlock(this->lock);
}

/* ??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
/* ??0_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_ctor, 4)
_ReentrantBlockingLock* __thiscall _ReentrantBlockingLock_ctor(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);

    InitializeCriticalSection(&this->cs);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": _ReentrantBlockingLock");
    return this;
}

/* ??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
/* ??1_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_dtor, 4)
void __thiscall _ReentrantBlockingLock_dtor(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);
}

/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Acquire, 4)
void __thiscall _ReentrantBlockingLock__Acquire(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    EnterCriticalSection(&this->cs);
}

/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Release, 4)
void __thiscall _ReentrantBlockingLock__Release(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    LeaveCriticalSection(&this->cs);
}

/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ */
/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__TryAcquire, 4)
bool __thiscall _ReentrantBlockingLock__TryAcquire(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    return TryEnterCriticalSection(&this->cs);
}

/* ?wait@Concurrency@@YAXI@Z */
void __cdecl Concurrency_wait(unsigned int time)
{
    static int once;

    if (!once++) FIXME("(%d) stub!\n", time);

    Sleep(time);
}

#if _MSVCR_VER>=110
/* ?_Trace_agents@Concurrency@@YAXW4Agents_EventType@1@_JZZ */
void WINAPIV _Trace_agents(/*enum Concurrency::Agents_EventType*/int type, __int64 id, ...)
{
    FIXME("(%d %#I64x)\n", type, id);
}
#endif

/* ?_Trace_ppl_function@Concurrency@@YAXABU_GUID@@EW4ConcRT_EventType@1@@Z */
/* ?_Trace_ppl_function@Concurrency@@YAXAEBU_GUID@@EW4ConcRT_EventType@1@@Z */
void __cdecl _Trace_ppl_function(const GUID *guid, unsigned char level, enum ConcRT_EventType type)
{
    FIXME("(%s %u %i) stub\n", debugstr_guid(guid), level, type);
}

/* ??0_Timer@details@Concurrency@@IAE@I_N@Z */
/* ??0_Timer@details@Concurrency@@IEAA@I_N@Z */
DEFINE_THISCALL_WRAPPER(_Timer_ctor, 12)
_Timer* __thiscall _Timer_ctor(_Timer *this, unsigned int elapse, bool repeat)
{
    TRACE("(%p %u %x)\n", this, elapse, repeat);

    this->vtable = &_Timer_vtable;
    this->timer = NULL;
    this->elapse = elapse;
    this->repeat = repeat;
    return this;
}

static void WINAPI timer_callback(TP_CALLBACK_INSTANCE *instance, void *ctx, TP_TIMER *timer)
{
    _Timer *this = ctx;
    TRACE("calling _Timer(%p) callback\n", this);
    call__Timer_callback(this);
}

/* ?_Start@_Timer@details@Concurrency@@IAEXXZ */
/* ?_Start@_Timer@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Timer__Start, 4)
void __thiscall _Timer__Start(_Timer *this)
{
    LONGLONG ll;
    FILETIME ft;

    TRACE("(%p)\n", this);

    this->timer = CreateThreadpoolTimer(timer_callback, this, NULL);
    if (!this->timer)
    {
        FIXME("throw exception?\n");
        return;
    }

    ll = -(LONGLONG)this->elapse * TICKSPERMSEC;
    ft.dwLowDateTime = ll & 0xffffffff;
    ft.dwHighDateTime = ll >> 32;
    SetThreadpoolTimer(this->timer, &ft, this->repeat ? this->elapse : 0, 0);
}
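/* SetThreadpoolTimer treats a negative due time as relative, so the first
 * expiration is encoded as -(elapse in ms) * TICKSPERMSEC (100ns ticks)
 * split across the two FILETIME halves; the msPeriod argument then takes
 * care of re-arming repeating timers. */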
/* ?_Stop@_Timer@details@Concurrency@@IAEXXZ */
/* ?_Stop@_Timer@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Timer__Stop, 4)
void __thiscall _Timer__Stop(_Timer *this)
{
    TRACE("(%p)\n", this);

    SetThreadpoolTimer(this->timer, NULL, 0, 0);
    WaitForThreadpoolTimerCallbacks(this->timer, TRUE);
    CloseThreadpoolTimer(this->timer);
    this->timer = NULL;
}

/* ??1_Timer@details@Concurrency@@MAE@XZ */
/* ??1_Timer@details@Concurrency@@MEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Timer_dtor, 4)
void __thiscall _Timer_dtor(_Timer *this)
{
    TRACE("(%p)\n", this);

    if (this->timer)
        _Timer__Stop(this);
}

DEFINE_THISCALL_WRAPPER(_Timer_vector_dtor, 8)
_Timer* __thiscall _Timer_vector_dtor(_Timer *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if (flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for (i=*ptr-1; i>=0; i--)
            _Timer_dtor(this+i);
        operator_delete(ptr);
    } else {
        _Timer_dtor(this);
        if (flags & 1)
            operator_delete(this);
    }

    return this;
}

#ifdef __ASM_USE_THISCALL_WRAPPER

#define DEFINE_VTBL_WRAPPER(off)            \
    __ASM_GLOBAL_FUNC(vtbl_wrapper_ ## off, \
        "popl %eax\n\t" \
        "popl %ecx\n\t" \
        "pushl %eax\n\t" \
        "movl 0(%ecx), %eax\n\t" \
        "jmp *" #off "(%eax)\n\t")
DEFINE_VTBL_WRAPPER(0);
DEFINE_VTBL_WRAPPER(4);
DEFINE_VTBL_WRAPPER(8);
DEFINE_VTBL_WRAPPER(12);
DEFINE_VTBL_WRAPPER(16);
DEFINE_VTBL_WRAPPER(20);
DEFINE_VTBL_WRAPPER(24);
DEFINE_VTBL_WRAPPER(28);
DEFINE_VTBL_WRAPPER(32);
DEFINE_VTBL_WRAPPER(36);
DEFINE_VTBL_WRAPPER(40);
DEFINE_VTBL_WRAPPER(44);
DEFINE_VTBL_WRAPPER(48);

#endif

DEFINE_RTTI_DATA0(Context, 0, ".?AVContext@Concurrency@@")
DEFINE_RTTI_DATA1(ContextBase, 0, &Context_rtti_base_descriptor, ".?AVContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ExternalContextBase, 0, &ContextBase_rtti_base_descriptor,
        &Context_rtti_base_descriptor, ".?AVExternalContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA0(Scheduler, 0, ".?AVScheduler@Concurrency@@")
DEFINE_RTTI_DATA1(SchedulerBase, 0, &Scheduler_rtti_base_descriptor, ".?AVSchedulerBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ThreadScheduler, 0, &SchedulerBase_rtti_base_descriptor,
        &Scheduler_rtti_base_descriptor, ".?AVThreadScheduler@details@Concurrency@@")
DEFINE_RTTI_DATA0(_Timer, 0, ".?AV_Timer@details@Concurrency@@");

__ASM_BLOCK_BEGIN(concurrency_vtables)
    __ASM_VTABLE(ExternalContextBase,
            VTABLE_ADD_FUNC(ExternalContextBase_GetId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetVirtualProcessorId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetScheduleGroupId)
            VTABLE_ADD_FUNC(ExternalContextBase_Unblock)
            VTABLE_ADD_FUNC(ExternalContextBase_IsSynchronouslyBlocked)
            VTABLE_ADD_FUNC(ExternalContextBase_vector_dtor));
    __ASM_VTABLE(ThreadScheduler,
            VTABLE_ADD_FUNC(ThreadScheduler_vector_dtor)
            VTABLE_ADD_FUNC(ThreadScheduler_Id)
            VTABLE_ADD_FUNC(ThreadScheduler_GetNumberOfVirtualProcessors)
            VTABLE_ADD_FUNC(ThreadScheduler_GetPolicy)
            VTABLE_ADD_FUNC(ThreadScheduler_Reference)
            VTABLE_ADD_FUNC(ThreadScheduler_Release)
            VTABLE_ADD_FUNC(ThreadScheduler_RegisterShutdownEvent)
            VTABLE_ADD_FUNC(ThreadScheduler_Attach)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_IsAvailableLocation)
#endif
            );
    __ASM_VTABLE(_Timer,
            VTABLE_ADD_FUNC(_Timer_vector_dtor));
__ASM_BLOCK_END

void msvcrt_init_concurrency(void *base)
{
#ifdef __x86_64__
    init_cexception_rtti(base);
    init_improper_lock_rtti(base);
    init_improper_scheduler_attach_rtti(base);
    init_improper_scheduler_detach_rtti(base);
    init_invalid_scheduler_policy_key_rtti(base);
    init_invalid_scheduler_policy_thread_specification_rtti(base);
    init_invalid_scheduler_policy_value_rtti(base);
    init_scheduler_resource_allocation_error_rtti(base);
    init_Context_rtti(base);
    init_ContextBase_rtti(base);
    init_ExternalContextBase_rtti(base);
    init_Scheduler_rtti(base);
    init_SchedulerBase_rtti(base);
    init_ThreadScheduler_rtti(base);
    init__Timer_rtti(base);

    init_cexception_cxx_type_info(base);
    init_improper_lock_cxx(base);
    init_improper_scheduler_attach_cxx(base);
    init_improper_scheduler_detach_cxx(base);
    init_invalid_scheduler_policy_key_cxx(base);
    init_invalid_scheduler_policy_thread_specification_cxx(base);
    init_invalid_scheduler_policy_value_cxx(base);
    init_scheduler_resource_allocation_error_cxx(base);
#endif
}

void msvcrt_free_concurrency(void)
{
    if (context_tls_index != TLS_OUT_OF_INDEXES)
        TlsFree(context_tls_index);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    if(default_scheduler) {
        ThreadScheduler_dtor(default_scheduler);
        operator_delete(default_scheduler);
    }

    if(keyed_event)
        NtClose(keyed_event);
}

void msvcrt_free_scheduler_thread(void)
{
    Context *context = try_get_current_context();
    if (!context) return;
    call_Context_dtor(context, 1);
}

#endif /* _MSVCR_VER >= 100 */