/*
 * Concurrency namespace implementation
 *
 * Copyright 2017 Piotr Caban
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include <stdarg.h>
#include <stdbool.h>

#include "windef.h"
#include "winternl.h"
#include "wine/debug.h"
#include "msvcrt.h"
#include "cxx.h"

#if _MSVCR_VER >= 100

WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);

typedef exception cexception;
CREATE_EXCEPTION_OBJECT(cexception)

static int context_id = -1;
static int scheduler_id = -1;

typedef enum {
    SchedulerKind,
    MaxConcurrency,
    MinConcurrency,
    TargetOversubscriptionFactor,
    LocalContextCacheSize,
    ContextStackSize,
    ContextPriority,
    SchedulingProtocol,
    DynamicProgressFeedback,
    WinRTInitialization,
    last_policy_id
} PolicyElementKey;

typedef struct {
    struct _policy_container {
        unsigned int policies[last_policy_id];
    } *policy_container;
} SchedulerPolicy;

typedef struct {
    const vtable_ptr *vtable;
} Context;
#define call_Context_GetId(this) CALL_VTBL_FUNC(this, 0, \
        unsigned int, (const Context*), (this))
#define call_Context_GetVirtualProcessorId(this) CALL_VTBL_FUNC(this, 4, \
        unsigned int, (const Context*), (this))
#define call_Context_GetScheduleGroupId(this) CALL_VTBL_FUNC(this, 8, \
        unsigned int, (const Context*), (this))
#define call_Context_dtor(this, flags) CALL_VTBL_FUNC(this, 20, \
        Context*, (Context*, unsigned int), (this, flags))

typedef struct {
    Context *context;
} _Context;

union allocator_cache_entry {
    struct _free {
        int depth;
        union allocator_cache_entry *next;
    } free;
    struct _alloc {
        int bucket;
        char mem[1];
    } alloc;
};
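
/* Cached allocations are threaded through the per-context allocator_cache
 * below: an entry is viewed through "free" while it sits on a bucket's free
 * list and through "alloc" once handed out, with alloc.mem being the storage
 * returned to the caller and alloc.bucket recording which list (if any) the
 * block should go back to. */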

struct scheduler_list {
    struct Scheduler *scheduler;
    struct scheduler_list *next;
};

typedef struct {
    Context context;
    struct scheduler_list scheduler;
    unsigned int id;
    union allocator_cache_entry *allocator_cache[8];
} ExternalContextBase;
extern const vtable_ptr ExternalContextBase_vtable;
static void ExternalContextBase_ctor(ExternalContextBase*);

typedef struct Scheduler {
    const vtable_ptr *vtable;
} Scheduler;
#define call_Scheduler_Id(this) CALL_VTBL_FUNC(this, 4, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetNumberOfVirtualProcessors(this) CALL_VTBL_FUNC(this, 8, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetPolicy(this,policy) CALL_VTBL_FUNC(this, 12, \
        SchedulerPolicy*, (Scheduler*,SchedulerPolicy*), (this,policy))
#define call_Scheduler_Reference(this) CALL_VTBL_FUNC(this, 16, unsigned int, (Scheduler*), (this))
#define call_Scheduler_Release(this) CALL_VTBL_FUNC(this, 20, unsigned int, (Scheduler*), (this))
#define call_Scheduler_RegisterShutdownEvent(this,event) CALL_VTBL_FUNC(this, 24, void, (Scheduler*,HANDLE), (this,event))
#define call_Scheduler_Attach(this) CALL_VTBL_FUNC(this, 28, void, (Scheduler*), (this))
#if _MSVCR_VER > 100
#define call_Scheduler_CreateScheduleGroup_loc(this,placement) CALL_VTBL_FUNC(this, 32, \
        /*ScheduleGroup*/void*, (Scheduler*,/*location*/void*), (this,placement))
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 36, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask_loc(this,proc,data,placement) CALL_VTBL_FUNC(this, 40, \
        void, (Scheduler*,void (__cdecl*)(void*),void*,/*location*/void*), (this,proc,data,placement))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 44, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#define call_Scheduler_IsAvailableLocation(this,placement) CALL_VTBL_FUNC(this, 48, \
        bool, (Scheduler*,const /*location*/void*), (this,placement))
#else
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 32, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 36, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#endif

typedef struct {
    Scheduler scheduler;
    LONG ref;
    unsigned int id;
    unsigned int virt_proc_no;
    SchedulerPolicy policy;
    int shutdown_count;
    int shutdown_size;
    HANDLE *shutdown_events;
    CRITICAL_SECTION cs;
} ThreadScheduler;
extern const vtable_ptr ThreadScheduler_vtable;

typedef struct {
    Scheduler *scheduler;
} _Scheduler;

typedef struct {
    char empty;
} _CurrentScheduler;

typedef enum
{
    SPINWAIT_INIT,
    SPINWAIT_SPIN,
    SPINWAIT_YIELD,
    SPINWAIT_DONE
} SpinWait_state;

typedef void (__cdecl *yield_func)(void);

typedef struct
{
    ULONG spin;
    ULONG unknown;
    SpinWait_state state;
    yield_func yield_func;
} SpinWait;

/* keep in sync with msvcp90/msvcp90.h */
typedef struct cs_queue
{
    struct cs_queue *next;
#if _MSVCR_VER >= 110
    BOOL free;
    int unknown;
#endif
} cs_queue;

typedef struct
{
    ULONG_PTR unk_thread_id;
    cs_queue unk_active;
#if _MSVCR_VER >= 110
    void *unknown[2];
#else
    void *unknown[1];
#endif
    cs_queue *head;
    void *tail;
} critical_section;

typedef struct
{
    critical_section *cs;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } lock;
} critical_section_scoped_lock;

typedef struct
{
    critical_section cs;
} _NonReentrantPPLLock;

typedef struct
{
    _NonReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _NonReentrantPPLLock__Scoped_lock;

typedef struct
{
    critical_section cs;
    LONG count;
    LONG owner;
} _ReentrantPPLLock;

typedef struct
{
    _ReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _ReentrantPPLLock__Scoped_lock;

#define EVT_RUNNING (void*)1
#define EVT_WAITING NULL

struct thread_wait;
typedef struct thread_wait_entry
{
    struct thread_wait *wait;
    struct thread_wait_entry *next;
    struct thread_wait_entry *prev;
} thread_wait_entry;

typedef struct thread_wait
{
    void *signaled;
    int pending_waits;
    thread_wait_entry entries[1];
} thread_wait;

typedef struct
{
    thread_wait_entry *waiters;
    INT_PTR signaled;
    critical_section cs;
} event;

#if _MSVCR_VER >= 110
typedef struct cv_queue {
    struct cv_queue *next;
    BOOL expired;
} cv_queue;

typedef struct {
    /* cv_queue structure is not binary compatible */
    cv_queue *queue;
    critical_section lock;
} _Condition_variable;
#endif

typedef struct rwl_queue
{
    struct rwl_queue *next;
} rwl_queue;

#define WRITER_WAITING 0x80000000
/* FIXME: reader_writer_lock structure is not binary compatible
 * it can't exceed 28/56 bytes */
typedef struct
{
    LONG count;
    LONG thread_id;
    rwl_queue active;
    rwl_queue *writer_head;
    rwl_queue *writer_tail;
    rwl_queue *reader_head;
} reader_writer_lock;

typedef struct {
    reader_writer_lock *lock;
} reader_writer_lock_scoped_lock;

typedef struct {
    CRITICAL_SECTION cs;
} _ReentrantBlockingLock;

#define TICKSPERMSEC 10000
typedef struct {
    const vtable_ptr *vtable;
    TP_TIMER *timer;
    unsigned int elapse;
    bool repeat;
} _Timer;
extern const vtable_ptr _Timer_vtable;
#define call__Timer_callback(this) CALL_VTBL_FUNC(this, 4, void, (_Timer*), (this))

typedef exception improper_lock;
extern const vtable_ptr improper_lock_vtable;

typedef exception improper_scheduler_attach;
extern const vtable_ptr improper_scheduler_attach_vtable;

typedef exception improper_scheduler_detach;
extern const vtable_ptr improper_scheduler_detach_vtable;

typedef exception invalid_scheduler_policy_key;
extern const vtable_ptr invalid_scheduler_policy_key_vtable;

typedef exception invalid_scheduler_policy_thread_specification;
extern const vtable_ptr invalid_scheduler_policy_thread_specification_vtable;

typedef exception invalid_scheduler_policy_value;
extern const vtable_ptr invalid_scheduler_policy_value_vtable;

typedef struct {
    exception e;
    HRESULT hr;
} scheduler_resource_allocation_error;
extern const vtable_ptr scheduler_resource_allocation_error_vtable;

enum ConcRT_EventType
{
    CONCRT_EVENT_GENERIC,
    CONCRT_EVENT_START,
    CONCRT_EVENT_END,
    CONCRT_EVENT_BLOCK,
    CONCRT_EVENT_UNBLOCK,
    CONCRT_EVENT_YIELD,
    CONCRT_EVENT_ATTACH,
    CONCRT_EVENT_DETACH
};

static int context_tls_index = TLS_OUT_OF_INDEXES;

static CRITICAL_SECTION default_scheduler_cs;
static CRITICAL_SECTION_DEBUG default_scheduler_cs_debug =
{
    0, 0, &default_scheduler_cs,
    { &default_scheduler_cs_debug.ProcessLocksList, &default_scheduler_cs_debug.ProcessLocksList },
      0, 0, { (DWORD_PTR)(__FILE__ ": default_scheduler_cs") }
};
static CRITICAL_SECTION default_scheduler_cs = { &default_scheduler_cs_debug, -1, 0, 0, 0, 0 };
static SchedulerPolicy default_scheduler_policy;
static ThreadScheduler *default_scheduler;

static HANDLE keyed_event;

static void create_default_scheduler(void);

/* ??0improper_lock@Concurrency@@QAE@PBD@Z */
/* ??0improper_lock@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_lock_ctor_str, 8)
improper_lock* __thiscall improper_lock_ctor_str(improper_lock *this, const char *str)
{
    TRACE("(%p %p)\n", this, str);
    return __exception_ctor(this, str, &improper_lock_vtable);
}

/* ??0improper_lock@Concurrency@@QAE@XZ */
/* ??0improper_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_lock_ctor, 4)
improper_lock* __thiscall improper_lock_ctor(improper_lock *this)
{
    return improper_lock_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_lock_copy_ctor,8)
improper_lock * __thiscall improper_lock_copy_ctor(improper_lock *this, const improper_lock *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    return __exception_copy_ctor(this, rhs, &improper_lock_vtable);
}

/* ??0improper_scheduler_attach@Concurrency@@QAE@PBD@Z */
/* ??0improper_scheduler_attach@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor_str, 8)
improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor_str(
        improper_scheduler_attach *this, const char *str)
{
    TRACE("(%p %p)\n", this, str);
    return __exception_ctor(this, str, &improper_scheduler_attach_vtable);
}

/* ??0improper_scheduler_attach@Concurrency@@QAE@XZ */
/* ??0improper_scheduler_attach@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor, 4)
improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor(
        improper_scheduler_attach *this)
{
    return improper_scheduler_attach_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_copy_ctor,8)
improper_scheduler_attach * __thiscall improper_scheduler_attach_copy_ctor(
        improper_scheduler_attach * _this, const improper_scheduler_attach * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &improper_scheduler_attach_vtable);
}

/* ??0improper_scheduler_detach@Concurrency@@QAE@PBD@Z */
/* ??0improper_scheduler_detach@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor_str, 8)
improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor_str(
        improper_scheduler_detach *this, const char *str)
{
    TRACE("(%p %p)\n", this, str);
    return __exception_ctor(this, str, &improper_scheduler_detach_vtable);
}

/* ??0improper_scheduler_detach@Concurrency@@QAE@XZ */
/* ??0improper_scheduler_detach@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor, 4)
improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor(
        improper_scheduler_detach *this)
{
    return improper_scheduler_detach_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_copy_ctor,8)
improper_scheduler_detach * __thiscall improper_scheduler_detach_copy_ctor(
        improper_scheduler_detach * _this, const improper_scheduler_detach * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &improper_scheduler_detach_vtable);
}

/* ??0invalid_scheduler_policy_key@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor_str, 8)
invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor_str(
        invalid_scheduler_policy_key *this, const char *str)
{
    TRACE("(%p %p)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_key_vtable);
}

/* ??0invalid_scheduler_policy_key@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor, 4)
invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor(
        invalid_scheduler_policy_key *this)
{
    return invalid_scheduler_policy_key_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_copy_ctor,8)
invalid_scheduler_policy_key * __thiscall invalid_scheduler_policy_key_copy_ctor(
        invalid_scheduler_policy_key * _this, const invalid_scheduler_policy_key * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_key_vtable);
}

/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor_str, 8)
invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor_str(
        invalid_scheduler_policy_thread_specification *this, const char *str)
{
    TRACE("(%p %p)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_thread_specification_vtable);
}

/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor, 4)
invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor(
        invalid_scheduler_policy_thread_specification *this)
{
    return invalid_scheduler_policy_thread_specification_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_copy_ctor,8)
invalid_scheduler_policy_thread_specification * __thiscall invalid_scheduler_policy_thread_specification_copy_ctor(
        invalid_scheduler_policy_thread_specification * _this, const invalid_scheduler_policy_thread_specification * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_thread_specification_vtable);
}

/* ??0invalid_scheduler_policy_value@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor_str, 8)
invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor_str(
        invalid_scheduler_policy_value *this, const char *str)
{
    TRACE("(%p %p)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_value_vtable);
}

/* ??0invalid_scheduler_policy_value@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor, 4)
invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor(
        invalid_scheduler_policy_value *this)
{
    return invalid_scheduler_policy_value_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_copy_ctor,8)
invalid_scheduler_policy_value * __thiscall invalid_scheduler_policy_value_copy_ctor(
        invalid_scheduler_policy_value * _this, const invalid_scheduler_policy_value * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_value_vtable);
}

/* ??0scheduler_resource_allocation_error@Concurrency@@QAE@PBDJ@Z */
/* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@PEBDJ@Z */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor_name, 12)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor_name(
        scheduler_resource_allocation_error *this, const char *name, HRESULT hr)
{
    TRACE("(%p %s %x)\n", this, wine_dbgstr_a(name), hr);
    __exception_ctor(&this->e, name, &scheduler_resource_allocation_error_vtable);
    this->hr = hr;
    return this;
}

/* ??0scheduler_resource_allocation_error@Concurrency@@QAE@J@Z */
/* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@J@Z */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor, 8)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor(
        scheduler_resource_allocation_error *this, HRESULT hr)
{
    return scheduler_resource_allocation_error_ctor_name(this, NULL, hr);
}

DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_copy_ctor,8)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_copy_ctor(
        scheduler_resource_allocation_error *this,
        const scheduler_resource_allocation_error *rhs)
{
    TRACE("(%p,%p)\n", this, rhs);

    if (!rhs->e.do_free)
        memcpy(this, rhs, sizeof(*this));
    else
        scheduler_resource_allocation_error_ctor_name(this, rhs->e.name, rhs->hr);
    return this;
}

/* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QBEJXZ */
/* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QEBAJXZ */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_get_error_code, 4)
HRESULT __thiscall scheduler_resource_allocation_error_get_error_code(
        const scheduler_resource_allocation_error *this)
{
    TRACE("(%p)\n", this);
    return this->hr;
}

DEFINE_RTTI_DATA1(improper_lock, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_lock@Concurrency@@")
DEFINE_RTTI_DATA1(improper_scheduler_attach, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_scheduler_attach@Concurrency@@")
DEFINE_RTTI_DATA1(improper_scheduler_detach, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_scheduler_detach@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_key, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_key@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_thread_specification, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_thread_specification@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_value, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_value@Concurrency@@")
DEFINE_RTTI_DATA1(scheduler_resource_allocation_error, 0, &cexception_rtti_base_descriptor,
        ".?AVscheduler_resource_allocation_error@Concurrency@@")

DEFINE_CXX_DATA1(improper_lock, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(improper_scheduler_attach, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(improper_scheduler_detach, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_key, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_thread_specification, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_value, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(scheduler_resource_allocation_error, &cexception_cxx_type_info, cexception_dtor)

__ASM_BLOCK_BEGIN(concurrency_exception_vtables)
    __ASM_VTABLE(improper_lock,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(improper_scheduler_attach,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(improper_scheduler_detach,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_key,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_thread_specification,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_value,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(scheduler_resource_allocation_error,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
__ASM_BLOCK_END

static Context* try_get_current_context(void)
{
    if (context_tls_index == TLS_OUT_OF_INDEXES)
        return NULL;
    return TlsGetValue(context_tls_index);
}
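
/* Like try_get_current_context(), but creates the context on first use.  The
 * TLS slot is allocated lazily; the interlocked compare-exchange lets racing
 * first callers publish a single index, with losers freeing their spare. */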
static Context* get_current_context(void)
{
    Context *ret;

    if (context_tls_index == TLS_OUT_OF_INDEXES) {
        int tls_index = TlsAlloc();
        if (tls_index == TLS_OUT_OF_INDEXES) {
            scheduler_resource_allocation_error e;
            scheduler_resource_allocation_error_ctor_name(&e, NULL,
                    HRESULT_FROM_WIN32(GetLastError()));
            _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
        }

        if(InterlockedCompareExchange(&context_tls_index, tls_index, TLS_OUT_OF_INDEXES) != TLS_OUT_OF_INDEXES)
            TlsFree(tls_index);
    }

    ret = TlsGetValue(context_tls_index);
    if (!ret) {
        ExternalContextBase *context = operator_new(sizeof(ExternalContextBase));
        ExternalContextBase_ctor(context);
        TlsSetValue(context_tls_index, context);
        ret = &context->context;
    }
    return ret;
}

static Scheduler* try_get_current_scheduler(void)
{
    ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();

    if (!context)
        return NULL;

    if (context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return NULL;
    }
    return context->scheduler.scheduler;
}

static Scheduler* get_current_scheduler(void)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    if (context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return NULL;
    }
    return context->scheduler.scheduler;
}

/* ?CurrentContext@Context@Concurrency@@SAPAV12@XZ */
/* ?CurrentContext@Context@Concurrency@@SAPEAV12@XZ */
Context* __cdecl Context_CurrentContext(void)
{
    TRACE("()\n");
    return get_current_context();
}

/* ?Id@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_Id(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetId(ctx) : -1;
}

/* ?Block@Context@Concurrency@@SAXXZ */
void __cdecl Context_Block(void)
{
    FIXME("()\n");
}

/* ?Yield@Context@Concurrency@@SAXXZ */
/* ?_Yield@_Context@details@Concurrency@@SAXXZ */
void __cdecl Context_Yield(void)
{
    FIXME("()\n");
}

/* ?_SpinYield@Context@Concurrency@@SAXXZ */
void __cdecl Context__SpinYield(void)
{
    FIXME("()\n");
}

/* ?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ */
bool __cdecl Context_IsCurrentTaskCollectionCanceling(void)
{
    FIXME("()\n");
    return FALSE;
}

/* ?Oversubscribe@Context@Concurrency@@SAX_N@Z */
void __cdecl Context_Oversubscribe(bool begin)
{
    FIXME("(%x)\n", begin);
}

/* ?ScheduleGroupId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_ScheduleGroupId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetScheduleGroupId(ctx) : -1;
}

/* ?VirtualProcessorId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_VirtualProcessorId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetVirtualProcessorId(ctx) : -1;
}

#if _MSVCR_VER > 100
/* ?_CurrentContext@_Context@details@Concurrency@@SA?AV123@XZ */
_Context *__cdecl _Context__CurrentContext(_Context *ret)
{
    TRACE("(%p)\n", ret);
    ret->context = Context_CurrentContext();
    return ret;
}
#endif

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetId, 4)
unsigned int __thiscall ExternalContextBase_GetId(const ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetVirtualProcessorId, 4)
unsigned int __thiscall ExternalContextBase_GetVirtualProcessorId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetScheduleGroupId, 4)
unsigned int __thiscall ExternalContextBase_GetScheduleGroupId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Unblock, 4)
void __thiscall ExternalContextBase_Unblock(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_IsSynchronouslyBlocked, 4)
bool __thiscall ExternalContextBase_IsSynchronouslyBlocked(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return FALSE;
}

static void ExternalContextBase_dtor(ExternalContextBase *this)
{
    struct scheduler_list *scheduler_cur, *scheduler_next;
    union allocator_cache_entry *next, *cur;
    int i;

    /* TODO: move the allocator cache to scheduler so it can be reused */
    for(i=0; i<ARRAY_SIZE(this->allocator_cache); i++) {
        for(cur = this->allocator_cache[i]; cur; cur=next) {
            next = cur->free.next;
            operator_delete(cur);
        }
    }

    if (this->scheduler.scheduler) {
        call_Scheduler_Release(this->scheduler.scheduler);

        for(scheduler_cur=this->scheduler.next; scheduler_cur; scheduler_cur=scheduler_next) {
            scheduler_next = scheduler_cur->next;
            call_Scheduler_Release(scheduler_cur->scheduler);
            operator_delete(scheduler_cur);
        }
    }
}
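
/* MSVC-style vector deleting destructor: bit 0 of flags requests freeing the
 * memory, bit 1 signals an array whose element count is stored in an INT_PTR
 * placed just before the first object. */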
DEFINE_THISCALL_WRAPPER(ExternalContextBase_vector_dtor, 8)
Context* __thiscall ExternalContextBase_vector_dtor(ExternalContextBase *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ExternalContextBase_dtor(this+i);
        operator_delete(ptr);
    } else {
        ExternalContextBase_dtor(this);
        if(flags & 1)
            operator_delete(this);
    }

    return &this->context;
}

static void ExternalContextBase_ctor(ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);

    memset(this, 0, sizeof(*this));
    this->context.vtable = &ExternalContextBase_vtable;
    this->id = InterlockedIncrement(&context_id);

    create_default_scheduler();
    this->scheduler.scheduler = &default_scheduler->scheduler;
    call_Scheduler_Reference(&default_scheduler->scheduler);
}
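
/* Small-block suballocator behind the Concurrency::Alloc()/Free() exports.
 * A request is rounded up to a power-of-two bucket from 16 bytes (the
 * minimum asserted below) up to 2048 bytes; larger requests, or calls made
 * on an unrecognized context type, fall back to operator new and are tagged
 * with bucket -1 so Free() deletes them directly. */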
/* ?Alloc@Concurrency@@YAPAXI@Z */
/* ?Alloc@Concurrency@@YAPEAX_K@Z */
void * CDECL Concurrency_Alloc(size_t size)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    union allocator_cache_entry *p;

    size += FIELD_OFFSET(union allocator_cache_entry, alloc.mem);
    if (size < sizeof(*p))
        size = sizeof(*p);

    if (context->context.vtable != &ExternalContextBase_vtable) {
        p = operator_new(size);
        p->alloc.bucket = -1;
    }else {
        int i;

        C_ASSERT(sizeof(union allocator_cache_entry) <= 1 << 4);
        for(i=0; i<ARRAY_SIZE(context->allocator_cache); i++)
            if (1 << (i+4) >= size) break;

        if(i==ARRAY_SIZE(context->allocator_cache)) {
            p = operator_new(size);
            p->alloc.bucket = -1;
        }else if (context->allocator_cache[i]) {
            p = context->allocator_cache[i];
            context->allocator_cache[i] = p->free.next;
            p->alloc.bucket = i;
        }else {
            p = operator_new(1 << (i+4));
            p->alloc.bucket = i;
        }
    }

    TRACE("(%Iu) returning %p\n", size, p->alloc.mem);
    return p->alloc.mem;
}
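
/* Free() returns a block to the owning context's bucket when it came from
 * the cache and that bucket's free list is less than 20 entries deep;
 * everything else is handed straight to operator delete. */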
/* ?Free@Concurrency@@YAXPAX@Z */
/* ?Free@Concurrency@@YAXPEAX@Z */
void CDECL Concurrency_Free(void* mem)
{
    union allocator_cache_entry *p = (union allocator_cache_entry*)((char*)mem-FIELD_OFFSET(union allocator_cache_entry, alloc.mem));
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    int bucket = p->alloc.bucket;

    TRACE("(%p)\n", mem);

    if (context->context.vtable != &ExternalContextBase_vtable) {
        operator_delete(p);
    }else {
        if(bucket >= 0 && bucket < ARRAY_SIZE(context->allocator_cache) &&
            (!context->allocator_cache[bucket] || context->allocator_cache[bucket]->free.depth < 20)) {
            p->free.next = context->allocator_cache[bucket];
            p->free.depth = p->free.next ? p->free.next->free.depth+1 : 0;
            context->allocator_cache[bucket] = p;
        }else {
            operator_delete(p);
        }
    }
}

/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z */
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QEAAIW4PolicyElementKey@2@I@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetPolicyValue, 12)
unsigned int __thiscall SchedulerPolicy_SetPolicyValue(SchedulerPolicy *this,
        PolicyElementKey policy, unsigned int val)
{
    unsigned int ret;

    TRACE("(%p %d %d)\n", this, policy, val);

    if (policy == MinConcurrency) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "MinConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    if (policy == MaxConcurrency) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "MaxConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    if (policy >= last_policy_id) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }

    switch(policy) {
    case SchedulerKind:
        if (val) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "SchedulerKind");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case TargetOversubscriptionFactor:
        if (!val) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "TargetOversubscriptionFactor");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case ContextPriority:
        if (((int)val < -7 /* THREAD_PRIORITY_REALTIME_LOWEST */
            || val > 6 /* THREAD_PRIORITY_REALTIME_HIGHEST */)
            && val != THREAD_PRIORITY_IDLE && val != THREAD_PRIORITY_TIME_CRITICAL
            && val != INHERIT_THREAD_PRIORITY) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "ContextPriority");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case SchedulingProtocol:
    case DynamicProgressFeedback:
    case WinRTInitialization:
        if (val != 0 && val != 1) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "SchedulingProtocol");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    default:
        break;
    }

    ret = this->policy_container->policies[policy];
    this->policy_container->policies[policy] = val;
    return ret;
}

/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z */
/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QEAAXII@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetConcurrencyLimits, 12)
void __thiscall SchedulerPolicy_SetConcurrencyLimits(SchedulerPolicy *this,
        unsigned int min_concurrency, unsigned int max_concurrency)
{
    TRACE("(%p %d %d)\n", this, min_concurrency, max_concurrency);

    if (min_concurrency > max_concurrency) {
        invalid_scheduler_policy_thread_specification e;
        invalid_scheduler_policy_thread_specification_ctor_str(&e, NULL);
        _CxxThrowException(&e, &invalid_scheduler_policy_thread_specification_exception_type);
    }
    if (!max_concurrency) {
        invalid_scheduler_policy_value e;
        invalid_scheduler_policy_value_ctor_str(&e, "MaxConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
    }

    this->policy_container->policies[MinConcurrency] = min_concurrency;
    this->policy_container->policies[MaxConcurrency] = max_concurrency;
}

/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z */
/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QEBAIW4PolicyElementKey@2@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_GetPolicyValue, 8)
unsigned int __thiscall SchedulerPolicy_GetPolicyValue(
        const SchedulerPolicy *this, PolicyElementKey policy)
{
    TRACE("(%p %d)\n", this, policy);

    if (policy >= last_policy_id) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    return this->policy_container->policies[policy];
}

/* ??0SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_ctor, 4)
SchedulerPolicy* __thiscall SchedulerPolicy_ctor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);

    this->policy_container = operator_new(sizeof(*this->policy_container));
    /* TODO: default values can probably be affected by CurrentScheduler */
    this->policy_container->policies[SchedulerKind] = 0;
    this->policy_container->policies[MaxConcurrency] = -1;
    this->policy_container->policies[MinConcurrency] = 1;
    this->policy_container->policies[TargetOversubscriptionFactor] = 1;
    this->policy_container->policies[LocalContextCacheSize] = 8;
    this->policy_container->policies[ContextStackSize] = 0;
    this->policy_container->policies[ContextPriority] = THREAD_PRIORITY_NORMAL;
    this->policy_container->policies[SchedulingProtocol] = 0;
    this->policy_container->policies[DynamicProgressFeedback] = 1;
    return this;
}
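
/* Variadic constructor taking n (PolicyElementKey, value) pairs.  Note that
 * the concurrency limits are collected and applied last through
 * SetConcurrencyLimits(), presumably so MinConcurrency and MaxConcurrency
 * are validated against each other instead of being rejected by
 * SetPolicyValue(). */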
/* ??0SchedulerPolicy@Concurrency@@QAA@IZZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@_KZZ */
/* TODO: don't leak policy_container on exception */
SchedulerPolicy* WINAPIV SchedulerPolicy_ctor_policies(
        SchedulerPolicy *this, size_t n, ...)
{
    unsigned int min_concurrency, max_concurrency;
    va_list valist;
    size_t i;

    TRACE("(%p %Iu)\n", this, n);

    SchedulerPolicy_ctor(this);
    min_concurrency = this->policy_container->policies[MinConcurrency];
    max_concurrency = this->policy_container->policies[MaxConcurrency];

    va_start(valist, n);
    for(i=0; i<n; i++) {
        PolicyElementKey policy = va_arg(valist, PolicyElementKey);
        unsigned int val = va_arg(valist, unsigned int);

        if(policy == MinConcurrency)
            min_concurrency = val;
        else if(policy == MaxConcurrency)
            max_concurrency = val;
        else
            SchedulerPolicy_SetPolicyValue(this, policy, val);
    }
    va_end(valist);

    SchedulerPolicy_SetConcurrencyLimits(this, min_concurrency, max_concurrency);
    return this;
}

/* ??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z */
/* ??4SchedulerPolicy@Concurrency@@QEAAAEAV01@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_op_assign, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_op_assign(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    memcpy(this->policy_container->policies, rhs->policy_container->policies,
            sizeof(this->policy_container->policies));
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z */
/* ??0SchedulerPolicy@Concurrency@@QEAA@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_copy_ctor, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_copy_ctor(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    SchedulerPolicy_ctor(this);
    return SchedulerPolicy_op_assign(this, rhs);
}

/* ??1SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??1SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_dtor, 4)
void __thiscall SchedulerPolicy_dtor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);
    operator_delete(this->policy_container);
}

static void ThreadScheduler_dtor(ThreadScheduler *this)
{
    int i;

    if(this->ref != 0) WARN("ref = %d\n", this->ref);
    SchedulerPolicy_dtor(&this->policy);

    for(i=0; i<this->shutdown_count; i++)
        SetEvent(this->shutdown_events[i]);
    operator_delete(this->shutdown_events);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Id, 4)
unsigned int __thiscall ThreadScheduler_Id(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetNumberOfVirtualProcessors, 4)
unsigned int __thiscall ThreadScheduler_GetNumberOfVirtualProcessors(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->virt_proc_no;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetPolicy, 8)
SchedulerPolicy* __thiscall ThreadScheduler_GetPolicy(
        const ThreadScheduler *this, SchedulerPolicy *ret)
{
    TRACE("(%p %p)\n", this, ret);
    return SchedulerPolicy_copy_ctor(ret, &this->policy);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Reference, 4)
unsigned int __thiscall ThreadScheduler_Reference(ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return InterlockedIncrement(&this->ref);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Release, 4)
unsigned int __thiscall ThreadScheduler_Release(ThreadScheduler *this)
{
    unsigned int ret = InterlockedDecrement(&this->ref);

    TRACE("(%p)\n", this);

    if(!ret) {
        ThreadScheduler_dtor(this);
        operator_delete(this);
    }
    return ret;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_RegisterShutdownEvent, 8)
void __thiscall ThreadScheduler_RegisterShutdownEvent(ThreadScheduler *this, HANDLE event)
{
    HANDLE *shutdown_events;
    int size;

    TRACE("(%p %p)\n", this, event);

    EnterCriticalSection(&this->cs);

    size = this->shutdown_size ? this->shutdown_size * 2 : 1;
    shutdown_events = operator_new(size * sizeof(*shutdown_events));
    memcpy(shutdown_events, this->shutdown_events,
            this->shutdown_count * sizeof(*shutdown_events));
    operator_delete(this->shutdown_events);
    this->shutdown_size = size;
    this->shutdown_events = shutdown_events;
    this->shutdown_events[this->shutdown_count++] = event;

    LeaveCriticalSection(&this->cs);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Attach, 4)
void __thiscall ThreadScheduler_Attach(ThreadScheduler *this)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    TRACE("(%p)\n", this);

    if(context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(context->scheduler.scheduler == &this->scheduler) {
        improper_scheduler_attach e;
        improper_scheduler_attach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_attach_exception_type);
    }

    if(context->scheduler.scheduler) {
        struct scheduler_list *l = operator_new(sizeof(*l));
        *l = context->scheduler;
        context->scheduler.next = l;
    }
    context->scheduler.scheduler = &this->scheduler;
    ThreadScheduler_Reference(this);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup_loc, 8)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup_loc(
        ThreadScheduler *this, /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup, 4)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup(ThreadScheduler *this)
{
    FIXME("(%p) stub\n", this);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask_loc, 16)
void __thiscall ThreadScheduler_ScheduleTask_loc(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data, /*location*/void *placement)
{
    FIXME("(%p %p %p %p) stub\n", this, proc, data, placement);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask, 12)
void __thiscall ThreadScheduler_ScheduleTask(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data)
{
    FIXME("(%p %p %p) stub\n", this, proc, data);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_IsAvailableLocation, 8)
bool __thiscall ThreadScheduler_IsAvailableLocation(
        const ThreadScheduler *this, const /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return FALSE;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_vector_dtor, 8)
Scheduler* __thiscall ThreadScheduler_vector_dtor(ThreadScheduler *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ThreadScheduler_dtor(this+i);
        operator_delete(ptr);
    } else {
        ThreadScheduler_dtor(this);
        if(flags & 1)
            operator_delete(this);
    }

    return &this->scheduler;
}

static ThreadScheduler* ThreadScheduler_ctor(ThreadScheduler *this,
        const SchedulerPolicy *policy)
{
    SYSTEM_INFO si;

    TRACE("(%p)->()\n", this);

    this->scheduler.vtable = &ThreadScheduler_vtable;
    this->ref = 1;
    this->id = InterlockedIncrement(&scheduler_id);
    SchedulerPolicy_copy_ctor(&this->policy, policy);

    GetSystemInfo(&si);
    this->virt_proc_no = SchedulerPolicy_GetPolicyValue(&this->policy, MaxConcurrency);
    if(this->virt_proc_no > si.dwNumberOfProcessors)
        this->virt_proc_no = si.dwNumberOfProcessors;

    this->shutdown_count = this->shutdown_size = 0;
    this->shutdown_events = NULL;

    InitializeCriticalSection(&this->cs);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": ThreadScheduler");
    return this;
}

/* ?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z */
/* ?Create@Scheduler@Concurrency@@SAPEAV12@AEBVSchedulerPolicy@2@@Z */
Scheduler* __cdecl Scheduler_Create(const SchedulerPolicy *policy)
{
    ThreadScheduler *ret;

    TRACE("(%p)\n", policy);

    ret = operator_new(sizeof(*ret));
    return &ThreadScheduler_ctor(ret, policy)->scheduler;
}

/* ?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ */
void __cdecl Scheduler_ResetDefaultSchedulerPolicy(void)
{
    TRACE("()\n");

    EnterCriticalSection(&default_scheduler_cs);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    SchedulerPolicy_ctor(&default_scheduler_policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl Scheduler_SetDefaultSchedulerPolicy(const SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler_policy.policy_container)
        SchedulerPolicy_copy_ctor(&default_scheduler_policy, policy);
    else
        SchedulerPolicy_op_assign(&default_scheduler_policy, policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Create@CurrentScheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?Create@CurrentScheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl CurrentScheduler_Create(const SchedulerPolicy *policy)
{
    Scheduler *scheduler;

    TRACE("(%p)\n", policy);

    scheduler = Scheduler_Create(policy);
    call_Scheduler_Attach(scheduler);
}

/* ?Detach@CurrentScheduler@Concurrency@@SAXXZ */
void __cdecl CurrentScheduler_Detach(void)
{
    ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();

    TRACE("()\n");

    if(!context) {
        improper_scheduler_detach e;
        improper_scheduler_detach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
    }

    if(context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(!context->scheduler.next) {
        improper_scheduler_detach e;
        improper_scheduler_detach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
    }

    call_Scheduler_Release(context->scheduler.scheduler);
    if(!context->scheduler.next) {
        context->scheduler.scheduler = NULL;
    }else {
        struct scheduler_list *entry = context->scheduler.next;
        context->scheduler.scheduler = entry->scheduler;
        context->scheduler.next = entry->next;
        operator_delete(entry);
    }
}

static void create_default_scheduler(void)
{
    if(default_scheduler)
        return;

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler) {
        ThreadScheduler *scheduler;

        if(!default_scheduler_policy.policy_container)
            SchedulerPolicy_ctor(&default_scheduler_policy);

        scheduler = operator_new(sizeof(*scheduler));
        ThreadScheduler_ctor(scheduler, &default_scheduler_policy);
        default_scheduler = scheduler;
    }
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Get@CurrentScheduler@Concurrency@@SAPAVScheduler@2@XZ */
/* ?Get@CurrentScheduler@Concurrency@@SAPEAVScheduler@2@XZ */
Scheduler* __cdecl CurrentScheduler_Get(void)
{
    TRACE("()\n");
    return get_current_scheduler();
}

#if _MSVCR_VER > 100
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@AAVlocation@2@@Z */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@AEAVlocation@2@@Z */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup_loc(/*location*/void *placement)
{
    TRACE("(%p)\n", placement);
    return call_Scheduler_CreateScheduleGroup_loc(get_current_scheduler(), placement);
}
#endif

/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@XZ */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@XZ */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup(void)
{
    TRACE("()\n");
    return call_Scheduler_CreateScheduleGroup(get_current_scheduler());
}

/* ?GetNumberOfVirtualProcessors@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_GetNumberOfVirtualProcessors(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_GetNumberOfVirtualProcessors(scheduler);
}

/* ?GetPolicy@CurrentScheduler@Concurrency@@SA?AVSchedulerPolicy@2@XZ */
SchedulerPolicy* __cdecl CurrentScheduler_GetPolicy(SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);
    return call_Scheduler_GetPolicy(get_current_scheduler(), policy);
}

/* ?Id@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_Id(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_Id(scheduler);
}

#if _MSVCR_VER > 100
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NABVlocation@2@@Z */
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NAEBVlocation@2@@Z */
bool __cdecl CurrentScheduler_IsAvailableLocation(const /*location*/void *placement)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("(%p)\n", placement);

    if(!scheduler)
        return FALSE;
    return call_Scheduler_IsAvailableLocation(scheduler, placement);
}
#endif

/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPAX@Z */
/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPEAX@Z */
void __cdecl CurrentScheduler_RegisterShutdownEvent(HANDLE event)
{
    TRACE("(%p)\n", event);
    call_Scheduler_RegisterShutdownEvent(get_current_scheduler(), event);
}

#if _MSVCR_VER > 100
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0AAVlocation@2@@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0AEAVlocation@2@@Z */
void __cdecl CurrentScheduler_ScheduleTask_loc(void (__cdecl *proc)(void*),
        void *data, /*location*/void *placement)
{
    TRACE("(%p %p %p)\n", proc, data, placement);
    call_Scheduler_ScheduleTask_loc(get_current_scheduler(), proc, data, placement);
}
#endif

/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0@Z */
void __cdecl CurrentScheduler_ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    call_Scheduler_ScheduleTask(get_current_scheduler(), proc, data);
}

/* ??0_Scheduler@details@Concurrency@@QAE@PAVScheduler@2@@Z */
/* ??0_Scheduler@details@Concurrency@@QEAA@PEAVScheduler@2@@Z */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor_sched, 8)
_Scheduler* __thiscall _Scheduler_ctor_sched(_Scheduler *this, Scheduler *scheduler)
{
    TRACE("(%p %p)\n", this, scheduler);

    this->scheduler = scheduler;
    return this;
}

/* ??_F_Scheduler@details@Concurrency@@QAEXXZ */
/* ??_F_Scheduler@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor, 4)
_Scheduler* __thiscall _Scheduler_ctor(_Scheduler *this)
{
    return _Scheduler_ctor_sched(this, NULL);
}

/* ?_GetScheduler@_Scheduler@details@Concurrency@@QAEPAVScheduler@3@XZ */
/* ?_GetScheduler@_Scheduler@details@Concurrency@@QEAAPEAVScheduler@3@XZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__GetScheduler, 4)
Scheduler* __thiscall _Scheduler__GetScheduler(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return this->scheduler;
}

/* ?_Reference@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Reference@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Reference, 4)
unsigned int __thiscall _Scheduler__Reference(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Reference(this->scheduler);
}

/* ?_Release@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Release@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Release, 4)
unsigned int __thiscall _Scheduler__Release(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Release(this->scheduler);
}

/* ?_Get@_CurrentScheduler@details@Concurrency@@SA?AV_Scheduler@23@XZ */
_Scheduler* __cdecl _CurrentScheduler__Get(_Scheduler *ret)
{
    TRACE("()\n");
    return _Scheduler_ctor_sched(ret, get_current_scheduler());
}

/* ?_GetNumberOfVirtualProcessors@_CurrentScheduler@details@Concurrency@@SAIXZ */
unsigned int __cdecl _CurrentScheduler__GetNumberOfVirtualProcessors(void)
{
    TRACE("()\n");
    get_current_scheduler();
    return CurrentScheduler_GetNumberOfVirtualProcessors();
}

/* ?_Id@_CurrentScheduler@details@Concurrency@@SAIXZ */
unsigned int __cdecl _CurrentScheduler__Id(void)
{
    TRACE("()\n");
    get_current_scheduler();
    return CurrentScheduler_Id();
}

/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPEAX@Z0@Z */
void __cdecl _CurrentScheduler__ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    CurrentScheduler_ScheduleTask(proc, data);
}

/* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
unsigned int __cdecl SpinCount__Value(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors>1 ? 4000 : 0;
    }

    return val;
}

/* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 1;
    this->yield_func = yf;
    return this;
}

/* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 0;
    this->yield_func = yf;
    return this;
}

/* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
void __thiscall SpinWait_dtor(SpinWait *this)
{
    TRACE("(%p)\n", this);
}

/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
void __thiscall SpinWait__DoYield(SpinWait *this)
{
    TRACE("(%p)\n", this);

    if(this->unknown)
        this->yield_func();
}

/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
{
    TRACE("(%p)\n", this);
    return 1;
}

/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
{
    TRACE("(%p %d)\n", this, spin);

    this->spin = spin;
    this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
}

/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
void __thiscall SpinWait__Reset(SpinWait *this)
{
    SpinWait__SetSpinCount(this, SpinCount__Value());
}

/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
{
    TRACE("(%p)\n", this);

    this->spin--;
    return this->spin > 0;
}
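
/* One step of the spin-wait state machine: INIT loads the spin count, SPIN
 * burns one count and moves to YIELD or DONE when it reaches zero, YIELD
 * calls the yield function once, and DONE resets the object and returns
 * FALSE to tell the caller that spinning is over. */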
/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
bool __thiscall SpinWait__SpinOnce(SpinWait *this)
{
    switch(this->state) {
    case SPINWAIT_INIT:
        SpinWait__Reset(this);
        /* fall through */
    case SPINWAIT_SPIN:
        InterlockedDecrement((LONG*)&this->spin);
        if(!this->spin)
            this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
        return TRUE;
    case SPINWAIT_YIELD:
        this->state = SPINWAIT_DONE;
        this->yield_func();
        return TRUE;
    default:
        SpinWait__Reset(this);
        return FALSE;
    }
}
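
/* Concurrency::critical_section is built on an NT keyed event that is shared
 * by all the synchronization objects in this file and created lazily on
 * first use.  Waiters link themselves into a singly-linked queue with a
 * lock-free InterlockedExchangePointer() on `tail`; the head of the queue is
 * the lock owner, and each blocked thread sleeps in NtWaitForKeyedEvent()
 * keyed on its own queue node. */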
/* ??0critical_section@Concurrency@@QAE@XZ */
/* ??0critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
critical_section* __thiscall critical_section_ctor(critical_section *this)
{
    TRACE("(%p)\n", this);

    if(!keyed_event) {
        HANDLE event;

        NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
        if(InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
            NtClose(event);
    }

    this->unk_thread_id = 0;
    this->head = this->tail = NULL;
    return this;
}

/* ??1critical_section@Concurrency@@QAE@XZ */
/* ??1critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
void __thiscall critical_section_dtor(critical_section *this)
{
    TRACE("(%p)\n", this);
}

static void __cdecl spin_wait_yield(void)
{
    Sleep(0);
}

static inline void spin_wait_for_next_cs(cs_queue *q)
{
    SpinWait sw;

    if(q->next) return;

    SpinWait_ctor(&sw, &spin_wait_yield);
    SpinWait__Reset(&sw);
    while(!q->next)
        SpinWait__SpinOnce(&sw);
    SpinWait_dtor(&sw);
}

static inline void cs_set_head(critical_section *cs, cs_queue *q)
{
    cs->unk_thread_id = GetCurrentThreadId();
    cs->unk_active.next = q->next;
    cs->head = &cs->unk_active;
}
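
/* Core locking path: atomically swap ourselves in as the new queue tail.  If
 * there was a previous tail we are not the owner, so link in behind it and
 * block on the keyed event until unlock() releases us.  Once we own the lock,
 * the embedded `unk_active` node replaces our stack node as the resident
 * head; if another thread enqueued itself in the meantime, the compare-
 * exchange on `tail` fails and we must spin until our successor has finished
 * publishing its `next` pointer. */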
static inline void cs_lock(critical_section *cs, cs_queue *q)
{
    cs_queue *last;

    if(cs->unk_thread_id == GetCurrentThreadId()) {
        improper_lock e;
        improper_lock_ctor_str(&e, "Already locked");
        _CxxThrowException(&e, &improper_lock_exception_type);
    }

    memset(q, 0, sizeof(*q));
    last = InterlockedExchangePointer(&cs->tail, q);
    if(last) {
        last->next = q;
        NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
    }

    cs_set_head(cs, q);
    if(InterlockedCompareExchangePointer(&cs->tail, &cs->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        cs->unk_active.next = q->next;
    }
}

/* ?lock@critical_section@Concurrency@@QAEXXZ */
/* ?lock@critical_section@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
void __thiscall critical_section_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);
    cs_lock(this, &q);
}

/* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
/* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
bool __thiscall critical_section_try_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);

    if(this->unk_thread_id == GetCurrentThreadId())
        return FALSE;

    memset(&q, 0, sizeof(q));
    if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
        cs_set_head(this, &q);
        if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
            spin_wait_for_next_cs(&q);
            this->unk_active.next = q.next;
        }
        return TRUE;
    }
    return FALSE;
}

/* ?unlock@critical_section@Concurrency@@QAEXXZ */
/* ?unlock@critical_section@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
void __thiscall critical_section_unlock(critical_section *this)
{
    TRACE("(%p)\n", this);

    this->unk_thread_id = 0;
    this->head = NULL;
    if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
            == &this->unk_active) return;
    spin_wait_for_next_cs(&this->unk_active);

#if _MSVCR_VER >= 110
    while(1) {
        cs_queue *next;

        if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
            break;

        next = this->unk_active.next;
        if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
            HeapFree(GetProcessHeap(), 0, next);
            return;
        }
        spin_wait_for_next_cs(next);

        this->unk_active.next = next->next;
        HeapFree(GetProcessHeap(), 0, next);
    }
#endif

    NtReleaseKeyedEvent(keyed_event, this->unk_active.next, 0, NULL);
}

/* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
/* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
critical_section* __thiscall critical_section_native_handle(critical_section *this)
{
    TRACE("(%p)\n", this);
    return this;
}

#if _MSVCR_VER >= 110
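
/* try_lock_for() heap-allocates its queue node because a timed-out waiter
 * may leave the node behind in the queue.  The `free` flag arbitrates the
 * race between timeout and wakeup: if the unlocking thread has already
 * committed to waking us we must consume the keyed-event signal and take the
 * lock anyway; otherwise we return FALSE and the abandoned node is released
 * later by the unlock path above. */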
/* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
/* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
bool __thiscall critical_section_try_lock_for(
        critical_section *this, unsigned int timeout)
{
    cs_queue *q, *last;

    TRACE("(%p %d)\n", this, timeout);

    if(this->unk_thread_id == GetCurrentThreadId()) {
        improper_lock e;
        improper_lock_ctor_str(&e, "Already locked");
        _CxxThrowException(&e, &improper_lock_exception_type);
    }

    if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
        return critical_section_try_lock(this);

    last = InterlockedExchangePointer(&this->tail, q);
    if(last) {
        LARGE_INTEGER to;
        NTSTATUS status;
        FILETIME ft;

        last->next = q;
        GetSystemTimeAsFileTime(&ft);
        to.QuadPart = ((LONGLONG)ft.dwHighDateTime << 32) +
            ft.dwLowDateTime + (LONGLONG)timeout * 10000;
        status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
        if(status == STATUS_TIMEOUT) {
            if(!InterlockedExchange(&q->free, TRUE))
                return FALSE;
            /* A thread has signaled the event and is blocked waiting for us;
             * we have to consume the wakeup so that thread can proceed. */
            NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
        }
    }

    cs_set_head(this, q);
    if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        this->unk_active.next = q->next;
    }

    HeapFree(GetProcessHeap(), 0, q);
    return TRUE;
}
#endif
/* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
        critical_section_scoped_lock *this, critical_section *cs)
{
    TRACE("(%p %p)\n", this, cs);
    this->cs = cs;
    cs_lock(this->cs, &this->lock.q);
    return this;
}

/* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
/* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(this->cs);
}

/* ??0_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??0_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock_ctor, 4)
_NonReentrantPPLLock* __thiscall _NonReentrantPPLLock_ctor(_NonReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    critical_section_ctor(&this->cs);
    return this;
}

/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Acquire, 8)
void __thiscall _NonReentrantPPLLock__Acquire(_NonReentrantPPLLock *this, cs_queue *q)
{
    TRACE("(%p %p)\n", this, q);
    cs_lock(&this->cs, q);
}

/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Release, 4)
void __thiscall _NonReentrantPPLLock__Release(_NonReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(&this->cs);
}

/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_ctor, 8)
_NonReentrantPPLLock__Scoped_lock* __thiscall _NonReentrantPPLLock__Scoped_lock_ctor(
        _NonReentrantPPLLock__Scoped_lock *this, _NonReentrantPPLLock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    _NonReentrantPPLLock__Acquire(this->lock, &this->wait.q);
    return this;
}

/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_dtor, 4)
void __thiscall _NonReentrantPPLLock__Scoped_lock_dtor(_NonReentrantPPLLock__Scoped_lock *this)
{
    TRACE("(%p)\n", this);

    _NonReentrantPPLLock__Release(this->lock);
}

/* ??0_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??0_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock_ctor, 4)
_ReentrantPPLLock* __thiscall _ReentrantPPLLock_ctor(_ReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    critical_section_ctor(&this->cs);
    this->count = 0;
    this->owner = -1;
    return this;
}

/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Acquire, 8)
void __thiscall _ReentrantPPLLock__Acquire(_ReentrantPPLLock *this, cs_queue *q)
{
    TRACE("(%p %p)\n", this, q);

    if(this->owner == GetCurrentThreadId()) {
        this->count++;
        return;
    }

    cs_lock(&this->cs, q);
    this->count++;
    this->owner = GetCurrentThreadId();
}

/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Release, 4)
void __thiscall _ReentrantPPLLock__Release(_ReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    this->count--;
    if(this->count)
        return;

    this->owner = -1;
    critical_section_unlock(&this->cs);
}

/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_ctor, 8)
_ReentrantPPLLock__Scoped_lock* __thiscall _ReentrantPPLLock__Scoped_lock_ctor(
        _ReentrantPPLLock__Scoped_lock *this, _ReentrantPPLLock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    _ReentrantPPLLock__Acquire(this->lock, &this->wait.q);
    return this;
}

/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_dtor, 4)
void __thiscall _ReentrantPPLLock__Scoped_lock_dtor(_ReentrantPPLLock__Scoped_lock *this)
{
    TRACE("(%p)\n", this);

    _ReentrantPPLLock__Release(this->lock);
}
/* ?_GetConcurrency@details@Concurrency@@YAIXZ */
unsigned int __cdecl _GetConcurrency(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors;
    }

    return val;
}

static inline PLARGE_INTEGER evt_timeout(PLARGE_INTEGER pTime, unsigned int timeout)
{
    if(timeout == COOPERATIVE_TIMEOUT_INFINITE) return NULL;
    pTime->QuadPart = (ULONGLONG)timeout * -10000;
    return pTime;
}
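
/* Concurrency::event waits are built around a per-wait thread_wait object.
 * Each event keeps a doubly-linked list of waiter entries protected by its
 * own critical_section; `pending_waits` counts how many signals are still
 * needed (all events for wait_all, a single one otherwise), and `signaled`
 * transitions EVT_RUNNING -> EVT_WAITING -> the signaling event, so that
 * set() and a timing-out waiter agree on which side performs the keyed-event
 * rendezvous. */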
2078 static void evt_add_queue(thread_wait_entry **head, thread_wait_entry *entry)
2080 entry->next = *head;
2081 entry->prev = NULL;
2082 if(*head) (*head)->prev = entry;
2083 *head = entry;
2086 static void evt_remove_queue(thread_wait_entry **head, thread_wait_entry *entry)
2088 if(entry == *head)
2089 *head = entry->next;
2090 else if(entry->prev)
2091 entry->prev->next = entry->next;
2092 if(entry->next) entry->next->prev = entry->prev;
2095 static size_t evt_end_wait(thread_wait *wait, event **events, int count)
2097 size_t i, ret = COOPERATIVE_WAIT_TIMEOUT;
2099 for(i = 0; i < count; i++) {
2100 critical_section_lock(&events[i]->cs);
2101 if(events[i] == wait->signaled) ret = i;
2102 evt_remove_queue(&events[i]->waiters, &wait->entries[i]);
2103 critical_section_unlock(&events[i]->cs);
2106 return ret;
2109 static inline int evt_transition(void **state, void *from, void *to)
2111 return InterlockedCompareExchangePointer(state, to, from) == from;
2114 static size_t evt_wait(thread_wait *wait, event **events, int count, bool wait_all, unsigned int timeout)
2116 int i;
2117 NTSTATUS status;
2118 LARGE_INTEGER ntto;
2120 wait->signaled = EVT_RUNNING;
2121 wait->pending_waits = wait_all ? count : 1;
2122 for(i = 0; i < count; i++) {
2123 wait->entries[i].wait = wait;
2125 critical_section_lock(&events[i]->cs);
2126 evt_add_queue(&events[i]->waiters, &wait->entries[i]);
2127 if(events[i]->signaled) {
2128 if(!InterlockedDecrement(&wait->pending_waits)) {
2129 wait->signaled = events[i];
2130 critical_section_unlock(&events[i]->cs);
2132 return evt_end_wait(wait, events, i+1);
2135 critical_section_unlock(&events[i]->cs);
2138 if(!timeout)
2139 return evt_end_wait(wait, events, count);
2141 if(!evt_transition(&wait->signaled, EVT_RUNNING, EVT_WAITING))
2142 return evt_end_wait(wait, events, count);
2144 status = NtWaitForKeyedEvent(keyed_event, wait, 0, evt_timeout(&ntto, timeout));
2146 if(status && !evt_transition(&wait->signaled, EVT_WAITING, EVT_RUNNING))
2147 NtWaitForKeyedEvent(keyed_event, wait, 0, NULL);
2149 return evt_end_wait(wait, events, count);
/* ??0event@Concurrency@@QAE@XZ */
/* ??0event@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(event_ctor, 4)
event* __thiscall event_ctor(event *this)
{
    TRACE("(%p)\n", this);

    this->waiters = NULL;
    this->signaled = FALSE;
    critical_section_ctor(&this->cs);

    return this;
}

/* ??1event@Concurrency@@QAE@XZ */
/* ??1event@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(event_dtor, 4)
void __thiscall event_dtor(event *this)
{
    TRACE("(%p)\n", this);
    critical_section_dtor(&this->cs);
    if(this->waiters)
        ERR("there's a wait on destroyed event\n");
}

/* ?reset@event@Concurrency@@QAEXXZ */
/* ?reset@event@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(event_reset, 4)
void __thiscall event_reset(event *this)
{
    thread_wait_entry *entry;

    TRACE("(%p)\n", this);

    critical_section_lock(&this->cs);
    if(this->signaled) {
        this->signaled = FALSE;
        for(entry=this->waiters; entry; entry = entry->next)
            InterlockedIncrement(&entry->wait->pending_waits);
    }
    critical_section_unlock(&this->cs);
}

/* ?set@event@Concurrency@@QAEXXZ */
/* ?set@event@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(event_set, 4)
void __thiscall event_set(event *this)
{
    thread_wait_entry *wakeup = NULL;
    thread_wait_entry *entry, *next;

    TRACE("(%p)\n", this);

    critical_section_lock(&this->cs);
    if(!this->signaled) {
        this->signaled = TRUE;
        for(entry=this->waiters; entry; entry=next) {
            next = entry->next;
            if(!InterlockedDecrement(&entry->wait->pending_waits)) {
                if(InterlockedExchangePointer(&entry->wait->signaled, this) == EVT_WAITING) {
                    evt_remove_queue(&this->waiters, entry);
                    evt_add_queue(&wakeup, entry);
                }
            }
        }
    }
    critical_section_unlock(&this->cs);

    for(entry=wakeup; entry; entry=next) {
        next = entry->next;
        entry->next = entry->prev = NULL;
        NtReleaseKeyedEvent(keyed_event, entry->wait, 0, NULL);
    }
}

/* ?wait@event@Concurrency@@QAEII@Z */
/* ?wait@event@Concurrency@@QEAA_KI@Z */
DEFINE_THISCALL_WRAPPER(event_wait, 8)
size_t __thiscall event_wait(event *this, unsigned int timeout)
{
    thread_wait wait;
    size_t signaled;

    TRACE("(%p %u)\n", this, timeout);

    critical_section_lock(&this->cs);
    signaled = this->signaled;
    critical_section_unlock(&this->cs);

    if(!timeout) return signaled ? 0 : COOPERATIVE_WAIT_TIMEOUT;
    return signaled ? 0 : evt_wait(&wait, &this, 1, FALSE, timeout);
}
/* ?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z */
/* ?wait_for_multiple@event@Concurrency@@SA_KPEAPEAV12@_K_NI@Z */
size_t __cdecl event_wait_for_multiple(event **events, size_t count, bool wait_all, unsigned int timeout)
{
    thread_wait *wait;
    size_t ret;

    TRACE("(%p %Iu %d %u)\n", events, count, wait_all, timeout);

    if(count == 0)
        return 0;

    wait = operator_new(FIELD_OFFSET(thread_wait, entries[count]));
    ret = evt_wait(wait, events, count, wait_all, timeout);
    operator_delete(wait);

    return ret;
}
#if _MSVCR_VER >= 110
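
/* _Condition_variable keeps a simple singly-linked queue of sleeping
 * threads, each blocked on the shared keyed event.  The per-node `expired`
 * flag is the handshake between a timing-out waiter and notify_one()/
 * notify_all(): whichever side loses the InterlockedExchange() race must
 * complete the keyed-event rendezvous (or dispose of the node), so that
 * neither a wakeup nor a queue node is ever lost. */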
/* ??0_Condition_variable@details@Concurrency@@QAE@XZ */
/* ??0_Condition_variable@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_ctor, 4)
_Condition_variable* __thiscall _Condition_variable_ctor(_Condition_variable *this)
{
    TRACE("(%p)\n", this);

    this->queue = NULL;
    critical_section_ctor(&this->lock);
    return this;
}

/* ??1_Condition_variable@details@Concurrency@@QAE@XZ */
/* ??1_Condition_variable@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_dtor, 4)
void __thiscall _Condition_variable_dtor(_Condition_variable *this)
{
    TRACE("(%p)\n", this);

    while(this->queue) {
        cv_queue *next = this->queue->next;
        if(!this->queue->expired)
            ERR("there's an active wait\n");
        HeapFree(GetProcessHeap(), 0, this->queue);
        this->queue = next;
    }
    critical_section_dtor(&this->lock);
}

/* ?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z */
/* ?wait@_Condition_variable@details@Concurrency@@QEAAXAEAVcritical_section@3@@Z */
DEFINE_THISCALL_WRAPPER(_Condition_variable_wait, 8)
void __thiscall _Condition_variable_wait(_Condition_variable *this, critical_section *cs)
{
    cv_queue q;

    TRACE("(%p, %p)\n", this, cs);

    critical_section_lock(&this->lock);
    q.next = this->queue;
    q.expired = FALSE;
    this->queue = &q;
    critical_section_unlock(&this->lock);

    critical_section_unlock(cs);
    NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    critical_section_lock(cs);
}

/* ?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z */
/* ?wait_for@_Condition_variable@details@Concurrency@@QEAA_NAEAVcritical_section@3@I@Z */
DEFINE_THISCALL_WRAPPER(_Condition_variable_wait_for, 12)
bool __thiscall _Condition_variable_wait_for(_Condition_variable *this,
        critical_section *cs, unsigned int timeout)
{
    LARGE_INTEGER to;
    NTSTATUS status;
    FILETIME ft;
    cv_queue *q;

    TRACE("(%p %p %d)\n", this, cs, timeout);

    q = operator_new(sizeof(cv_queue));
    critical_section_lock(&this->lock);
    q->next = this->queue;
    q->expired = FALSE;
    this->queue = q;
    critical_section_unlock(&this->lock);

    critical_section_unlock(cs);

    GetSystemTimeAsFileTime(&ft);
    to.QuadPart = ((LONGLONG)ft.dwHighDateTime << 32) +
        ft.dwLowDateTime + (LONGLONG)timeout * 10000;
    status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
    if(status == STATUS_TIMEOUT) {
        if(!InterlockedExchange(&q->expired, TRUE)) {
            critical_section_lock(cs);
            return FALSE;
        }
        else
            NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
    }

    operator_delete(q);
    critical_section_lock(cs);
    return TRUE;
}
/* ?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ */
/* ?notify_one@_Condition_variable@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_one, 4)
void __thiscall _Condition_variable_notify_one(_Condition_variable *this)
{
    cv_queue *node;

    TRACE("(%p)\n", this);

    if(!this->queue)
        return;

    while(1) {
        critical_section_lock(&this->lock);
        node = this->queue;
        if(!node) {
            critical_section_unlock(&this->lock);
            return;
        }
        this->queue = node->next;
        critical_section_unlock(&this->lock);

        if(!InterlockedExchange(&node->expired, TRUE)) {
            NtReleaseKeyedEvent(keyed_event, node, 0, NULL);
            return;
        } else {
            HeapFree(GetProcessHeap(), 0, node);
        }
    }
}

/* ?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ */
/* ?notify_all@_Condition_variable@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_all, 4)
void __thiscall _Condition_variable_notify_all(_Condition_variable *this)
{
    cv_queue *ptr;

    TRACE("(%p)\n", this);

    if(!this->queue)
        return;

    critical_section_lock(&this->lock);
    ptr = this->queue;
    this->queue = NULL;
    critical_section_unlock(&this->lock);

    while(ptr) {
        cv_queue *next = ptr->next;

        if(!InterlockedExchange(&ptr->expired, TRUE))
            NtReleaseKeyedEvent(keyed_event, ptr, 0, NULL);
        else
            HeapFree(GetProcessHeap(), 0, ptr);
        ptr = next;
    }
}
#endif
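
/* reader_writer_lock packs its state into `count` (the number of active
 * readers plus the WRITER_WAITING flag) and two intrusive queues: writers
 * serialize through `writer_tail` much like critical_section does, while
 * arriving readers push themselves onto `reader_head`, and the first reader
 * through negotiates with the writer flag before waking the readers queued
 * behind it. */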
/* ??0reader_writer_lock@Concurrency@@QAE@XZ */
/* ??0reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_ctor, 4)
reader_writer_lock* __thiscall reader_writer_lock_ctor(reader_writer_lock *this)
{
    TRACE("(%p)\n", this);

    if (!keyed_event) {
        HANDLE event;

        NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
        if (InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
            NtClose(event);
    }

    memset(this, 0, sizeof(*this));
    return this;
}

/* ??1reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_dtor, 4)
void __thiscall reader_writer_lock_dtor(reader_writer_lock *this)
{
    TRACE("(%p)\n", this);

    if (this->thread_id != 0 || this->count)
        WARN("destroying locked reader_writer_lock\n");
}

static inline void spin_wait_for_next_rwl(rwl_queue *q)
{
    SpinWait sw;

    if(q->next) return;

    SpinWait_ctor(&sw, &spin_wait_yield);
    SpinWait__Reset(&sw);
    while(!q->next)
        SpinWait__SpinOnce(&sw);
    SpinWait_dtor(&sw);
}
/* ?lock@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?lock@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock, 4)
void __thiscall reader_writer_lock_lock(reader_writer_lock *this)
{
    rwl_queue q = { NULL }, *last;

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId()) {
        improper_lock e;
        improper_lock_ctor_str(&e, "Already locked");
        _CxxThrowException(&e, &improper_lock_exception_type);
    }

    last = InterlockedExchangePointer((void**)&this->writer_tail, &q);
    if (last) {
        last->next = &q;
        NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    } else {
        this->writer_head = &q;
        if (InterlockedOr(&this->count, WRITER_WAITING))
            NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    }

    this->thread_id = GetCurrentThreadId();
    this->writer_head = &this->active;
    this->active.next = NULL;
    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
        spin_wait_for_next_rwl(&q);
        this->active.next = q.next;
    }
}

/* ?lock_read@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?lock_read@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock_read, 4)
void __thiscall reader_writer_lock_lock_read(reader_writer_lock *this)
{
    rwl_queue q;

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId()) {
        improper_lock e;
        improper_lock_ctor_str(&e, "Already locked as writer");
        _CxxThrowException(&e, &improper_lock_exception_type);
    }

    do {
        q.next = this->reader_head;
    } while(InterlockedCompareExchangePointer((void**)&this->reader_head, &q, q.next) != q.next);

    if (!q.next) {
        rwl_queue *head;
        LONG count;

        while (!((count = this->count) & WRITER_WAITING))
            if (InterlockedCompareExchange(&this->count, count+1, count) == count) break;

        if (count & WRITER_WAITING)
            NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);

        head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
        while(head && head != &q) {
            rwl_queue *next = head->next;
            InterlockedIncrement(&this->count);
            NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
            head = next;
        }
    } else {
        NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    }
}
/* ?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ */
/* ?try_lock@reader_writer_lock@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock, 4)
bool __thiscall reader_writer_lock_try_lock(reader_writer_lock *this)
{
    rwl_queue q = { NULL };

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId())
        return FALSE;

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &q, NULL))
        return FALSE;
    this->writer_head = &q;
    if (!InterlockedCompareExchange(&this->count, WRITER_WAITING, 0)) {
        this->thread_id = GetCurrentThreadId();
        this->writer_head = &this->active;
        this->active.next = NULL;
        if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
            spin_wait_for_next_rwl(&q);
            this->active.next = q.next;
        }
        return TRUE;
    }

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, &q) == &q)
        return FALSE;
    spin_wait_for_next_rwl(&q);
    this->writer_head = q.next;
    if (!InterlockedOr(&this->count, WRITER_WAITING)) {
        this->thread_id = GetCurrentThreadId();
        this->writer_head = &this->active;
        this->active.next = q.next;
        return TRUE;
    }
    return FALSE;
}

/* ?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ */
/* ?try_lock_read@reader_writer_lock@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock_read, 4)
bool __thiscall reader_writer_lock_try_lock_read(reader_writer_lock *this)
{
    LONG count;

    TRACE("(%p)\n", this);

    while (!((count = this->count) & WRITER_WAITING))
        if (InterlockedCompareExchange(&this->count, count+1, count) == count) return TRUE;
    return FALSE;
}
/* ?unlock@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?unlock@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_unlock, 4)
void __thiscall reader_writer_lock_unlock(reader_writer_lock *this)
{
    LONG count;
    rwl_queue *head, *next;

    TRACE("(%p)\n", this);

    if ((count = this->count) & ~WRITER_WAITING) {
        count = InterlockedDecrement(&this->count);
        if (count != WRITER_WAITING)
            return;
        NtReleaseKeyedEvent(keyed_event, this->writer_head, 0, NULL);
        return;
    }

    this->thread_id = 0;
    next = this->writer_head->next;
    if (next) {
        NtReleaseKeyedEvent(keyed_event, next, 0, NULL);
        return;
    }

    InterlockedAnd(&this->count, ~WRITER_WAITING);
    head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
    while (head) {
        next = head->next;
        InterlockedIncrement(&this->count);
        NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
        head = next;
    }

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, this->writer_head) == this->writer_head)
        return;
    InterlockedOr(&this->count, WRITER_WAITING);
}
/* ??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_ctor, 8)
reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_ctor(
        reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    reader_writer_lock_lock(lock);
    return this;
}

/* ??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1scoped_lock@reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_dtor, 4)
void __thiscall reader_writer_lock_scoped_lock_dtor(reader_writer_lock_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    reader_writer_lock_unlock(this->lock);
}

/* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_ctor, 8)
reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_read_ctor(
        reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    reader_writer_lock_lock_read(lock);
    return this;
}

/* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_dtor, 4)
void __thiscall reader_writer_lock_scoped_lock_read_dtor(reader_writer_lock_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    reader_writer_lock_unlock(this->lock);
}
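
/* _ReentrantBlockingLock is a thin wrapper around a Win32 CRITICAL_SECTION,
 * which is already recursive, so no extra owner bookkeeping is needed. */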
/* ??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
/* ??0_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_ctor, 4)
_ReentrantBlockingLock* __thiscall _ReentrantBlockingLock_ctor(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);

    InitializeCriticalSection(&this->cs);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": _ReentrantBlockingLock");
    return this;
}

/* ??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
/* ??1_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_dtor, 4)
void __thiscall _ReentrantBlockingLock_dtor(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);
}

/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Acquire, 4)
void __thiscall _ReentrantBlockingLock__Acquire(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    EnterCriticalSection(&this->cs);
}

/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Release, 4)
void __thiscall _ReentrantBlockingLock__Release(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    LeaveCriticalSection(&this->cs);
}

/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ */
/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__TryAcquire, 4)
bool __thiscall _ReentrantBlockingLock__TryAcquire(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    return TryEnterCriticalSection(&this->cs);
}
/* ?wait@Concurrency@@YAXI@Z */
void __cdecl Concurrency_wait(unsigned int time)
{
    static int once;

    if (!once++) FIXME("(%d) stub!\n", time);

    Sleep(time);
}

#if _MSVCR_VER>=110
/* ?_Trace_agents@Concurrency@@YAXW4Agents_EventType@1@_JZZ */
void WINAPIV _Trace_agents(/*enum Concurrency::Agents_EventType*/int type, __int64 id, ...)
{
    FIXME("(%d %s)\n", type, wine_dbgstr_longlong(id));
}
#endif

/* ?_Trace_ppl_function@Concurrency@@YAXABU_GUID@@EW4ConcRT_EventType@1@@Z */
/* ?_Trace_ppl_function@Concurrency@@YAXAEBU_GUID@@EW4ConcRT_EventType@1@@Z */
void __cdecl _Trace_ppl_function(const GUID *guid, unsigned char level, enum ConcRT_EventType type)
{
    FIXME("(%s %u %i) stub\n", debugstr_guid(guid), level, type);
}
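
/* _Timer is backed by a Vista+ thread pool timer.  The due time is passed to
 * SetThreadpoolTimer() as a negative (i.e. relative) FILETIME, and the
 * callback is dispatched through the object's vtable, so subclasses can
 * override it. */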
/* ??0_Timer@details@Concurrency@@IAE@I_N@Z */
/* ??0_Timer@details@Concurrency@@IEAA@I_N@Z */
DEFINE_THISCALL_WRAPPER(_Timer_ctor, 12)
_Timer* __thiscall _Timer_ctor(_Timer *this, unsigned int elapse, bool repeat)
{
    TRACE("(%p %u %x)\n", this, elapse, repeat);

    this->vtable = &_Timer_vtable;
    this->timer = NULL;
    this->elapse = elapse;
    this->repeat = repeat;
    return this;
}

static void WINAPI timer_callback(TP_CALLBACK_INSTANCE *instance, void *ctx, TP_TIMER *timer)
{
    _Timer *this = ctx;
    TRACE("calling _Timer(%p) callback\n", this);
    call__Timer_callback(this);
}

/* ?_Start@_Timer@details@Concurrency@@IAEXXZ */
/* ?_Start@_Timer@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Timer__Start, 4)
void __thiscall _Timer__Start(_Timer *this)
{
    LONGLONG ll;
    FILETIME ft;

    TRACE("(%p)\n", this);

    this->timer = CreateThreadpoolTimer(timer_callback, this, NULL);
    if (!this->timer)
    {
        FIXME("throw exception?\n");
        return;
    }

    ll = -(LONGLONG)this->elapse * TICKSPERMSEC;
    ft.dwLowDateTime = ll & 0xffffffff;
    ft.dwHighDateTime = ll >> 32;
    SetThreadpoolTimer(this->timer, &ft, this->repeat ? this->elapse : 0, 0);
}

/* ?_Stop@_Timer@details@Concurrency@@IAEXXZ */
/* ?_Stop@_Timer@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Timer__Stop, 4)
void __thiscall _Timer__Stop(_Timer *this)
{
    TRACE("(%p)\n", this);

    SetThreadpoolTimer(this->timer, NULL, 0, 0);
    WaitForThreadpoolTimerCallbacks(this->timer, TRUE);
    CloseThreadpoolTimer(this->timer);
    this->timer = NULL;
}

/* ??1_Timer@details@Concurrency@@MAE@XZ */
/* ??1_Timer@details@Concurrency@@MEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Timer_dtor, 4)
void __thiscall _Timer_dtor(_Timer *this)
{
    TRACE("(%p)\n", this);

    if (this->timer)
        _Timer__Stop(this);
}

DEFINE_THISCALL_WRAPPER(_Timer_vector_dtor, 8)
_Timer* __thiscall _Timer_vector_dtor(_Timer *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if (flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for (i=*ptr-1; i>=0; i--)
            _Timer_dtor(this+i);
        operator_delete(ptr);
    } else {
        _Timer_dtor(this);
        if (flags & 1)
            operator_delete(this);
    }

    return this;
}
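
/* Helpers for calling thiscall virtual methods from C code on i386: the
 * caller pushes `this` like a regular cdecl argument, and the wrapper moves
 * it into %ecx, loads the vtable from the object and tail-jumps to the slot
 * at the given byte offset, leaving the remaining arguments in place. */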
#ifdef __ASM_USE_THISCALL_WRAPPER

#define DEFINE_VTBL_WRAPPER(off)            \
    __ASM_GLOBAL_FUNC(vtbl_wrapper_ ## off, \
        "popl %eax\n\t"                     \
        "popl %ecx\n\t"                     \
        "pushl %eax\n\t"                    \
        "movl 0(%ecx), %eax\n\t"            \
        "jmp *" #off "(%eax)\n\t")

DEFINE_VTBL_WRAPPER(0);
DEFINE_VTBL_WRAPPER(4);
DEFINE_VTBL_WRAPPER(8);
DEFINE_VTBL_WRAPPER(12);
DEFINE_VTBL_WRAPPER(16);
DEFINE_VTBL_WRAPPER(20);
DEFINE_VTBL_WRAPPER(24);
DEFINE_VTBL_WRAPPER(28);
DEFINE_VTBL_WRAPPER(32);
DEFINE_VTBL_WRAPPER(36);
DEFINE_VTBL_WRAPPER(40);
DEFINE_VTBL_WRAPPER(44);
DEFINE_VTBL_WRAPPER(48);

#endif
DEFINE_RTTI_DATA0(Context, 0, ".?AVContext@Concurrency@@")
DEFINE_RTTI_DATA1(ContextBase, 0, &Context_rtti_base_descriptor, ".?AVContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ExternalContextBase, 0, &ContextBase_rtti_base_descriptor,
        &Context_rtti_base_descriptor, ".?AVExternalContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA0(Scheduler, 0, ".?AVScheduler@Concurrency@@")
DEFINE_RTTI_DATA1(SchedulerBase, 0, &Scheduler_rtti_base_descriptor, ".?AVSchedulerBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ThreadScheduler, 0, &SchedulerBase_rtti_base_descriptor,
        &Scheduler_rtti_base_descriptor, ".?AVThreadScheduler@details@Concurrency@@")
DEFINE_RTTI_DATA0(_Timer, 0, ".?AV_Timer@details@Concurrency@@");

__ASM_BLOCK_BEGIN(concurrency_vtables)
    __ASM_VTABLE(ExternalContextBase,
            VTABLE_ADD_FUNC(ExternalContextBase_GetId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetVirtualProcessorId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetScheduleGroupId)
            VTABLE_ADD_FUNC(ExternalContextBase_Unblock)
            VTABLE_ADD_FUNC(ExternalContextBase_IsSynchronouslyBlocked)
            VTABLE_ADD_FUNC(ExternalContextBase_vector_dtor));
    __ASM_VTABLE(ThreadScheduler,
            VTABLE_ADD_FUNC(ThreadScheduler_vector_dtor)
            VTABLE_ADD_FUNC(ThreadScheduler_Id)
            VTABLE_ADD_FUNC(ThreadScheduler_GetNumberOfVirtualProcessors)
            VTABLE_ADD_FUNC(ThreadScheduler_GetPolicy)
            VTABLE_ADD_FUNC(ThreadScheduler_Reference)
            VTABLE_ADD_FUNC(ThreadScheduler_Release)
            VTABLE_ADD_FUNC(ThreadScheduler_RegisterShutdownEvent)
            VTABLE_ADD_FUNC(ThreadScheduler_Attach)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_IsAvailableLocation)
#endif
            );
    __ASM_VTABLE(_Timer,
            VTABLE_ADD_FUNC(_Timer_vector_dtor));
__ASM_BLOCK_END
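
/* On 64-bit, RTTI descriptors and exception type info store image-relative
 * offsets, so they can only be finalized once the module base address is
 * known; 32-bit builds use absolute pointers and need no fixup here. */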
void msvcrt_init_concurrency(void *base)
{
#ifdef __x86_64__
    init_cexception_rtti(base);
    init_improper_lock_rtti(base);
    init_improper_scheduler_attach_rtti(base);
    init_improper_scheduler_detach_rtti(base);
    init_invalid_scheduler_policy_key_rtti(base);
    init_invalid_scheduler_policy_thread_specification_rtti(base);
    init_invalid_scheduler_policy_value_rtti(base);
    init_scheduler_resource_allocation_error_rtti(base);
    init_Context_rtti(base);
    init_ContextBase_rtti(base);
    init_ExternalContextBase_rtti(base);
    init_Scheduler_rtti(base);
    init_SchedulerBase_rtti(base);
    init_ThreadScheduler_rtti(base);
    init__Timer_rtti(base);

    init_cexception_cxx_type_info(base);
    init_improper_lock_cxx(base);
    init_improper_scheduler_attach_cxx(base);
    init_improper_scheduler_detach_cxx(base);
    init_invalid_scheduler_policy_key_cxx(base);
    init_invalid_scheduler_policy_thread_specification_cxx(base);
    init_invalid_scheduler_policy_value_cxx(base);
    init_scheduler_resource_allocation_error_cxx(base);
#endif
}

void msvcrt_free_concurrency(void)
{
    if (context_tls_index != TLS_OUT_OF_INDEXES)
        TlsFree(context_tls_index);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    if(default_scheduler) {
        ThreadScheduler_dtor(default_scheduler);
        operator_delete(default_scheduler);
    }

    if(keyed_event)
        NtClose(keyed_event);
}

void msvcrt_free_scheduler_thread(void)
{
    Context *context = try_get_current_context();
    if (!context) return;
    call_Context_dtor(context, 1);
}

#endif /* _MSVCR_VER >= 100 */