/*
 * Concurrency namespace implementation
 *
 * Copyright 2017 Piotr Caban
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#include <stdarg.h>
#include <stdbool.h>

#include "windef.h"
#include "winternl.h"
#include "wine/debug.h"
#include "msvcrt.h"
#include "cxx.h"

#if _MSVCR_VER >= 100

WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);

typedef exception cexception;
CREATE_EXCEPTION_OBJECT(cexception)

static LONG context_id = -1;
static LONG scheduler_id = -1;
typedef enum {
    SchedulerKind,
    MaxConcurrency,
    MinConcurrency,
    TargetOversubscriptionFactor,
    LocalContextCacheSize,
    ContextStackSize,
    ContextPriority,
    SchedulingProtocol,
    DynamicProgressFeedback,
    WinRTInitialization,
    last_policy_id
} PolicyElementKey;

typedef struct {
    struct _policy_container {
        unsigned int policies[last_policy_id];
    } *policy_container;
} SchedulerPolicy;
typedef struct {
    const vtable_ptr *vtable;
} Context;
#define call_Context_GetId(this) CALL_VTBL_FUNC(this, 0, \
        unsigned int, (const Context*), (this))
#define call_Context_GetVirtualProcessorId(this) CALL_VTBL_FUNC(this, 4, \
        unsigned int, (const Context*), (this))
#define call_Context_GetScheduleGroupId(this) CALL_VTBL_FUNC(this, 8, \
        unsigned int, (const Context*), (this))
#define call_Context_dtor(this, flags) CALL_VTBL_FUNC(this, 20, \
        Context*, (Context*, unsigned int), (this, flags))
typedef struct {
    Context *context;
} _Context;

union allocator_cache_entry {
    struct _free {
        int depth;
        union allocator_cache_entry *next;
    } free;
    struct _alloc {
        int bucket;
        char mem[1];
    } alloc;
};

struct scheduler_list {
    struct Scheduler *scheduler;
    struct scheduler_list *next;
};

typedef struct {
    Context context;
    struct scheduler_list scheduler;
    unsigned int id;
    union allocator_cache_entry *allocator_cache[8];
} ExternalContextBase;
extern const vtable_ptr ExternalContextBase_vtable;
static void ExternalContextBase_ctor(ExternalContextBase*);
typedef struct Scheduler {
    const vtable_ptr *vtable;
} Scheduler;
#define call_Scheduler_Id(this) CALL_VTBL_FUNC(this, 4, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetNumberOfVirtualProcessors(this) CALL_VTBL_FUNC(this, 8, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetPolicy(this,policy) CALL_VTBL_FUNC(this, 12, \
        SchedulerPolicy*, (Scheduler*,SchedulerPolicy*), (this,policy))
#define call_Scheduler_Reference(this) CALL_VTBL_FUNC(this, 16, unsigned int, (Scheduler*), (this))
#define call_Scheduler_Release(this) CALL_VTBL_FUNC(this, 20, unsigned int, (Scheduler*), (this))
#define call_Scheduler_RegisterShutdownEvent(this,event) CALL_VTBL_FUNC(this, 24, void, (Scheduler*,HANDLE), (this,event))
#define call_Scheduler_Attach(this) CALL_VTBL_FUNC(this, 28, void, (Scheduler*), (this))
#if _MSVCR_VER > 100
#define call_Scheduler_CreateScheduleGroup_loc(this,placement) CALL_VTBL_FUNC(this, 32, \
        /*ScheduleGroup*/void*, (Scheduler*,/*location*/void*), (this,placement))
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 36, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask_loc(this,proc,data,placement) CALL_VTBL_FUNC(this, 40, \
        void, (Scheduler*,void (__cdecl*)(void*),void*,/*location*/void*), (this,proc,data,placement))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 44, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#define call_Scheduler_IsAvailableLocation(this,placement) CALL_VTBL_FUNC(this, 48, \
        bool, (Scheduler*,const /*location*/void*), (this,placement))
#else
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 32, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 36, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#endif
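/* Note: in _MSVCR_VER > 100 builds the location-aware methods
 * (CreateScheduleGroup_loc, ScheduleTask_loc, IsAvailableLocation) are part
 * of the vtable, which shifts CreateScheduleGroup and ScheduleTask to later
 * slots; the two macro sets above keep the offsets consistent with whichever
 * runtime version is being built. */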
typedef struct {
    Scheduler scheduler;
    LONG ref;
    unsigned int id;
    unsigned int virt_proc_no;
    SchedulerPolicy policy;
    int shutdown_count;
    int shutdown_size;
    HANDLE *shutdown_events;
    CRITICAL_SECTION cs;
} ThreadScheduler;
extern const vtable_ptr ThreadScheduler_vtable;

typedef struct {
    Scheduler *scheduler;
} _Scheduler;

typedef struct {
    char empty;
} _CurrentScheduler;

typedef enum
{
    SPINWAIT_INIT,
    SPINWAIT_SPIN,
    SPINWAIT_YIELD,
    SPINWAIT_DONE
} SpinWait_state;

typedef void (__cdecl *yield_func)(void);

typedef struct
{
    ULONG spin;
    ULONG unknown;
    SpinWait_state state;
    yield_func yield_func;
} SpinWait;

typedef struct
{
    char dummy;
} _UnrealizedChore;

typedef struct
{
    char dummy;
} _StructuredTaskCollection;
/* keep in sync with msvcp90/msvcp90.h */
typedef struct cs_queue
{
    struct cs_queue *next;
#if _MSVCR_VER >= 110
    LONG free;
    int unknown;
#endif
} cs_queue;

typedef struct
{
    ULONG_PTR unk_thread_id;
    cs_queue unk_active;
#if _MSVCR_VER >= 110
    void *unknown[2];
#else
    void *unknown[1];
#endif
    cs_queue *head;
    void *tail;
} critical_section;
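/* Note: critical_section is a queue lock: contending threads link cs_queue
 * entries through 'head'/'tail', and the process-wide keyed event declared
 * further below is what parks and wakes the queued waiters in the locking
 * code later in this file. */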
typedef struct
{
    critical_section *cs;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } lock;
} critical_section_scoped_lock;

typedef struct
{
    critical_section cs;
} _NonReentrantPPLLock;

typedef struct
{
    _NonReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _NonReentrantPPLLock__Scoped_lock;

typedef struct
{
    critical_section cs;
    LONG count;
    LONG owner;
} _ReentrantPPLLock;

typedef struct
{
    _ReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _ReentrantPPLLock__Scoped_lock;

#define EVT_RUNNING (void*)1
#define EVT_WAITING NULL

struct thread_wait;
typedef struct thread_wait_entry
{
    struct thread_wait *wait;
    struct thread_wait_entry *next;
    struct thread_wait_entry *prev;
} thread_wait_entry;

typedef struct thread_wait
{
    void *signaled;
    LONG pending_waits;
    thread_wait_entry entries[1];
} thread_wait;

typedef struct
{
    thread_wait_entry *waiters;
    INT_PTR signaled;
    critical_section cs;
} event;

#if _MSVCR_VER >= 110
#define CV_WAKE (void*)1
typedef struct cv_queue {
    struct cv_queue *next;
    LONG expired;
} cv_queue;

typedef struct {
    /* cv_queue structure is not binary compatible */
    cv_queue *queue;
    critical_section lock;
} _Condition_variable;
#endif
typedef struct rwl_queue
{
    struct rwl_queue *next;
} rwl_queue;

#define WRITER_WAITING 0x80000000
/* FIXME: reader_writer_lock structure is not binary compatible
 * it can't exceed 28/56 bytes */
typedef struct
{
    LONG count;
    LONG thread_id;
    rwl_queue active;
    rwl_queue *writer_head;
    rwl_queue *writer_tail;
    rwl_queue *reader_head;
} reader_writer_lock;

typedef struct {
    reader_writer_lock *lock;
} reader_writer_lock_scoped_lock;

typedef struct {
    CRITICAL_SECTION cs;
} _ReentrantBlockingLock;

#define TICKSPERMSEC 10000
typedef struct {
    const vtable_ptr *vtable;
    TP_TIMER *timer;
    unsigned int elapse;
    bool repeat;
} _Timer;
extern const vtable_ptr _Timer_vtable;
#define call__Timer_callback(this) CALL_VTBL_FUNC(this, 4, void, (_Timer*), (this))
typedef exception improper_lock;
extern const vtable_ptr improper_lock_vtable;

typedef exception improper_scheduler_attach;
extern const vtable_ptr improper_scheduler_attach_vtable;

typedef exception improper_scheduler_detach;
extern const vtable_ptr improper_scheduler_detach_vtable;

typedef exception invalid_multiple_scheduling;
extern const vtable_ptr invalid_multiple_scheduling_vtable;

typedef exception invalid_scheduler_policy_key;
extern const vtable_ptr invalid_scheduler_policy_key_vtable;

typedef exception invalid_scheduler_policy_thread_specification;
extern const vtable_ptr invalid_scheduler_policy_thread_specification_vtable;

typedef exception invalid_scheduler_policy_value;
extern const vtable_ptr invalid_scheduler_policy_value_vtable;

typedef struct {
    exception e;
    HRESULT hr;
} scheduler_resource_allocation_error;
extern const vtable_ptr scheduler_resource_allocation_error_vtable;

enum ConcRT_EventType
{
    CONCRT_EVENT_GENERIC,
    CONCRT_EVENT_START,
    CONCRT_EVENT_END,
    CONCRT_EVENT_BLOCK,
    CONCRT_EVENT_UNBLOCK,
    CONCRT_EVENT_YIELD,
    CONCRT_EVENT_ATTACH,
    CONCRT_EVENT_DETACH
};
static DWORD context_tls_index = TLS_OUT_OF_INDEXES;

static CRITICAL_SECTION default_scheduler_cs;
static CRITICAL_SECTION_DEBUG default_scheduler_cs_debug =
{
    0, 0, &default_scheduler_cs,
    { &default_scheduler_cs_debug.ProcessLocksList, &default_scheduler_cs_debug.ProcessLocksList },
      0, 0, { (DWORD_PTR)(__FILE__ ": default_scheduler_cs") }
};
static CRITICAL_SECTION default_scheduler_cs = { &default_scheduler_cs_debug, -1, 0, 0, 0, 0 };
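/* Note: this is the usual Wine idiom for a statically initialized critical
 * section; the debug info carries the file/name string so the lock can be
 * identified in debug output. */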
static SchedulerPolicy default_scheduler_policy;
static ThreadScheduler *default_scheduler;

static HANDLE keyed_event;

static void create_default_scheduler(void);
/* ??0improper_lock@Concurrency@@QAE@PBD@Z */
/* ??0improper_lock@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_lock_ctor_str, 8)
improper_lock* __thiscall improper_lock_ctor_str(improper_lock *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &improper_lock_vtable);
}

/* ??0improper_lock@Concurrency@@QAE@XZ */
/* ??0improper_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_lock_ctor, 4)
improper_lock* __thiscall improper_lock_ctor(improper_lock *this)
{
    return improper_lock_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_lock_copy_ctor,8)
improper_lock * __thiscall improper_lock_copy_ctor(improper_lock *this, const improper_lock *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    return __exception_copy_ctor(this, rhs, &improper_lock_vtable);
}

/* ??0improper_scheduler_attach@Concurrency@@QAE@PBD@Z */
/* ??0improper_scheduler_attach@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor_str, 8)
improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor_str(
        improper_scheduler_attach *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &improper_scheduler_attach_vtable);
}

/* ??0improper_scheduler_attach@Concurrency@@QAE@XZ */
/* ??0improper_scheduler_attach@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor, 4)
improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor(
        improper_scheduler_attach *this)
{
    return improper_scheduler_attach_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_copy_ctor,8)
improper_scheduler_attach * __thiscall improper_scheduler_attach_copy_ctor(
        improper_scheduler_attach * _this, const improper_scheduler_attach * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &improper_scheduler_attach_vtable);
}

/* ??0improper_scheduler_detach@Concurrency@@QAE@PBD@Z */
/* ??0improper_scheduler_detach@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor_str, 8)
improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor_str(
        improper_scheduler_detach *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &improper_scheduler_detach_vtable);
}

/* ??0improper_scheduler_detach@Concurrency@@QAE@XZ */
/* ??0improper_scheduler_detach@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor, 4)
improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor(
        improper_scheduler_detach *this)
{
    return improper_scheduler_detach_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_copy_ctor,8)
improper_scheduler_detach * __thiscall improper_scheduler_detach_copy_ctor(
        improper_scheduler_detach * _this, const improper_scheduler_detach * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &improper_scheduler_detach_vtable);
}

/* ??0invalid_multiple_scheduling@Concurrency@@QAA@PBD@Z */
/* ??0invalid_multiple_scheduling@Concurrency@@QAE@PBD@Z */
/* ??0invalid_multiple_scheduling@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_ctor_str, 8)
invalid_multiple_scheduling* __thiscall invalid_multiple_scheduling_ctor_str(
        invalid_multiple_scheduling *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_multiple_scheduling_vtable);
}

/* ??0invalid_multiple_scheduling@Concurrency@@QAA@XZ */
/* ??0invalid_multiple_scheduling@Concurrency@@QAE@XZ */
/* ??0invalid_multiple_scheduling@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_ctor, 4)
invalid_multiple_scheduling* __thiscall invalid_multiple_scheduling_ctor(
        invalid_multiple_scheduling *this)
{
    return invalid_multiple_scheduling_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_copy_ctor,8)
invalid_multiple_scheduling * __thiscall invalid_multiple_scheduling_copy_ctor(
        invalid_multiple_scheduling * _this, const invalid_multiple_scheduling * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_multiple_scheduling_vtable);
}

/* ??0invalid_scheduler_policy_key@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor_str, 8)
invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor_str(
        invalid_scheduler_policy_key *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_key_vtable);
}

/* ??0invalid_scheduler_policy_key@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor, 4)
invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor(
        invalid_scheduler_policy_key *this)
{
    return invalid_scheduler_policy_key_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_copy_ctor,8)
invalid_scheduler_policy_key * __thiscall invalid_scheduler_policy_key_copy_ctor(
        invalid_scheduler_policy_key * _this, const invalid_scheduler_policy_key * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_key_vtable);
}

/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor_str, 8)
invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor_str(
        invalid_scheduler_policy_thread_specification *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_thread_specification_vtable);
}

/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor, 4)
invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor(
        invalid_scheduler_policy_thread_specification *this)
{
    return invalid_scheduler_policy_thread_specification_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_copy_ctor,8)
invalid_scheduler_policy_thread_specification * __thiscall invalid_scheduler_policy_thread_specification_copy_ctor(
        invalid_scheduler_policy_thread_specification * _this, const invalid_scheduler_policy_thread_specification * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_thread_specification_vtable);
}

/* ??0invalid_scheduler_policy_value@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor_str, 8)
invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor_str(
        invalid_scheduler_policy_value *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_value_vtable);
}

/* ??0invalid_scheduler_policy_value@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor, 4)
invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor(
        invalid_scheduler_policy_value *this)
{
    return invalid_scheduler_policy_value_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_copy_ctor,8)
invalid_scheduler_policy_value * __thiscall invalid_scheduler_policy_value_copy_ctor(
        invalid_scheduler_policy_value * _this, const invalid_scheduler_policy_value * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_value_vtable);
}

/* ??0scheduler_resource_allocation_error@Concurrency@@QAE@PBDJ@Z */
/* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@PEBDJ@Z */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor_name, 12)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor_name(
        scheduler_resource_allocation_error *this, const char *name, HRESULT hr)
{
    TRACE("(%p %s %lx)\n", this, wine_dbgstr_a(name), hr);
    __exception_ctor(&this->e, name, &scheduler_resource_allocation_error_vtable);
    this->hr = hr;
    return this;
}

/* ??0scheduler_resource_allocation_error@Concurrency@@QAE@J@Z */
/* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@J@Z */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor, 8)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor(
        scheduler_resource_allocation_error *this, HRESULT hr)
{
    return scheduler_resource_allocation_error_ctor_name(this, NULL, hr);
}

DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_copy_ctor,8)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_copy_ctor(
        scheduler_resource_allocation_error *this,
        const scheduler_resource_allocation_error *rhs)
{
    TRACE("(%p,%p)\n", this, rhs);

    if (!rhs->e.do_free)
        memcpy(this, rhs, sizeof(*this));
    else
        scheduler_resource_allocation_error_ctor_name(this, rhs->e.name, rhs->hr);
    return this;
}

/* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QBEJXZ */
/* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QEBAJXZ */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_get_error_code, 4)
HRESULT __thiscall scheduler_resource_allocation_error_get_error_code(
        const scheduler_resource_allocation_error *this)
{
    TRACE("(%p)\n", this);
    return this->hr;
}

DEFINE_RTTI_DATA1(improper_lock, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_lock@Concurrency@@")
DEFINE_RTTI_DATA1(improper_scheduler_attach, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_scheduler_attach@Concurrency@@")
DEFINE_RTTI_DATA1(improper_scheduler_detach, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_scheduler_detach@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_multiple_scheduling, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_multiple_scheduling@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_key, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_key@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_thread_specification, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_thread_specification@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_value, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_value@Concurrency@@")
DEFINE_RTTI_DATA1(scheduler_resource_allocation_error, 0, &cexception_rtti_base_descriptor,
        ".?AVscheduler_resource_allocation_error@Concurrency@@")

DEFINE_CXX_DATA1(improper_lock, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(improper_scheduler_attach, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(improper_scheduler_detach, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_key, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_thread_specification, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_value, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(scheduler_resource_allocation_error, &cexception_cxx_type_info, cexception_dtor)

__ASM_BLOCK_BEGIN(concurrency_exception_vtables)
    __ASM_VTABLE(improper_lock,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(improper_scheduler_attach,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(improper_scheduler_detach,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_multiple_scheduling,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_key,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_thread_specification,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_value,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(scheduler_resource_allocation_error,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
__ASM_BLOCK_END
static Context* try_get_current_context(void)
{
    if (context_tls_index == TLS_OUT_OF_INDEXES)
        return NULL;
    return TlsGetValue(context_tls_index);
}

static BOOL WINAPI init_context_tls_index(INIT_ONCE *once, void *param, void **context)
{
    context_tls_index = TlsAlloc();
    return context_tls_index != TLS_OUT_OF_INDEXES;
}

static Context* get_current_context(void)
{
    static INIT_ONCE init_once = INIT_ONCE_STATIC_INIT;
    Context *ret;

    if(!InitOnceExecuteOnce(&init_once, init_context_tls_index, NULL, NULL))
    {
        scheduler_resource_allocation_error e;
        scheduler_resource_allocation_error_ctor_name(&e, NULL,
                HRESULT_FROM_WIN32(GetLastError()));
        _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
    }

    ret = TlsGetValue(context_tls_index);
    if (!ret) {
        ExternalContextBase *context = operator_new(sizeof(ExternalContextBase));
        ExternalContextBase_ctor(context);
        TlsSetValue(context_tls_index, context);
        ret = &context->context;
    }
    return ret;
}

static Scheduler* try_get_current_scheduler(void)
{
    ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();

    if (!context)
        return NULL;

    if (context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return NULL;
    }
    return context->scheduler.scheduler;
}

static Scheduler* get_current_scheduler(void)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    if (context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return NULL;
    }
    return context->scheduler.scheduler;
}

/* ?CurrentContext@Context@Concurrency@@SAPAV12@XZ */
/* ?CurrentContext@Context@Concurrency@@SAPEAV12@XZ */
Context* __cdecl Context_CurrentContext(void)
{
    TRACE("()\n");
    return get_current_context();
}

/* ?Id@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_Id(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetId(ctx) : -1;
}

/* ?Block@Context@Concurrency@@SAXXZ */
void __cdecl Context_Block(void)
{
    FIXME("()\n");
}

/* ?Yield@Context@Concurrency@@SAXXZ */
/* ?_Yield@_Context@details@Concurrency@@SAXXZ */
void __cdecl Context_Yield(void)
{
    FIXME("()\n");
}

/* ?_SpinYield@Context@Concurrency@@SAXXZ */
void __cdecl Context__SpinYield(void)
{
    FIXME("()\n");
}

/* ?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ */
bool __cdecl Context_IsCurrentTaskCollectionCanceling(void)
{
    FIXME("()\n");
    return FALSE;
}

/* ?Oversubscribe@Context@Concurrency@@SAX_N@Z */
void __cdecl Context_Oversubscribe(bool begin)
{
    FIXME("(%x)\n", begin);
}

/* ?ScheduleGroupId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_ScheduleGroupId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetScheduleGroupId(ctx) : -1;
}

/* ?VirtualProcessorId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_VirtualProcessorId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetVirtualProcessorId(ctx) : -1;
}

#if _MSVCR_VER > 100
/* ?_CurrentContext@_Context@details@Concurrency@@SA?AV123@XZ */
_Context *__cdecl _Context__CurrentContext(_Context *ret)
{
    TRACE("(%p)\n", ret);
    ret->context = Context_CurrentContext();
    return ret;
}
#endif

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetId, 4)
unsigned int __thiscall ExternalContextBase_GetId(const ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetVirtualProcessorId, 4)
unsigned int __thiscall ExternalContextBase_GetVirtualProcessorId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetScheduleGroupId, 4)
unsigned int __thiscall ExternalContextBase_GetScheduleGroupId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Unblock, 4)
void __thiscall ExternalContextBase_Unblock(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_IsSynchronouslyBlocked, 4)
bool __thiscall ExternalContextBase_IsSynchronouslyBlocked(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return FALSE;
}

static void ExternalContextBase_dtor(ExternalContextBase *this)
{
    struct scheduler_list *scheduler_cur, *scheduler_next;
    union allocator_cache_entry *next, *cur;
    int i;

    /* TODO: move the allocator cache to scheduler so it can be reused */
    for(i=0; i<ARRAY_SIZE(this->allocator_cache); i++) {
        for(cur = this->allocator_cache[i]; cur; cur=next) {
            next = cur->free.next;
            operator_delete(cur);
        }
    }

    if (this->scheduler.scheduler) {
        call_Scheduler_Release(this->scheduler.scheduler);

        for(scheduler_cur=this->scheduler.next; scheduler_cur; scheduler_cur=scheduler_next) {
            scheduler_next = scheduler_cur->next;
            call_Scheduler_Release(scheduler_cur->scheduler);
            operator_delete(scheduler_cur);
        }
    }
}
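/* The 'flags' argument of the vector destructors below follows the MSVC ABI:
 * bit 0 requests operator delete of the memory, bit 1 selects the array
 * form, whose element count is stored immediately before the first object. */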
DEFINE_THISCALL_WRAPPER(ExternalContextBase_vector_dtor, 8)
Context* __thiscall ExternalContextBase_vector_dtor(ExternalContextBase *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ExternalContextBase_dtor(this+i);
        operator_delete(ptr);
    } else {
        ExternalContextBase_dtor(this);
        if(flags & 1)
            operator_delete(this);
    }

    return &this->context;
}

static void ExternalContextBase_ctor(ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);

    memset(this, 0, sizeof(*this));
    this->context.vtable = &ExternalContextBase_vtable;
    this->id = InterlockedIncrement(&context_id);

    create_default_scheduler();
    this->scheduler.scheduler = &default_scheduler->scheduler;
    call_Scheduler_Reference(&default_scheduler->scheduler);
}
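/* The per-context allocator cache below keeps freed blocks in 8 power-of-two
 * buckets: bucket i holds blocks of 1 << (i+4) bytes, i.e. 16 bytes up to
 * 2048 bytes.  Larger requests fall back to plain operator new/delete and
 * are marked with bucket == -1. */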
/* ?Alloc@Concurrency@@YAPAXI@Z */
/* ?Alloc@Concurrency@@YAPEAX_K@Z */
void * CDECL Concurrency_Alloc(size_t size)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    union allocator_cache_entry *p;

    size += FIELD_OFFSET(union allocator_cache_entry, alloc.mem);
    if (size < sizeof(*p))
        size = sizeof(*p);

    if (context->context.vtable != &ExternalContextBase_vtable) {
        p = operator_new(size);
        p->alloc.bucket = -1;
    }else {
        int i;

        C_ASSERT(sizeof(union allocator_cache_entry) <= 1 << 4);
        for(i=0; i<ARRAY_SIZE(context->allocator_cache); i++)
            if (1 << (i+4) >= size) break;

        if(i==ARRAY_SIZE(context->allocator_cache)) {
            p = operator_new(size);
            p->alloc.bucket = -1;
        }else if (context->allocator_cache[i]) {
            p = context->allocator_cache[i];
            context->allocator_cache[i] = p->free.next;
            p->alloc.bucket = i;
        }else {
            p = operator_new(1 << (i+4));
            p->alloc.bucket = i;
        }
    }

    TRACE("(%Iu) returning %p\n", size, p->alloc.mem);
    return p->alloc.mem;
}

/* ?Free@Concurrency@@YAXPAX@Z */
/* ?Free@Concurrency@@YAXPEAX@Z */
void CDECL Concurrency_Free(void* mem)
{
    union allocator_cache_entry *p = (union allocator_cache_entry*)((char*)mem-FIELD_OFFSET(union allocator_cache_entry, alloc.mem));
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    int bucket = p->alloc.bucket;

    TRACE("(%p)\n", mem);

    if (context->context.vtable != &ExternalContextBase_vtable) {
        operator_delete(p);
    }else {
        if(bucket >= 0 && bucket < ARRAY_SIZE(context->allocator_cache) &&
            (!context->allocator_cache[bucket] || context->allocator_cache[bucket]->free.depth < 20)) {
            p->free.next = context->allocator_cache[bucket];
            p->free.depth = p->free.next ? p->free.next->free.depth+1 : 0;
            context->allocator_cache[bucket] = p;
        }else {
            operator_delete(p);
        }
    }
}
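/* Note: each cached free block records its depth in the free list, so a
 * bucket stops caching once it already holds 20 entries; anything past that
 * is released immediately to bound per-thread memory usage. */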
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z */
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QEAAIW4PolicyElementKey@2@I@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetPolicyValue, 12)
unsigned int __thiscall SchedulerPolicy_SetPolicyValue(SchedulerPolicy *this,
        PolicyElementKey policy, unsigned int val)
{
    unsigned int ret;

    TRACE("(%p %d %d)\n", this, policy, val);

    if (policy == MinConcurrency) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "MinConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    if (policy == MaxConcurrency) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "MaxConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    if (policy >= last_policy_id) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }

    switch(policy) {
    case SchedulerKind:
        if (val) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "SchedulerKind");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case TargetOversubscriptionFactor:
        if (!val) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "TargetOversubscriptionFactor");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case ContextPriority:
        if (((int)val < -7 /* THREAD_PRIORITY_REALTIME_LOWEST */
                    || val > 6 /* THREAD_PRIORITY_REALTIME_HIGHEST */)
                && val != THREAD_PRIORITY_IDLE && val != THREAD_PRIORITY_TIME_CRITICAL
                && val != INHERIT_THREAD_PRIORITY) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "ContextPriority");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case SchedulingProtocol:
    case DynamicProgressFeedback:
    case WinRTInitialization:
        if (val != 0 && val != 1) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "SchedulingProtocol");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    default:
        break;
    }

    ret = this->policy_container->policies[policy];
    this->policy_container->policies[policy] = val;
    return ret;
}

/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z */
/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QEAAXII@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetConcurrencyLimits, 12)
void __thiscall SchedulerPolicy_SetConcurrencyLimits(SchedulerPolicy *this,
        unsigned int min_concurrency, unsigned int max_concurrency)
{
    TRACE("(%p %d %d)\n", this, min_concurrency, max_concurrency);

    if (min_concurrency > max_concurrency) {
        invalid_scheduler_policy_thread_specification e;
        invalid_scheduler_policy_thread_specification_ctor_str(&e, NULL);
        _CxxThrowException(&e, &invalid_scheduler_policy_thread_specification_exception_type);
    }
    if (!max_concurrency) {
        invalid_scheduler_policy_value e;
        invalid_scheduler_policy_value_ctor_str(&e, "MaxConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
    }

    this->policy_container->policies[MinConcurrency] = min_concurrency;
    this->policy_container->policies[MaxConcurrency] = max_concurrency;
}

/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z */
/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QEBAIW4PolicyElementKey@2@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_GetPolicyValue, 8)
unsigned int __thiscall SchedulerPolicy_GetPolicyValue(
        const SchedulerPolicy *this, PolicyElementKey policy)
{
    TRACE("(%p %d)\n", this, policy);

    if (policy >= last_policy_id) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    return this->policy_container->policies[policy];
}

/* ??0SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_ctor, 4)
SchedulerPolicy* __thiscall SchedulerPolicy_ctor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);

    this->policy_container = operator_new(sizeof(*this->policy_container));
    /* TODO: default values can probably be affected by CurrentScheduler */
    this->policy_container->policies[SchedulerKind] = 0;
    this->policy_container->policies[MaxConcurrency] = -1;
    this->policy_container->policies[MinConcurrency] = 1;
    this->policy_container->policies[TargetOversubscriptionFactor] = 1;
    this->policy_container->policies[LocalContextCacheSize] = 8;
    this->policy_container->policies[ContextStackSize] = 0;
    this->policy_container->policies[ContextPriority] = THREAD_PRIORITY_NORMAL;
    this->policy_container->policies[SchedulingProtocol] = 0;
    this->policy_container->policies[DynamicProgressFeedback] = 1;
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAA@IZZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@_KZZ */
/* TODO: don't leak policy_container on exception */
SchedulerPolicy* WINAPIV SchedulerPolicy_ctor_policies(
        SchedulerPolicy *this, size_t n, ...)
{
    unsigned int min_concurrency, max_concurrency;
    va_list valist;
    size_t i;

    TRACE("(%p %Iu)\n", this, n);

    SchedulerPolicy_ctor(this);
    min_concurrency = this->policy_container->policies[MinConcurrency];
    max_concurrency = this->policy_container->policies[MaxConcurrency];

    va_start(valist, n);
    for(i=0; i<n; i++) {
        PolicyElementKey policy = va_arg(valist, PolicyElementKey);
        unsigned int val = va_arg(valist, unsigned int);

        if(policy == MinConcurrency)
            min_concurrency = val;
        else if(policy == MaxConcurrency)
            max_concurrency = val;
        else
            SchedulerPolicy_SetPolicyValue(this, policy, val);
    }
    va_end(valist);

    SchedulerPolicy_SetConcurrencyLimits(this, min_concurrency, max_concurrency);
    return this;
}

/* ??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z */
/* ??4SchedulerPolicy@Concurrency@@QEAAAEAV01@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_op_assign, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_op_assign(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    memcpy(this->policy_container->policies, rhs->policy_container->policies,
            sizeof(this->policy_container->policies));
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z */
/* ??0SchedulerPolicy@Concurrency@@QEAA@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_copy_ctor, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_copy_ctor(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    SchedulerPolicy_ctor(this);
    return SchedulerPolicy_op_assign(this, rhs);
}

/* ??1SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??1SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_dtor, 4)
void __thiscall SchedulerPolicy_dtor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);
    operator_delete(this->policy_container);
}

static void ThreadScheduler_dtor(ThreadScheduler *this)
{
    int i;

    if(this->ref != 0) WARN("ref = %ld\n", this->ref);
    SchedulerPolicy_dtor(&this->policy);

    for(i=0; i<this->shutdown_count; i++)
        SetEvent(this->shutdown_events[i]);
    operator_delete(this->shutdown_events);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Id, 4)
unsigned int __thiscall ThreadScheduler_Id(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetNumberOfVirtualProcessors, 4)
unsigned int __thiscall ThreadScheduler_GetNumberOfVirtualProcessors(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->virt_proc_no;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetPolicy, 8)
SchedulerPolicy* __thiscall ThreadScheduler_GetPolicy(
        const ThreadScheduler *this, SchedulerPolicy *ret)
{
    TRACE("(%p %p)\n", this, ret);
    return SchedulerPolicy_copy_ctor(ret, &this->policy);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Reference, 4)
unsigned int __thiscall ThreadScheduler_Reference(ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return InterlockedIncrement(&this->ref);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Release, 4)
unsigned int __thiscall ThreadScheduler_Release(ThreadScheduler *this)
{
    unsigned int ret = InterlockedDecrement(&this->ref);

    TRACE("(%p)\n", this);

    if(!ret) {
        ThreadScheduler_dtor(this);
        operator_delete(this);
    }
    return ret;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_RegisterShutdownEvent, 8)
void __thiscall ThreadScheduler_RegisterShutdownEvent(ThreadScheduler *this, HANDLE event)
{
    HANDLE *shutdown_events;
    int size;

    TRACE("(%p %p)\n", this, event);

    EnterCriticalSection(&this->cs);

    size = this->shutdown_size ? this->shutdown_size * 2 : 1;
    shutdown_events = operator_new(size * sizeof(*shutdown_events));
    memcpy(shutdown_events, this->shutdown_events,
            this->shutdown_count * sizeof(*shutdown_events));
    operator_delete(this->shutdown_events);
    this->shutdown_size = size;
    this->shutdown_events = shutdown_events;
    this->shutdown_events[this->shutdown_count++] = event;

    LeaveCriticalSection(&this->cs);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Attach, 4)
void __thiscall ThreadScheduler_Attach(ThreadScheduler *this)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    TRACE("(%p)\n", this);

    if(context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(context->scheduler.scheduler == &this->scheduler) {
        improper_scheduler_attach e;
        improper_scheduler_attach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_attach_exception_type);
    }

    if(context->scheduler.scheduler) {
        struct scheduler_list *l = operator_new(sizeof(*l));
        *l = context->scheduler;
        context->scheduler.next = l;
    }
    context->scheduler.scheduler = &this->scheduler;
    ThreadScheduler_Reference(this);
}
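/* Note: attaching maintains a per-context stack of schedulers: the
 * previously current scheduler is pushed onto context->scheduler.next above,
 * and CurrentScheduler_Detach below pops it back.  Attaching the scheduler
 * that is already current raises improper_scheduler_attach. */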
DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup_loc, 8)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup_loc(
        ThreadScheduler *this, /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup, 4)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup(ThreadScheduler *this)
{
    FIXME("(%p) stub\n", this);
    return NULL;
}

typedef struct
{
    void (__cdecl *proc)(void*);
    void *data;
} schedule_task_arg;

static void WINAPI schedule_task_proc(PTP_CALLBACK_INSTANCE instance, void *context, PTP_WORK work)
{
    schedule_task_arg arg;

    arg = *(schedule_task_arg*)context;
    operator_delete(context);
    arg.proc(arg.data);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask_loc, 16)
void __thiscall ThreadScheduler_ScheduleTask_loc(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data, /*location*/void *placement)
{
    schedule_task_arg *arg;
    TP_WORK *work;

    FIXME("(%p %p %p %p) stub\n", this, proc, data, placement);

    arg = operator_new(sizeof(*arg));
    arg->proc = proc;
    arg->data = data;

    work = CreateThreadpoolWork(schedule_task_proc, arg, NULL);
    if(!work) {
        scheduler_resource_allocation_error e;

        operator_delete(arg);
        scheduler_resource_allocation_error_ctor_name(&e, NULL,
                HRESULT_FROM_WIN32(GetLastError()));
        _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
    }
    SubmitThreadpoolWork(work);
    CloseThreadpoolWork(work);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask, 12)
void __thiscall ThreadScheduler_ScheduleTask(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data)
{
    FIXME("(%p %p %p) stub\n", this, proc, data);
    ThreadScheduler_ScheduleTask_loc(this, proc, data, NULL);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_IsAvailableLocation, 8)
bool __thiscall ThreadScheduler_IsAvailableLocation(
        const ThreadScheduler *this, const /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return FALSE;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_vector_dtor, 8)
Scheduler* __thiscall ThreadScheduler_vector_dtor(ThreadScheduler *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ThreadScheduler_dtor(this+i);
        operator_delete(ptr);
    } else {
        ThreadScheduler_dtor(this);
        if(flags & 1)
            operator_delete(this);
    }

    return &this->scheduler;
}

static ThreadScheduler* ThreadScheduler_ctor(ThreadScheduler *this,
        const SchedulerPolicy *policy)
{
    SYSTEM_INFO si;

    TRACE("(%p)->()\n", this);

    this->scheduler.vtable = &ThreadScheduler_vtable;
    this->ref = 1;
    this->id = InterlockedIncrement(&scheduler_id);
    SchedulerPolicy_copy_ctor(&this->policy, policy);

    GetSystemInfo(&si);
    this->virt_proc_no = SchedulerPolicy_GetPolicyValue(&this->policy, MaxConcurrency);
    if(this->virt_proc_no > si.dwNumberOfProcessors)
        this->virt_proc_no = si.dwNumberOfProcessors;

    this->shutdown_count = this->shutdown_size = 0;
    this->shutdown_events = NULL;

    InitializeCriticalSection(&this->cs);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": ThreadScheduler");
    return this;
}

/* ?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z */
/* ?Create@Scheduler@Concurrency@@SAPEAV12@AEBVSchedulerPolicy@2@@Z */
Scheduler* __cdecl Scheduler_Create(const SchedulerPolicy *policy)
{
    ThreadScheduler *ret;

    TRACE("(%p)\n", policy);

    ret = operator_new(sizeof(*ret));
    return &ThreadScheduler_ctor(ret, policy)->scheduler;
}

/* ?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ */
void __cdecl Scheduler_ResetDefaultSchedulerPolicy(void)
{
    TRACE("()\n");

    EnterCriticalSection(&default_scheduler_cs);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    SchedulerPolicy_ctor(&default_scheduler_policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl Scheduler_SetDefaultSchedulerPolicy(const SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler_policy.policy_container)
        SchedulerPolicy_copy_ctor(&default_scheduler_policy, policy);
    else
        SchedulerPolicy_op_assign(&default_scheduler_policy, policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Create@CurrentScheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?Create@CurrentScheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl CurrentScheduler_Create(const SchedulerPolicy *policy)
{
    Scheduler *scheduler;

    TRACE("(%p)\n", policy);

    scheduler = Scheduler_Create(policy);
    call_Scheduler_Attach(scheduler);
}

/* ?Detach@CurrentScheduler@Concurrency@@SAXXZ */
void __cdecl CurrentScheduler_Detach(void)
{
    ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();

    TRACE("()\n");

    if(!context) {
        improper_scheduler_detach e;
        improper_scheduler_detach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
    }

    if(context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(!context->scheduler.next) {
        improper_scheduler_detach e;
        improper_scheduler_detach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
    }

    call_Scheduler_Release(context->scheduler.scheduler);
    if(!context->scheduler.next) {
        context->scheduler.scheduler = NULL;
    }else {
        struct scheduler_list *entry = context->scheduler.next;
        context->scheduler.scheduler = entry->scheduler;
        context->scheduler.next = entry->next;
        operator_delete(entry);
    }
}

static void create_default_scheduler(void)
{
    if(default_scheduler)
        return;

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler) {
        ThreadScheduler *scheduler;

        if(!default_scheduler_policy.policy_container)
            SchedulerPolicy_ctor(&default_scheduler_policy);

        scheduler = operator_new(sizeof(*scheduler));
        ThreadScheduler_ctor(scheduler, &default_scheduler_policy);
        default_scheduler = scheduler;
    }
    LeaveCriticalSection(&default_scheduler_cs);
}
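/* Note: create_default_scheduler uses the check / lock / re-check pattern:
 * the unsynchronized early return is only an optimization, and the decisive
 * test happens under default_scheduler_cs. */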
/* ?Get@CurrentScheduler@Concurrency@@SAPAVScheduler@2@XZ */
/* ?Get@CurrentScheduler@Concurrency@@SAPEAVScheduler@2@XZ */
Scheduler* __cdecl CurrentScheduler_Get(void)
{
    TRACE("()\n");
    return get_current_scheduler();
}

#if _MSVCR_VER > 100
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@AAVlocation@2@@Z */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@AEAVlocation@2@@Z */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup_loc(/*location*/void *placement)
{
    TRACE("(%p)\n", placement);
    return call_Scheduler_CreateScheduleGroup_loc(get_current_scheduler(), placement);
}
#endif

/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@XZ */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@XZ */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup(void)
{
    TRACE("()\n");
    return call_Scheduler_CreateScheduleGroup(get_current_scheduler());
}

/* ?GetNumberOfVirtualProcessors@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_GetNumberOfVirtualProcessors(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_GetNumberOfVirtualProcessors(scheduler);
}

/* ?GetPolicy@CurrentScheduler@Concurrency@@SA?AVSchedulerPolicy@2@XZ */
SchedulerPolicy* __cdecl CurrentScheduler_GetPolicy(SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);
    return call_Scheduler_GetPolicy(get_current_scheduler(), policy);
}

/* ?Id@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_Id(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_Id(scheduler);
}

#if _MSVCR_VER > 100
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NABVlocation@2@@Z */
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NAEBVlocation@2@@Z */
bool __cdecl CurrentScheduler_IsAvailableLocation(const /*location*/void *placement)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("(%p)\n", placement);

    if(!scheduler)
        return FALSE;
    return call_Scheduler_IsAvailableLocation(scheduler, placement);
}
#endif

/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPAX@Z */
/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPEAX@Z */
void __cdecl CurrentScheduler_RegisterShutdownEvent(HANDLE event)
{
    TRACE("(%p)\n", event);
    call_Scheduler_RegisterShutdownEvent(get_current_scheduler(), event);
}

#if _MSVCR_VER > 100
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0AAVlocation@2@@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0AEAVlocation@2@@Z */
void __cdecl CurrentScheduler_ScheduleTask_loc(void (__cdecl *proc)(void*),
        void *data, /*location*/void *placement)
{
    TRACE("(%p %p %p)\n", proc, data, placement);
    call_Scheduler_ScheduleTask_loc(get_current_scheduler(), proc, data, placement);
}
#endif

/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0@Z */
void __cdecl CurrentScheduler_ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    call_Scheduler_ScheduleTask(get_current_scheduler(), proc, data);
}

/* ??0_Scheduler@details@Concurrency@@QAE@PAVScheduler@2@@Z */
/* ??0_Scheduler@details@Concurrency@@QEAA@PEAVScheduler@2@@Z */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor_sched, 8)
_Scheduler* __thiscall _Scheduler_ctor_sched(_Scheduler *this, Scheduler *scheduler)
{
    TRACE("(%p %p)\n", this, scheduler);

    this->scheduler = scheduler;
    return this;
}

/* ??_F_Scheduler@details@Concurrency@@QAEXXZ */
/* ??_F_Scheduler@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor, 4)
_Scheduler* __thiscall _Scheduler_ctor(_Scheduler *this)
{
    return _Scheduler_ctor_sched(this, NULL);
}

/* ?_GetScheduler@_Scheduler@details@Concurrency@@QAEPAVScheduler@3@XZ */
/* ?_GetScheduler@_Scheduler@details@Concurrency@@QEAAPEAVScheduler@3@XZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__GetScheduler, 4)
Scheduler* __thiscall _Scheduler__GetScheduler(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return this->scheduler;
}

/* ?_Reference@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Reference@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Reference, 4)
unsigned int __thiscall _Scheduler__Reference(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Reference(this->scheduler);
}

/* ?_Release@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Release@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Release, 4)
unsigned int __thiscall _Scheduler__Release(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Release(this->scheduler);
}

/* ?_Get@_CurrentScheduler@details@Concurrency@@SA?AV_Scheduler@23@XZ */
_Scheduler* __cdecl _CurrentScheduler__Get(_Scheduler *ret)
{
    TRACE("()\n");
    return _Scheduler_ctor_sched(ret, get_current_scheduler());
}

/* ?_GetNumberOfVirtualProcessors@_CurrentScheduler@details@Concurrency@@SAIXZ */
unsigned int __cdecl _CurrentScheduler__GetNumberOfVirtualProcessors(void)
{
    TRACE("()\n");
    get_current_scheduler();
    return CurrentScheduler_GetNumberOfVirtualProcessors();
}

/* ?_Id@_CurrentScheduler@details@Concurrency@@SAIXZ */
unsigned int __cdecl _CurrentScheduler__Id(void)
{
    TRACE("()\n");
    get_current_scheduler();
    return CurrentScheduler_Id();
}

/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPEAX@Z0@Z */
void __cdecl _CurrentScheduler__ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    CurrentScheduler_ScheduleTask(proc, data);
}

/* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
unsigned int __cdecl SpinCount__Value(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors>1 ? 4000 : 0;
    }

    return val;
}
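/* Note: the spin count defaults to 4000 iterations on multiprocessor
 * machines and 0 on a single CPU, where spinning cannot help because the
 * lock holder is not running concurrently. */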
1669 /* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
1670 /* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
1671 DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
1672 SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
1674 TRACE("(%p %p)\n", this, yf);
1676 this->state = SPINWAIT_INIT;
1677 this->unknown = 1;
1678 this->yield_func = yf;
1679 return this;
1682 /* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
1683 /* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
1684 DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
1685 SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
1687 TRACE("(%p %p)\n", this, yf);
1689 this->state = SPINWAIT_INIT;
1690 this->unknown = 0;
1691 this->yield_func = yf;
1692 return this;
1695 /* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
1696 /* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
1697 /* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
1698 /* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
1699 DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
1700 void __thiscall SpinWait_dtor(SpinWait *this)
1702 TRACE("(%p)\n", this);
1705 /* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
1706 /* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
1707 /* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
1708 /* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
1709 DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
1710 void __thiscall SpinWait__DoYield(SpinWait *this)
1712 TRACE("(%p)\n", this);
1714 if(this->unknown)
1715 this->yield_func();
1718 /* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
1719 /* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
1720 /* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
1721 /* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
1722 DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
1723 ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
1725 TRACE("(%p)\n", this);
1726 return 1;
1729 /* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
1730 /* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
1731 /* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
1732 /* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
1733 DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
1734 void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
1736 TRACE("(%p %d)\n", this, spin);
1738 this->spin = spin;
1739 this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
1742 /* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
1743 /* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
1744 /* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
1745 /* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
1746 DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
1747 void __thiscall SpinWait__Reset(SpinWait *this)
1749 SpinWait__SetSpinCount(this, SpinCount__Value());
1752 /* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
1753 /* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
1754 /* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
1755 /* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
1756 DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
1757 bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
1759 TRACE("(%p)\n", this);
1761 this->spin--;
1762 return this->spin > 0;
1765 /* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
1766 /* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
1767 /* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
1768 /* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
1769 DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
1770 bool __thiscall SpinWait__SpinOnce(SpinWait *this)
1772 switch(this->state) {
1773 case SPINWAIT_INIT:
1774 SpinWait__Reset(this);
1775 /* fall through */
1776 case SPINWAIT_SPIN:
1777 InterlockedDecrement((LONG*)&this->spin);
1778 if(!this->spin)
1779 this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
1780 return TRUE;
1781 case SPINWAIT_YIELD:
1782 this->state = SPINWAIT_DONE;
1783 this->yield_func();
1784 return TRUE;
1785 default:
1786 SpinWait__Reset(this);
1787 return FALSE;
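/*
 * Usage sketch (illustrative, not part of the original source): callers
 * drive the spin wait as a small state machine, calling
 * SpinWait__SpinOnce() until their condition holds, exactly as
 * spin_wait_for_next_cs() does further down.  "done" is a hypothetical
 * predicate.
 *
 *   SpinWait sw;
 *
 *   SpinWait_ctor(&sw, &spin_wait_yield);
 *   SpinWait__Reset(&sw);
 *   while (!done())
 *       SpinWait__SpinOnce(&sw);
 *   SpinWait_dtor(&sw);
 */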
1791 #if _MSVCR_VER >= 110
1793 /* ??0_StructuredTaskCollection@details@Concurrency@@QAE@PAV_CancellationTokenState@12@@Z */
1794 /* ??0_StructuredTaskCollection@details@Concurrency@@QEAA@PEAV_CancellationTokenState@12@@Z */
1795 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection_ctor, 8)
1796 _StructuredTaskCollection* __thiscall _StructuredTaskCollection_ctor(
1797 _StructuredTaskCollection *this, /*_CancellationTokenState*/void *token)
1799 FIXME("(%p): stub\n", this);
1800 return NULL;
1803 #endif /* _MSVCR_VER >= 110 */
1805 #if _MSVCR_VER >= 120
1807 /* ??1_StructuredTaskCollection@details@Concurrency@@QAA@XZ */
1808 /* ??1_StructuredTaskCollection@details@Concurrency@@QAE@XZ */
1809 /* ??1_StructuredTaskCollection@details@Concurrency@@QEAA@XZ */
1810 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection_dtor, 4)
1811 void __thiscall _StructuredTaskCollection_dtor(_StructuredTaskCollection *this)
1813 FIXME("(%p): stub!\n", this);
1816 #endif /* _MSVCR_VER >= 120 */
1818 #if _MSVCR_VER >= 110
1820 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAAXPAV_UnrealizedChore@23@PAVlocation@3@@Z */
1821 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@PAVlocation@3@@Z */
1822 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QEAAXPEAV_UnrealizedChore@23@PEAVlocation@3@@Z */
1823 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Schedule_loc, 12)
1824 void __thiscall _StructuredTaskCollection__Schedule_loc(
1825 _StructuredTaskCollection *this, _UnrealizedChore *chore,
1826 /*location*/void *placement)
1828 FIXME("(%p %p %p): stub!\n", this, chore, placement);
1831 #endif /* _MSVCR_VER >= 110 */
1833 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAAXPAV_UnrealizedChore@23@@Z */
1834 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@@Z */
1835 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QEAAXPEAV_UnrealizedChore@23@@Z */
1836 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Schedule, 8)
1837 void __thiscall _StructuredTaskCollection__Schedule(
1838 _StructuredTaskCollection *this, _UnrealizedChore *chore)
1840 FIXME("(%p %p): stub!\n", this, chore);
1843 /* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QAA?AW4_TaskCollectionStatus@23@PAV_UnrealizedChore@23@@Z */
1844 /* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QAG?AW4_TaskCollectionStatus@23@PAV_UnrealizedChore@23@@Z */
1845 /* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QEAA?AW4_TaskCollectionStatus@23@PEAV_UnrealizedChore@23@@Z */
1846 /*enum Concurrency::details::_TaskCollectionStatus*/int __stdcall
1847 _StructuredTaskCollection__RunAndWait(
1848 _StructuredTaskCollection *this, _UnrealizedChore *chore)
1850 FIXME("(%p %p): stub!\n", this, chore);
1851 return 1;
1854 /* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QAAXXZ */
1855 /* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QAEXXZ */
1856 /* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QEAAXXZ */
1857 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Cancel, 4)
1858 void __thiscall _StructuredTaskCollection__Cancel(
1859 _StructuredTaskCollection *this)
1861 FIXME("(%p): stub!\n", this);
1864 /* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QAA_NXZ */
1865 /* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QAE_NXZ */
1866 /* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QEAA_NXZ */
1867 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__IsCanceling, 4)
1868 bool __thiscall _StructuredTaskCollection__IsCanceling(
1869 _StructuredTaskCollection *this)
1871 FIXME("(%p): stub!\n", this);
1872 return FALSE;
1875 /* ??0critical_section@Concurrency@@QAE@XZ */
1876 /* ??0critical_section@Concurrency@@QEAA@XZ */
1877 DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
1878 critical_section* __thiscall critical_section_ctor(critical_section *this)
1880 TRACE("(%p)\n", this);
1882 if(!keyed_event) {
1883 HANDLE event;
1885 NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
1886 if(InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
1887 NtClose(event);
1890 this->unk_thread_id = 0;
1891 this->head = this->tail = NULL;
1892 return this;
1895 /* ??1critical_section@Concurrency@@QAE@XZ */
1896 /* ??1critical_section@Concurrency@@QEAA@XZ */
1897 DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
1898 void __thiscall critical_section_dtor(critical_section *this)
1900 TRACE("(%p)\n", this);
1903 static void __cdecl spin_wait_yield(void)
1905 Sleep(0);
1908 static inline void spin_wait_for_next_cs(cs_queue *q)
1910 SpinWait sw;
1912 if(q->next) return;
1914 SpinWait_ctor(&sw, &spin_wait_yield);
1915 SpinWait__Reset(&sw);
1916 while(!q->next)
1917 SpinWait__SpinOnce(&sw);
1918 SpinWait_dtor(&sw);
1921 static inline void cs_set_head(critical_section *cs, cs_queue *q)
1923 cs->unk_thread_id = GetCurrentThreadId();
1924 cs->unk_active.next = q->next;
1925 cs->head = &cs->unk_active;
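/*
 * The critical section below is a queue lock built on the process-wide
 * keyed event: each contender appends a cs_queue node to "tail" with an
 * interlocked exchange, links itself behind the previous tail and blocks
 * in NtWaitForKeyedEvent() until the owner hands the lock over.
 * unk_active is the node that stands in for the current owner.
 */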
1928 static inline void cs_lock(critical_section *cs, cs_queue *q)
1930 cs_queue *last;
1932 if(cs->unk_thread_id == GetCurrentThreadId()) {
1933 improper_lock e;
1934 improper_lock_ctor_str(&e, "Already locked");
1935 _CxxThrowException(&e, &improper_lock_exception_type);
1938 memset(q, 0, sizeof(*q));
1939 last = InterlockedExchangePointer(&cs->tail, q);
1940 if(last) {
1941 last->next = q;
1942 NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
1945 cs_set_head(cs, q);
1946 if(InterlockedCompareExchangePointer(&cs->tail, &cs->unk_active, q) != q) {
1947 spin_wait_for_next_cs(q);
1948 cs->unk_active.next = q->next;
1952 /* ?lock@critical_section@Concurrency@@QAEXXZ */
1953 /* ?lock@critical_section@Concurrency@@QEAAXXZ */
1954 DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
1955 void __thiscall critical_section_lock(critical_section *this)
1957 cs_queue q;
1959 TRACE("(%p)\n", this);
1960 cs_lock(this, &q);
1963 /* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
1964 /* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
1965 DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
1966 bool __thiscall critical_section_try_lock(critical_section *this)
1968 cs_queue q;
1970 TRACE("(%p)\n", this);
1972 if(this->unk_thread_id == GetCurrentThreadId())
1973 return FALSE;
1975 memset(&q, 0, sizeof(q));
1976 if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
1977 cs_set_head(this, &q);
1978 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
1979 spin_wait_for_next_cs(&q);
1980 this->unk_active.next = q.next;
1982 return TRUE;
1984 return FALSE;
1987 /* ?unlock@critical_section@Concurrency@@QAEXXZ */
1988 /* ?unlock@critical_section@Concurrency@@QEAAXXZ */
1989 DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
1990 void __thiscall critical_section_unlock(critical_section *this)
1992 TRACE("(%p)\n", this);
1994 this->unk_thread_id = 0;
1995 this->head = NULL;
1996 if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
1997 == &this->unk_active) return;
1998 spin_wait_for_next_cs(&this->unk_active);
2000 #if _MSVCR_VER >= 110
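/* Waiters that timed out in try_lock_for() mark their heap node through
 * "free"; skip and reclaim any such nodes before waking the next waiter. */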
2001 while(1) {
2002 cs_queue *next;
2004 if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
2005 break;
2007 next = this->unk_active.next;
2008 if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
2009 HeapFree(GetProcessHeap(), 0, next);
2010 return;
2012 spin_wait_for_next_cs(next);
2014 this->unk_active.next = next->next;
2015 HeapFree(GetProcessHeap(), 0, next);
2017 #endif
2019 NtReleaseKeyedEvent(keyed_event, this->unk_active.next, 0, NULL);
2022 /* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
2023 /* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
2024 DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
2025 critical_section* __thiscall critical_section_native_handle(critical_section *this)
2027 TRACE("(%p)\n", this);
2028 return this;
2031 #if _MSVCR_VER >= 110
2032 /* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
2033 /* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
2034 DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
2035 bool __thiscall critical_section_try_lock_for(
2036 critical_section *this, unsigned int timeout)
2038 cs_queue *q, *last;
2040 TRACE("(%p %d)\n", this, timeout);
2042 if(this->unk_thread_id == GetCurrentThreadId()) {
2043 improper_lock e;
2044 improper_lock_ctor_str(&e, "Already locked");
2045 _CxxThrowException(&e, &improper_lock_exception_type);
2048 if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
2049 return critical_section_try_lock(this);
2051 last = InterlockedExchangePointer(&this->tail, q);
2052 if(last) {
2053 LARGE_INTEGER to;
2054 NTSTATUS status;
2055 FILETIME ft;
2057 last->next = q;
2058 GetSystemTimeAsFileTime(&ft);
2059 to.QuadPart = ((LONGLONG)ft.dwHighDateTime << 32) +
2060 ft.dwLowDateTime + (LONGLONG)timeout * TICKSPERMSEC;
2061 status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
2062 if(status == STATUS_TIMEOUT) {
2063 if(!InterlockedExchange(&q->free, TRUE))
2064 return FALSE;
2065 /* A thread has signaled the event and is blocked waiting on it. */
2066 /* Consume the keyed event so the signaling thread can continue. */
2067 NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
2071 cs_set_head(this, q);
2072 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
2073 spin_wait_for_next_cs(q);
2074 this->unk_active.next = q->next;
2077 HeapFree(GetProcessHeap(), 0, q);
2078 return TRUE;
2080 #endif
2082 /* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
2083 /* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
2084 DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
2085 critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
2086 critical_section_scoped_lock *this, critical_section *cs)
2088 TRACE("(%p %p)\n", this, cs);
2089 this->cs = cs;
2090 cs_lock(this->cs, &this->lock.q);
2091 return this;
2094 /* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
2095 /* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
2096 DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
2097 void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
2099 TRACE("(%p)\n", this);
2100 critical_section_unlock(this->cs);
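/*
 * Usage sketch (illustrative only): scoped_lock pairs cs_lock() in its
 * constructor with critical_section_unlock() in its destructor, giving
 * C++ callers RAII locking.  The names "cs" and "sl" are hypothetical.
 *
 *   critical_section cs;
 *   critical_section_scoped_lock sl;
 *
 *   critical_section_ctor(&cs);
 *   critical_section_scoped_lock_ctor(&sl, &cs);
 *   ... protected region ...
 *   critical_section_scoped_lock_dtor(&sl);
 *   critical_section_dtor(&cs);
 */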
2103 /* ??0_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
2104 /* ??0_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2105 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock_ctor, 4)
2106 _NonReentrantPPLLock* __thiscall _NonReentrantPPLLock_ctor(_NonReentrantPPLLock *this)
2108 TRACE("(%p)\n", this);
2110 critical_section_ctor(&this->cs);
2111 return this;
2114 /* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
2115 /* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
2116 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Acquire, 8)
2117 void __thiscall _NonReentrantPPLLock__Acquire(_NonReentrantPPLLock *this, cs_queue *q)
2119 TRACE("(%p %p)\n", this, q);
2120 cs_lock(&this->cs, q);
2123 /* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QAEXXZ */
2124 /* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QEAAXXZ */
2125 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Release, 4)
2126 void __thiscall _NonReentrantPPLLock__Release(_NonReentrantPPLLock *this)
2128 TRACE("(%p)\n", this);
2129 critical_section_unlock(&this->cs);
2132 /* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
2133 /* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
2134 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_ctor, 8)
2135 _NonReentrantPPLLock__Scoped_lock* __thiscall _NonReentrantPPLLock__Scoped_lock_ctor(
2136 _NonReentrantPPLLock__Scoped_lock *this, _NonReentrantPPLLock *lock)
2138 TRACE("(%p %p)\n", this, lock);
2140 this->lock = lock;
2141 _NonReentrantPPLLock__Acquire(this->lock, &this->wait.q);
2142 return this;
2145 /* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
2146 /* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2147 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_dtor, 4)
2148 void __thiscall _NonReentrantPPLLock__Scoped_lock_dtor(_NonReentrantPPLLock__Scoped_lock *this)
2150 TRACE("(%p)\n", this);
2152 _NonReentrantPPLLock__Release(this->lock);
2155 /* ??0_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
2156 /* ??0_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2157 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock_ctor, 4)
2158 _ReentrantPPLLock* __thiscall _ReentrantPPLLock_ctor(_ReentrantPPLLock *this)
2160 TRACE("(%p)\n", this);
2162 critical_section_ctor(&this->cs);
2163 this->count = 0;
2164 this->owner = -1;
2165 return this;
2168 /* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
2169 /* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
2170 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Acquire, 8)
2171 void __thiscall _ReentrantPPLLock__Acquire(_ReentrantPPLLock *this, cs_queue *q)
2173 TRACE("(%p %p)\n", this, q);
2175 if(this->owner == GetCurrentThreadId()) {
2176 this->count++;
2177 return;
2180 cs_lock(&this->cs, q);
2181 this->count++;
2182 this->owner = GetCurrentThreadId();
2185 /* ?_Release@_ReentrantPPLLock@details@Concurrency@@QAEXXZ */
2186 /* ?_Release@_ReentrantPPLLock@details@Concurrency@@QEAAXXZ */
2187 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Release, 4)
2188 void __thiscall _ReentrantPPLLock__Release(_ReentrantPPLLock *this)
2190 TRACE("(%p)\n", this);
2192 this->count--;
2193 if(this->count)
2194 return;
2196 this->owner = -1;
2197 critical_section_unlock(&this->cs);
2200 /* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
2201 /* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
2202 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_ctor, 8)
2203 _ReentrantPPLLock__Scoped_lock* __thiscall _ReentrantPPLLock__Scoped_lock_ctor(
2204 _ReentrantPPLLock__Scoped_lock *this, _ReentrantPPLLock *lock)
2206 TRACE("(%p %p)\n", this, lock);
2208 this->lock = lock;
2209 _ReentrantPPLLock__Acquire(this->lock, &this->wait.q);
2210 return this;
2213 /* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
2214 /* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2215 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_dtor, 4)
2216 void __thiscall _ReentrantPPLLock__Scoped_lock_dtor(_ReentrantPPLLock__Scoped_lock *this)
2218 TRACE("(%p)\n", this);
2220 _ReentrantPPLLock__Release(this->lock);
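/*
 * Usage sketch (illustrative only): the reentrant lock records the owning
 * thread id and a recursion count, so nested scoped locks taken on one
 * thread only bump the counter.  "lock", "outer" and "inner" are
 * hypothetical.
 *
 *   _ReentrantPPLLock lock;
 *   _ReentrantPPLLock__Scoped_lock outer, inner;
 *
 *   _ReentrantPPLLock_ctor(&lock);
 *   _ReentrantPPLLock__Scoped_lock_ctor(&outer, &lock);
 *   _ReentrantPPLLock__Scoped_lock_ctor(&inner, &lock);   (count == 2)
 *   _ReentrantPPLLock__Scoped_lock_dtor(&inner);
 *   _ReentrantPPLLock__Scoped_lock_dtor(&outer);          (lock released)
 */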
2223 /* ?_GetConcurrency@details@Concurrency@@YAIXZ */
2224 unsigned int __cdecl _GetConcurrency(void)
2226 static unsigned int val = -1;
2228 TRACE("()\n");
2230 if(val == -1) {
2231 SYSTEM_INFO si;
2233 GetSystemInfo(&si);
2234 val = si.dwNumberOfProcessors;
2237 return val;
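/*
 * NT waits treat a positive LARGE_INTEGER as an absolute time and a
 * negative one as an interval in 100ns units; COOPERATIVE_TIMEOUT_INFINITE
 * maps to a NULL timeout pointer, i.e. wait forever.
 */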
2240 static inline PLARGE_INTEGER evt_timeout(PLARGE_INTEGER pTime, unsigned int timeout)
2242 if(timeout == COOPERATIVE_TIMEOUT_INFINITE) return NULL;
2243 pTime->QuadPart = (ULONGLONG)timeout * -TICKSPERMSEC;
2244 return pTime;
2247 static void evt_add_queue(thread_wait_entry **head, thread_wait_entry *entry)
2249 entry->next = *head;
2250 entry->prev = NULL;
2251 if(*head) (*head)->prev = entry;
2252 *head = entry;
2255 static void evt_remove_queue(thread_wait_entry **head, thread_wait_entry *entry)
2257 if(entry == *head)
2258 *head = entry->next;
2259 else if(entry->prev)
2260 entry->prev->next = entry->next;
2261 if(entry->next) entry->next->prev = entry->prev;
2264 static size_t evt_end_wait(thread_wait *wait, event **events, int count)
2266 size_t i, ret = COOPERATIVE_WAIT_TIMEOUT;
2268 for(i = 0; i < count; i++) {
2269 critical_section_lock(&events[i]->cs);
2270 if(events[i] == wait->signaled) ret = i;
2271 evt_remove_queue(&events[i]->waiters, &wait->entries[i]);
2272 critical_section_unlock(&events[i]->cs);
2275 return ret;
2278 static inline int evt_transition(void **state, void *from, void *to)
2280 return InterlockedCompareExchangePointer(state, to, from) == from;
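/*
 * evt_wait() registers an entry on every event, then publishes its state:
 * pending_waits counts the signals still needed (count for wait_all,
 * otherwise 1) and wait->signaled moves EVT_RUNNING -> EVT_WAITING ->
 * <signaling event>.  event_set() only releases a waiter whose state it
 * can swap away from EVT_WAITING, which closes the race between a late
 * signal and a keyed-event timeout.
 */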
2283 static size_t evt_wait(thread_wait *wait, event **events, int count, bool wait_all, unsigned int timeout)
2285 int i;
2286 NTSTATUS status;
2287 LARGE_INTEGER ntto;
2289 wait->signaled = EVT_RUNNING;
2290 wait->pending_waits = wait_all ? count : 1;
2291 for(i = 0; i < count; i++) {
2292 wait->entries[i].wait = wait;
2294 critical_section_lock(&events[i]->cs);
2295 evt_add_queue(&events[i]->waiters, &wait->entries[i]);
2296 if(events[i]->signaled) {
2297 if(!InterlockedDecrement(&wait->pending_waits)) {
2298 wait->signaled = events[i];
2299 critical_section_unlock(&events[i]->cs);
2301 return evt_end_wait(wait, events, i+1);
2304 critical_section_unlock(&events[i]->cs);
2307 if(!timeout)
2308 return evt_end_wait(wait, events, count);
2310 if(!evt_transition(&wait->signaled, EVT_RUNNING, EVT_WAITING))
2311 return evt_end_wait(wait, events, count);
2313 status = NtWaitForKeyedEvent(keyed_event, wait, 0, evt_timeout(&ntto, timeout));
2315 if(status && !evt_transition(&wait->signaled, EVT_WAITING, EVT_RUNNING))
2316 NtWaitForKeyedEvent(keyed_event, wait, 0, NULL);
2318 return evt_end_wait(wait, events, count);
2321 /* ??0event@Concurrency@@QAE@XZ */
2322 /* ??0event@Concurrency@@QEAA@XZ */
2323 DEFINE_THISCALL_WRAPPER(event_ctor, 4)
2324 event* __thiscall event_ctor(event *this)
2326 TRACE("(%p)\n", this);
2328 this->waiters = NULL;
2329 this->signaled = FALSE;
2330 critical_section_ctor(&this->cs);
2332 return this;
2335 /* ??1event@Concurrency@@QAE@XZ */
2336 /* ??1event@Concurrency@@QEAA@XZ */
2337 DEFINE_THISCALL_WRAPPER(event_dtor, 4)
2338 void __thiscall event_dtor(event *this)
2340 TRACE("(%p)\n", this);
2341 critical_section_dtor(&this->cs);
2342 if(this->waiters)
2343 ERR("there's a wait on a destroyed event\n");
2346 /* ?reset@event@Concurrency@@QAEXXZ */
2347 /* ?reset@event@Concurrency@@QEAAXXZ */
2348 DEFINE_THISCALL_WRAPPER(event_reset, 4)
2349 void __thiscall event_reset(event *this)
2351 thread_wait_entry *entry;
2353 TRACE("(%p)\n", this);
2355 critical_section_lock(&this->cs);
2356 if(this->signaled) {
2357 this->signaled = FALSE;
2358 for(entry=this->waiters; entry; entry = entry->next)
2359 InterlockedIncrement(&entry->wait->pending_waits);
2361 critical_section_unlock(&this->cs);
2364 /* ?set@event@Concurrency@@QAEXXZ */
2365 /* ?set@event@Concurrency@@QEAAXXZ */
2366 DEFINE_THISCALL_WRAPPER(event_set, 4)
2367 void __thiscall event_set(event *this)
2369 thread_wait_entry *wakeup = NULL;
2370 thread_wait_entry *entry, *next;
2372 TRACE("(%p)\n", this);
2374 critical_section_lock(&this->cs);
2375 if(!this->signaled) {
2376 this->signaled = TRUE;
2377 for(entry=this->waiters; entry; entry=next) {
2378 next = entry->next;
2379 if(!InterlockedDecrement(&entry->wait->pending_waits)) {
2380 if(InterlockedExchangePointer(&entry->wait->signaled, this) == EVT_WAITING) {
2381 evt_remove_queue(&this->waiters, entry);
2382 evt_add_queue(&wakeup, entry);
2387 critical_section_unlock(&this->cs);
2389 for(entry=wakeup; entry; entry=next) {
2390 next = entry->next;
2391 entry->next = entry->prev = NULL;
2392 NtReleaseKeyedEvent(keyed_event, entry->wait, 0, NULL);
2396 /* ?wait@event@Concurrency@@QAEII@Z */
2397 /* ?wait@event@Concurrency@@QEAA_KI@Z */
2398 DEFINE_THISCALL_WRAPPER(event_wait, 8)
2399 size_t __thiscall event_wait(event *this, unsigned int timeout)
2401 thread_wait wait;
2402 size_t signaled;
2404 TRACE("(%p %u)\n", this, timeout);
2406 critical_section_lock(&this->cs);
2407 signaled = this->signaled;
2408 critical_section_unlock(&this->cs);
2410 if(!timeout) return signaled ? 0 : COOPERATIVE_WAIT_TIMEOUT;
2411 return signaled ? 0 : evt_wait(&wait, &this, 1, FALSE, timeout);
2414 /* ?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z */
2415 /* ?wait_for_multiple@event@Concurrency@@SA_KPEAPEAV12@_K_NI@Z */
2416 size_t __cdecl event_wait_for_multiple(event **events, size_t count, bool wait_all, unsigned int timeout)
2418 thread_wait *wait;
2419 size_t ret;
2421 TRACE("(%p %Iu %d %u)\n", events, count, wait_all, timeout);
2423 if(count == 0)
2424 return 0;
2426 wait = operator_new(FIELD_OFFSET(thread_wait, entries[count]));
2427 ret = evt_wait(wait, events, count, wait_all, timeout);
2428 operator_delete(wait);
2430 return ret;
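/*
 * Usage sketch (illustrative only): wait until either of two events is
 * set, with a 100ms timeout.  "e1", "e2" and "idx" are hypothetical.
 *
 *   event e1, e2, *evts[2] = { &e1, &e2 };
 *   size_t idx;
 *
 *   event_ctor(&e1);
 *   event_ctor(&e2);
 *   idx = event_wait_for_multiple(evts, 2, FALSE, 100);
 *   if (idx == COOPERATIVE_WAIT_TIMEOUT)
 *       ... timed out ...
 *   else
 *       ... evts[idx] was set ...
 *   event_dtor(&e2);
 *   event_dtor(&e1);
 */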
2433 #if _MSVCR_VER >= 110
2435 /* ??0_Condition_variable@details@Concurrency@@QAE@XZ */
2436 /* ??0_Condition_variable@details@Concurrency@@QEAA@XZ */
2437 DEFINE_THISCALL_WRAPPER(_Condition_variable_ctor, 4)
2438 _Condition_variable* __thiscall _Condition_variable_ctor(_Condition_variable *this)
2440 TRACE("(%p)\n", this);
2442 this->queue = NULL;
2443 critical_section_ctor(&this->lock);
2444 return this;
2447 /* ??1_Condition_variable@details@Concurrency@@QAE@XZ */
2448 /* ??1_Condition_variable@details@Concurrency@@QEAA@XZ */
2449 DEFINE_THISCALL_WRAPPER(_Condition_variable_dtor, 4)
2450 void __thiscall _Condition_variable_dtor(_Condition_variable *this)
2452 TRACE("(%p)\n", this);
2454 while(this->queue) {
2455 cv_queue *next = this->queue->next;
2456 if(!this->queue->expired)
2457 ERR("there's an active wait\n");
2458 HeapFree(GetProcessHeap(), 0, this->queue);
2459 this->queue = next;
2461 critical_section_dtor(&this->lock);
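/*
 * Waiters queue cv_queue nodes (stack-allocated in wait(), heap-allocated
 * in wait_for()) under "lock".  A notifier pops a node, rewrites its
 * "next" field to the CV_WAKE sentinel and calls RtlWakeAddressSingle();
 * the waiter loops in RtlWaitOnAddress() until it observes CV_WAKE.  The
 * interlocked "expired" flag arbitrates the wake/timeout race and decides
 * which side must free a heap node that timed out.
 */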
2464 /* ?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z */
2465 /* ?wait@_Condition_variable@details@Concurrency@@QEAAXAEAVcritical_section@3@@Z */
2466 DEFINE_THISCALL_WRAPPER(_Condition_variable_wait, 8)
2467 void __thiscall _Condition_variable_wait(_Condition_variable *this, critical_section *cs)
2469 cv_queue q, *next;
2471 TRACE("(%p, %p)\n", this, cs);
2473 critical_section_lock(&this->lock);
2474 q.next = this->queue;
2475 q.expired = FALSE;
2476 next = q.next;
2477 this->queue = &q;
2478 critical_section_unlock(&this->lock);
2480 critical_section_unlock(cs);
2481 while (q.next != CV_WAKE)
2482 RtlWaitOnAddress(&q.next, &next, sizeof(next), NULL);
2483 critical_section_lock(cs);
2486 /* ?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z */
2487 /* ?wait_for@_Condition_variable@details@Concurrency@@QEAA_NAEAVcritical_section@3@I@Z */
2488 DEFINE_THISCALL_WRAPPER(_Condition_variable_wait_for, 12)
2489 bool __thiscall _Condition_variable_wait_for(_Condition_variable *this,
2490 critical_section *cs, unsigned int timeout)
2492 LARGE_INTEGER to;
2493 NTSTATUS status;
2494 FILETIME ft;
2495 cv_queue *q, *next;
2497 TRACE("(%p %p %d)\n", this, cs, timeout);
2499 q = operator_new(sizeof(cv_queue));
2500 critical_section_lock(&this->lock);
2501 q->next = this->queue;
2502 q->expired = FALSE;
2503 next = q->next;
2504 this->queue = q;
2505 critical_section_unlock(&this->lock);
2507 critical_section_unlock(cs);
2509 GetSystemTimeAsFileTime(&ft);
2510 to.QuadPart = ((LONGLONG)ft.dwHighDateTime << 32) +
2511 ft.dwLowDateTime + (LONGLONG)timeout * TICKSPERMSEC;
2512 while (q->next != CV_WAKE) {
2513 status = RtlWaitOnAddress(&q->next, &next, sizeof(next), &to);
2514 if(status == STATUS_TIMEOUT) {
2515 if(!InterlockedExchange(&q->expired, TRUE)) {
2516 critical_section_lock(cs);
2517 return FALSE;
2519 break;
2523 operator_delete(q);
2524 critical_section_lock(cs);
2525 return TRUE;
2528 /* ?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ */
2529 /* ?notify_one@_Condition_variable@details@Concurrency@@QEAAXXZ */
2530 DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_one, 4)
2531 void __thiscall _Condition_variable_notify_one(_Condition_variable *this)
2533 cv_queue *node;
2535 TRACE("(%p)\n", this);
2537 if(!this->queue)
2538 return;
2540 while(1) {
2541 critical_section_lock(&this->lock);
2542 node = this->queue;
2543 if(!node) {
2544 critical_section_unlock(&this->lock);
2545 return;
2547 this->queue = node->next;
2548 critical_section_unlock(&this->lock);
2550 node->next = CV_WAKE;
2551 if(!InterlockedExchange(&node->expired, TRUE)) {
2552 RtlWakeAddressSingle(&node->next);
2553 return;
2554 } else {
2555 HeapFree(GetProcessHeap(), 0, node);
2560 /* ?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ */
2561 /* ?notify_all@_Condition_variable@details@Concurrency@@QEAAXXZ */
2562 DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_all, 4)
2563 void __thiscall _Condition_variable_notify_all(_Condition_variable *this)
2565 cv_queue *ptr;
2567 TRACE("(%p)\n", this);
2569 if(!this->queue)
2570 return;
2572 critical_section_lock(&this->lock);
2573 ptr = this->queue;
2574 this->queue = NULL;
2575 critical_section_unlock(&this->lock);
2577 while(ptr) {
2578 cv_queue *next = ptr->next;
2580 ptr->next = CV_WAKE;
2581 if(!InterlockedExchange(&ptr->expired, TRUE))
2582 RtlWakeAddressSingle(&ptr->next);
2583 else
2584 HeapFree(GetProcessHeap(), 0, ptr);
2585 ptr = next;
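/*
 * Usage sketch (illustrative only): as with any condition variable, the
 * wait belongs in a predicate loop, since a wake-up can race with another
 * consumer.  "cs", "cv" and "ready" are hypothetical.
 *
 *   critical_section_lock(&cs);
 *   while (!ready)
 *       _Condition_variable_wait(&cv, &cs);
 *   ... consume the state guarded by cs ...
 *   critical_section_unlock(&cs);
 */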
2588 #endif
2590 /* ??0reader_writer_lock@Concurrency@@QAE@XZ */
2591 /* ??0reader_writer_lock@Concurrency@@QEAA@XZ */
2592 DEFINE_THISCALL_WRAPPER(reader_writer_lock_ctor, 4)
2593 reader_writer_lock* __thiscall reader_writer_lock_ctor(reader_writer_lock *this)
2595 TRACE("(%p)\n", this);
2597 if (!keyed_event) {
2598 HANDLE event;
2600 NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
2601 if (InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
2602 NtClose(event);
2605 memset(this, 0, sizeof(*this));
2606 return this;
2609 /* ??1reader_writer_lock@Concurrency@@QAE@XZ */
2610 /* ??1reader_writer_lock@Concurrency@@QEAA@XZ */
2611 DEFINE_THISCALL_WRAPPER(reader_writer_lock_dtor, 4)
2612 void __thiscall reader_writer_lock_dtor(reader_writer_lock *this)
2614 TRACE("(%p)\n", this);
2616 if (this->thread_id != 0 || this->count)
2617 WARN("destroying locked reader_writer_lock\n");
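/*
 * The reader_writer_lock packs its state into "count": the number of
 * active readers combined with the WRITER_WAITING flag bit.  Writers
 * chain through writer_head/writer_tail and hand over via the keyed
 * event, much like critical_section above; pending readers are collected
 * on reader_head and released in a batch once no writer is queued.
 */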
2620 static inline void spin_wait_for_next_rwl(rwl_queue *q)
2622 SpinWait sw;
2624 if(q->next) return;
2626 SpinWait_ctor(&sw, &spin_wait_yield);
2627 SpinWait__Reset(&sw);
2628 while(!q->next)
2629 SpinWait__SpinOnce(&sw);
2630 SpinWait_dtor(&sw);
2633 /* ?lock@reader_writer_lock@Concurrency@@QAEXXZ */
2634 /* ?lock@reader_writer_lock@Concurrency@@QEAAXXZ */
2635 DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock, 4)
2636 void __thiscall reader_writer_lock_lock(reader_writer_lock *this)
2638 rwl_queue q = { NULL }, *last;
2640 TRACE("(%p)\n", this);
2642 if (this->thread_id == GetCurrentThreadId()) {
2643 improper_lock e;
2644 improper_lock_ctor_str(&e, "Already locked");
2645 _CxxThrowException(&e, &improper_lock_exception_type);
2648 last = InterlockedExchangePointer((void**)&this->writer_tail, &q);
2649 if (last) {
2650 last->next = &q;
2651 NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
2652 } else {
2653 this->writer_head = &q;
2654 if (InterlockedOr(&this->count, WRITER_WAITING))
2655 NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
2658 this->thread_id = GetCurrentThreadId();
2659 this->writer_head = &this->active;
2660 this->active.next = NULL;
2661 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
2662 spin_wait_for_next_rwl(&q);
2663 this->active.next = q.next;
2667 /* ?lock_read@reader_writer_lock@Concurrency@@QAEXXZ */
2668 /* ?lock_read@reader_writer_lock@Concurrency@@QEAAXXZ */
2669 DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock_read, 4)
2670 void __thiscall reader_writer_lock_lock_read(reader_writer_lock *this)
2672 rwl_queue q;
2674 TRACE("(%p)\n", this);
2676 if (this->thread_id == GetCurrentThreadId()) {
2677 improper_lock e;
2678 improper_lock_ctor_str(&e, "Already locked as writer");
2679 _CxxThrowException(&e, &improper_lock_exception_type);
2682 do {
2683 q.next = this->reader_head;
2684 } while(InterlockedCompareExchangePointer((void**)&this->reader_head, &q, q.next) != q.next);
2686 if (!q.next) {
2687 rwl_queue *head;
2688 LONG count;
2690 while (!((count = this->count) & WRITER_WAITING))
2691 if (InterlockedCompareExchange(&this->count, count+1, count) == count) break;
2693 if (count & WRITER_WAITING)
2694 NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
2696 head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
2697 while(head && head != &q) {
2698 rwl_queue *next = head->next;
2699 InterlockedIncrement(&this->count);
2700 NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
2701 head = next;
2703 } else {
2704 NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
2708 /* ?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ */
2709 /* ?try_lock@reader_writer_lock@Concurrency@@QEAA_NXZ */
2710 DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock, 4)
2711 bool __thiscall reader_writer_lock_try_lock(reader_writer_lock *this)
2713 rwl_queue q = { NULL };
2715 TRACE("(%p)\n", this);
2717 if (this->thread_id == GetCurrentThreadId())
2718 return FALSE;
2720 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &q, NULL))
2721 return FALSE;
2722 this->writer_head = &q;
2723 if (!InterlockedCompareExchange(&this->count, WRITER_WAITING, 0)) {
2724 this->thread_id = GetCurrentThreadId();
2725 this->writer_head = &this->active;
2726 this->active.next = NULL;
2727 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
2728 spin_wait_for_next_rwl(&q);
2729 this->active.next = q.next;
2731 return TRUE;
2734 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, &q) == &q)
2735 return FALSE;
2736 spin_wait_for_next_rwl(&q);
2737 this->writer_head = q.next;
2738 if (!InterlockedOr(&this->count, WRITER_WAITING)) {
2739 this->thread_id = GetCurrentThreadId();
2740 this->writer_head = &this->active;
2741 this->active.next = q.next;
2742 return TRUE;
2744 return FALSE;
2747 /* ?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ */
2748 /* ?try_lock_read@reader_writer_lock@Concurrency@@QEAA_NXZ */
2749 DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock_read, 4)
2750 bool __thiscall reader_writer_lock_try_lock_read(reader_writer_lock *this)
2752 LONG count;
2754 TRACE("(%p)\n", this);
2756 while (!((count = this->count) & WRITER_WAITING))
2757 if (InterlockedCompareExchange(&this->count, count+1, count) == count) return TRUE;
2758 return FALSE;
2761 /* ?unlock@reader_writer_lock@Concurrency@@QAEXXZ */
2762 /* ?unlock@reader_writer_lock@Concurrency@@QEAAXXZ */
2763 DEFINE_THISCALL_WRAPPER(reader_writer_lock_unlock, 4)
2764 void __thiscall reader_writer_lock_unlock(reader_writer_lock *this)
2766 LONG count;
2767 rwl_queue *head, *next;
2769 TRACE("(%p)\n", this);
2771 if ((count = this->count) & ~WRITER_WAITING) {
2772 count = InterlockedDecrement(&this->count);
2773 if (count != WRITER_WAITING)
2774 return;
2775 NtReleaseKeyedEvent(keyed_event, this->writer_head, 0, NULL);
2776 return;
2779 this->thread_id = 0;
2780 next = this->writer_head->next;
2781 if (next) {
2782 NtReleaseKeyedEvent(keyed_event, next, 0, NULL);
2783 return;
2785 InterlockedAnd(&this->count, ~WRITER_WAITING);
2786 head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
2787 while (head) {
2788 next = head->next;
2789 InterlockedIncrement(&this->count);
2790 NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
2791 head = next;
2794 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, this->writer_head) == this->writer_head)
2795 return;
2796 InterlockedOr(&this->count, WRITER_WAITING);
2799 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
2800 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
2801 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_ctor, 8)
2802 reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_ctor(
2803 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
2805 TRACE("(%p %p)\n", this, lock);
2807 this->lock = lock;
2808 reader_writer_lock_lock(lock);
2809 return this;
2812 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ */
2813 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QEAA@XZ */
2814 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_dtor, 4)
2815 void __thiscall reader_writer_lock_scoped_lock_dtor(reader_writer_lock_scoped_lock *this)
2817 TRACE("(%p)\n", this);
2818 reader_writer_lock_unlock(this->lock);
2821 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
2822 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
2823 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_ctor, 8)
2824 reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_read_ctor(
2825 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
2827 TRACE("(%p %p)\n", this, lock);
2829 this->lock = lock;
2830 reader_writer_lock_lock_read(lock);
2831 return this;
2834 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ */
2835 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@XZ */
2836 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_dtor, 4)
2837 void __thiscall reader_writer_lock_scoped_lock_read_dtor(reader_writer_lock_scoped_lock *this)
2839 TRACE("(%p)\n", this);
2840 reader_writer_lock_unlock(this->lock);
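/*
 * Usage sketch (illustrative only): exclusive versus shared acquisition.
 * "rwl" is hypothetical.
 *
 *   reader_writer_lock rwl;
 *
 *   reader_writer_lock_ctor(&rwl);
 *   reader_writer_lock_lock(&rwl);        ... exclusive access ...
 *   reader_writer_lock_unlock(&rwl);
 *   reader_writer_lock_lock_read(&rwl);   ... shared access ...
 *   reader_writer_lock_unlock(&rwl);
 *   reader_writer_lock_dtor(&rwl);
 */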
2843 /* ??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
2844 /* ??0_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
2845 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_ctor, 4)
2846 _ReentrantBlockingLock* __thiscall _ReentrantBlockingLock_ctor(_ReentrantBlockingLock *this)
2848 TRACE("(%p)\n", this);
2850 InitializeCriticalSection(&this->cs);
2851 this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": _ReentrantBlockingLock");
2852 return this;
2855 /* ??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
2856 /* ??1_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
2857 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_dtor, 4)
2858 void __thiscall _ReentrantBlockingLock_dtor(_ReentrantBlockingLock *this)
2860 TRACE("(%p)\n", this);
2862 this->cs.DebugInfo->Spare[0] = 0;
2863 DeleteCriticalSection(&this->cs);
2866 /* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
2867 /* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
2868 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Acquire, 4)
2869 void __thiscall _ReentrantBlockingLock__Acquire(_ReentrantBlockingLock *this)
2871 TRACE("(%p)\n", this);
2872 EnterCriticalSection(&this->cs);
2875 /* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
2876 /* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
2877 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Release, 4)
2878 void __thiscall _ReentrantBlockingLock__Release(_ReentrantBlockingLock *this)
2880 TRACE("(%p)\n", this);
2881 LeaveCriticalSection(&this->cs);
2884 /* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ */
2885 /* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QEAA_NXZ */
2886 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__TryAcquire, 4)
2887 bool __thiscall _ReentrantBlockingLock__TryAcquire(_ReentrantBlockingLock *this)
2889 TRACE("(%p)\n", this);
2890 return TryEnterCriticalSection(&this->cs);
2893 /* ?wait@Concurrency@@YAXI@Z */
2894 void __cdecl Concurrency_wait(unsigned int time)
2896 static int once;
2898 if (!once++) FIXME("(%d) stub!\n", time);
2900 Sleep(time);
2903 #if _MSVCR_VER>=110
2904 /* ?_Trace_agents@Concurrency@@YAXW4Agents_EventType@1@_JZZ */
2905 void WINAPIV _Trace_agents(/*enum Concurrency::Agents_EventType*/int type, __int64 id, ...)
2907 FIXME("(%d %#I64x)\n", type, id);
2909 #endif
2911 /* ?_Trace_ppl_function@Concurrency@@YAXABU_GUID@@EW4ConcRT_EventType@1@@Z */
2912 /* ?_Trace_ppl_function@Concurrency@@YAXAEBU_GUID@@EW4ConcRT_EventType@1@@Z */
2913 void __cdecl _Trace_ppl_function(const GUID *guid, unsigned char level, enum ConcRT_EventType type)
2915 FIXME("(%s %u %i) stub\n", debugstr_guid(guid), level, type);
2918 /* ??0_Timer@details@Concurrency@@IAE@I_N@Z */
2919 /* ??0_Timer@details@Concurrency@@IEAA@I_N@Z */
2920 DEFINE_THISCALL_WRAPPER(_Timer_ctor, 12)
2921 _Timer* __thiscall _Timer_ctor(_Timer *this, unsigned int elapse, bool repeat)
2923 TRACE("(%p %u %x)\n", this, elapse, repeat);
2925 this->vtable = &_Timer_vtable;
2926 this->timer = NULL;
2927 this->elapse = elapse;
2928 this->repeat = repeat;
2929 return this;
2932 static void WINAPI timer_callback(TP_CALLBACK_INSTANCE *instance, void *ctx, TP_TIMER *timer)
2934 _Timer *this = ctx;
2935 TRACE("calling _Timer(%p) callback\n", this);
2936 call__Timer_callback(this);
2939 /* ?_Start@_Timer@details@Concurrency@@IAEXXZ */
2940 /* ?_Start@_Timer@details@Concurrency@@IEAAXXZ */
2941 DEFINE_THISCALL_WRAPPER(_Timer__Start, 4)
2942 void __thiscall _Timer__Start(_Timer *this)
2944 LONGLONG ll;
2945 FILETIME ft;
2947 TRACE("(%p)\n", this);
2949 this->timer = CreateThreadpoolTimer(timer_callback, this, NULL);
2950 if (!this->timer)
2952 FIXME("throw exception?\n");
2953 return;
2956 ll = -(LONGLONG)this->elapse * TICKSPERMSEC;
2957 ft.dwLowDateTime = ll & 0xffffffff;
2958 ft.dwHighDateTime = ll >> 32;
2959 SetThreadpoolTimer(this->timer, &ft, this->repeat ? this->elapse : 0, 0);
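/*
 * SetThreadpoolTimer() treats a negative FILETIME as a due time relative
 * to now, hence -elapse * TICKSPERMSEC above; the period argument re-arms
 * the timer every "elapse" milliseconds when the _Timer repeats, and 0
 * means one-shot.
 */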
2962 /* ?_Stop@_Timer@details@Concurrency@@IAEXXZ */
2963 /* ?_Stop@_Timer@details@Concurrency@@IEAAXXZ */
2964 DEFINE_THISCALL_WRAPPER(_Timer__Stop, 4)
2965 void __thiscall _Timer__Stop(_Timer *this)
2967 TRACE("(%p)\n", this);
2969 SetThreadpoolTimer(this->timer, NULL, 0, 0);
2970 WaitForThreadpoolTimerCallbacks(this->timer, TRUE);
2971 CloseThreadpoolTimer(this->timer);
2972 this->timer = NULL;
2975 /* ??1_Timer@details@Concurrency@@MAE@XZ */
2976 /* ??1_Timer@details@Concurrency@@MEAA@XZ */
2977 DEFINE_THISCALL_WRAPPER(_Timer_dtor, 4)
2978 void __thiscall _Timer_dtor(_Timer *this)
2980 TRACE("(%p)\n", this);
2982 if (this->timer)
2983 _Timer__Stop(this);
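/*
 * MSVC-style vector deleting destructor: "flags & 2" selects array
 * destruction, with the element count stored just before the first
 * object, and "flags & 1" additionally frees the memory.
 */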
2986 DEFINE_THISCALL_WRAPPER(_Timer_vector_dtor, 8)
2987 _Timer* __thiscall _Timer_vector_dtor(_Timer *this, unsigned int flags)
2989 TRACE("(%p %x)\n", this, flags);
2990 if (flags & 2) {
2991 /* we have an array, with the number of elements stored before the first object */
2992 INT_PTR i, *ptr = (INT_PTR *)this-1;
2994 for (i=*ptr-1; i>=0; i--)
2995 _Timer_dtor(this+i);
2996 operator_delete(ptr);
2997 } else {
2998 _Timer_dtor(this);
2999 if (flags & 1)
3000 operator_delete(this);
3003 return this;
3006 #ifdef __ASM_USE_THISCALL_WRAPPER
3008 #define DEFINE_VTBL_WRAPPER(off) \
3009 __ASM_GLOBAL_FUNC(vtbl_wrapper_ ## off, \
3010 "popl %eax\n\t" \
3011 "popl %ecx\n\t" \
3012 "pushl %eax\n\t" \
3013 "movl 0(%ecx), %eax\n\t" \
3014 "jmp *" #off "(%eax)\n\t")
3016 DEFINE_VTBL_WRAPPER(0);
3017 DEFINE_VTBL_WRAPPER(4);
3018 DEFINE_VTBL_WRAPPER(8);
3019 DEFINE_VTBL_WRAPPER(12);
3020 DEFINE_VTBL_WRAPPER(16);
3021 DEFINE_VTBL_WRAPPER(20);
3022 DEFINE_VTBL_WRAPPER(24);
3023 DEFINE_VTBL_WRAPPER(28);
3024 DEFINE_VTBL_WRAPPER(32);
3025 DEFINE_VTBL_WRAPPER(36);
3026 DEFINE_VTBL_WRAPPER(40);
3027 DEFINE_VTBL_WRAPPER(44);
3028 DEFINE_VTBL_WRAPPER(48);
3030 #endif
3032 DEFINE_RTTI_DATA0(Context, 0, ".?AVContext@Concurrency@@")
3033 DEFINE_RTTI_DATA1(ContextBase, 0, &Context_rtti_base_descriptor, ".?AVContextBase@details@Concurrency@@")
3034 DEFINE_RTTI_DATA2(ExternalContextBase, 0, &ContextBase_rtti_base_descriptor,
3035 &Context_rtti_base_descriptor, ".?AVExternalContextBase@details@Concurrency@@")
3036 DEFINE_RTTI_DATA0(Scheduler, 0, ".?AVScheduler@Concurrency@@")
3037 DEFINE_RTTI_DATA1(SchedulerBase, 0, &Scheduler_rtti_base_descriptor, ".?AVSchedulerBase@details@Concurrency@@")
3038 DEFINE_RTTI_DATA2(ThreadScheduler, 0, &SchedulerBase_rtti_base_descriptor,
3039 &Scheduler_rtti_base_descriptor, ".?AVThreadScheduler@details@Concurrency@@")
3040 DEFINE_RTTI_DATA0(_Timer, 0, ".?AV_Timer@details@Concurrency@@");
3042 __ASM_BLOCK_BEGIN(concurrency_vtables)
3043 __ASM_VTABLE(ExternalContextBase,
3044 VTABLE_ADD_FUNC(ExternalContextBase_GetId)
3045 VTABLE_ADD_FUNC(ExternalContextBase_GetVirtualProcessorId)
3046 VTABLE_ADD_FUNC(ExternalContextBase_GetScheduleGroupId)
3047 VTABLE_ADD_FUNC(ExternalContextBase_Unblock)
3048 VTABLE_ADD_FUNC(ExternalContextBase_IsSynchronouslyBlocked)
3049 VTABLE_ADD_FUNC(ExternalContextBase_vector_dtor));
3050 __ASM_VTABLE(ThreadScheduler,
3051 VTABLE_ADD_FUNC(ThreadScheduler_vector_dtor)
3052 VTABLE_ADD_FUNC(ThreadScheduler_Id)
3053 VTABLE_ADD_FUNC(ThreadScheduler_GetNumberOfVirtualProcessors)
3054 VTABLE_ADD_FUNC(ThreadScheduler_GetPolicy)
3055 VTABLE_ADD_FUNC(ThreadScheduler_Reference)
3056 VTABLE_ADD_FUNC(ThreadScheduler_Release)
3057 VTABLE_ADD_FUNC(ThreadScheduler_RegisterShutdownEvent)
3058 VTABLE_ADD_FUNC(ThreadScheduler_Attach)
3059 #if _MSVCR_VER > 100
3060 VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup_loc)
3061 #endif
3062 VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup)
3063 #if _MSVCR_VER > 100
3064 VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask_loc)
3065 #endif
3066 VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask)
3067 #if _MSVCR_VER > 100
3068 VTABLE_ADD_FUNC(ThreadScheduler_IsAvailableLocation)
3069 #endif
3071 __ASM_VTABLE(_Timer,
3072 VTABLE_ADD_FUNC(_Timer_vector_dtor));
3073 __ASM_BLOCK_END
3075 void msvcrt_init_concurrency(void *base)
3077 #ifdef __x86_64__
3078 init_cexception_rtti(base);
3079 init_improper_lock_rtti(base);
3080 init_improper_scheduler_attach_rtti(base);
3081 init_improper_scheduler_detach_rtti(base);
3082 init_invalid_multiple_scheduling_rtti(base);
3083 init_invalid_scheduler_policy_key_rtti(base);
3084 init_invalid_scheduler_policy_thread_specification_rtti(base);
3085 init_invalid_scheduler_policy_value_rtti(base);
3086 init_scheduler_resource_allocation_error_rtti(base);
3087 init_Context_rtti(base);
3088 init_ContextBase_rtti(base);
3089 init_ExternalContextBase_rtti(base);
3090 init_Scheduler_rtti(base);
3091 init_SchedulerBase_rtti(base);
3092 init_ThreadScheduler_rtti(base);
3093 init__Timer_rtti(base);
3095 init_cexception_cxx_type_info(base);
3096 init_improper_lock_cxx(base);
3097 init_improper_scheduler_attach_cxx(base);
3098 init_improper_scheduler_detach_cxx(base);
3099 init_invalid_scheduler_policy_key_cxx(base);
3100 init_invalid_scheduler_policy_thread_specification_cxx(base);
3101 init_invalid_scheduler_policy_value_cxx(base);
3102 init_scheduler_resource_allocation_error_cxx(base);
3103 #endif
3106 void msvcrt_free_concurrency(void)
3108 if (context_tls_index != TLS_OUT_OF_INDEXES)
3109 TlsFree(context_tls_index);
3110 if(default_scheduler_policy.policy_container)
3111 SchedulerPolicy_dtor(&default_scheduler_policy);
3112 if(default_scheduler) {
3113 ThreadScheduler_dtor(default_scheduler);
3114 operator_delete(default_scheduler);
3117 if(keyed_event)
3118 NtClose(keyed_event);
3121 void msvcrt_free_scheduler_thread(void)
3123 Context *context = try_get_current_context();
3124 if (!context) return;
3125 call_Context_dtor(context, 1);
3128 #endif /* _MSVCR_VER >= 100 */