imm32: Move ImmAssociateContext(Ex) around.
[wine.git] / dlls / msvcrt / concurrency.c
blobd0f029650ac4a5716bf80fae1c662f77bd6781d3
1 /*
2 * Concurrency namespace implementation
4 * Copyright 2017 Piotr Caban
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
21 #include <stdarg.h>
22 #include <stdbool.h>
24 #include "windef.h"
25 #include "winternl.h"
26 #include "wine/debug.h"
27 #include "wine/exception.h"
28 #include "wine/list.h"
29 #include "msvcrt.h"
30 #include "cxx.h"
32 #if _MSVCR_VER >= 100
34 WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);
36 typedef exception cexception;
37 CREATE_EXCEPTION_OBJECT(cexception)
39 static LONG context_id = -1;
40 static LONG scheduler_id = -1;
42 typedef enum {
43 SchedulerKind,
44 MaxConcurrency,
45 MinConcurrency,
46 TargetOversubscriptionFactor,
47 LocalContextCacheSize,
48 ContextStackSize,
49 ContextPriority,
50 SchedulingProtocol,
51 DynamicProgressFeedback,
52 WinRTInitialization,
53 last_policy_id
54 } PolicyElementKey;
56 typedef struct {
57 struct _policy_container {
58 unsigned int policies[last_policy_id];
59 } *policy_container;
60 } SchedulerPolicy;
62 typedef struct {
63 const vtable_ptr *vtable;
64 } Context;
65 #define call_Context_GetId(this) CALL_VTBL_FUNC(this, 0, \
66 unsigned int, (const Context*), (this))
67 #define call_Context_GetVirtualProcessorId(this) CALL_VTBL_FUNC(this, 4, \
68 unsigned int, (const Context*), (this))
69 #define call_Context_GetScheduleGroupId(this) CALL_VTBL_FUNC(this, 8, \
70 unsigned int, (const Context*), (this))
71 #define call_Context_Unblock(this) CALL_VTBL_FUNC(this, 12, \
72 void, (Context*), (this))
73 #define call_Context_IsSynchronouslyBlocked(this) CALL_VTBL_FUNC(this, 16, \
74 bool, (const Context*), (this))
75 #define call_Context_dtor(this, flags) CALL_VTBL_FUNC(this, 20, \
76 Context*, (Context*, unsigned int), (this, flags))
77 #define call_Context_Block(this) CALL_VTBL_FUNC(this, 24, \
78 void, (Context*), (this))
80 typedef struct {
81 Context *context;
82 } _Context;
84 union allocator_cache_entry {
85 struct _free {
86 int depth;
87 union allocator_cache_entry *next;
88 } free;
89 struct _alloc {
90 int bucket;
91 char mem[1];
92 } alloc;
95 struct scheduler_list {
96 struct Scheduler *scheduler;
97 struct scheduler_list *next;
100 typedef struct {
101 Context context;
102 struct scheduler_list scheduler;
103 unsigned int id;
104 union allocator_cache_entry *allocator_cache[8];
105 LONG blocked;
106 } ExternalContextBase;
107 extern const vtable_ptr ExternalContextBase_vtable;
108 static void ExternalContextBase_ctor(ExternalContextBase*);
110 typedef struct Scheduler {
111 const vtable_ptr *vtable;
112 } Scheduler;
113 #define call_Scheduler_Id(this) CALL_VTBL_FUNC(this, 4, unsigned int, (const Scheduler*), (this))
114 #define call_Scheduler_GetNumberOfVirtualProcessors(this) CALL_VTBL_FUNC(this, 8, unsigned int, (const Scheduler*), (this))
115 #define call_Scheduler_GetPolicy(this,policy) CALL_VTBL_FUNC(this, 12, \
116 SchedulerPolicy*, (Scheduler*,SchedulerPolicy*), (this,policy))
117 #define call_Scheduler_Reference(this) CALL_VTBL_FUNC(this, 16, unsigned int, (Scheduler*), (this))
118 #define call_Scheduler_Release(this) CALL_VTBL_FUNC(this, 20, unsigned int, (Scheduler*), (this))
119 #define call_Scheduler_RegisterShutdownEvent(this,event) CALL_VTBL_FUNC(this, 24, void, (Scheduler*,HANDLE), (this,event))
120 #define call_Scheduler_Attach(this) CALL_VTBL_FUNC(this, 28, void, (Scheduler*), (this))
121 #if _MSVCR_VER > 100
122 #define call_Scheduler_CreateScheduleGroup_loc(this,placement) CALL_VTBL_FUNC(this, 32, \
123 /*ScheduleGroup*/void*, (Scheduler*,/*location*/void*), (this,placement))
124 #define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 36, /*ScheduleGroup*/void*, (Scheduler*), (this))
125 #define call_Scheduler_ScheduleTask_loc(this,proc,data,placement) CALL_VTBL_FUNC(this, 40, \
126 void, (Scheduler*,void (__cdecl*)(void*),void*,/*location*/void*), (this,proc,data,placement))
127 #define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 44, \
128 void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
129 #define call_Scheduler_IsAvailableLocation(this,placement) CALL_VTBL_FUNC(this, 48, \
130 bool, (Scheduler*,const /*location*/void*), (this,placement))
131 #else
132 #define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 32, /*ScheduleGroup*/void*, (Scheduler*), (this))
133 #define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 36, \
134 void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
135 #endif
137 typedef struct {
138 Scheduler scheduler;
139 LONG ref;
140 unsigned int id;
141 unsigned int virt_proc_no;
142 SchedulerPolicy policy;
143 int shutdown_count;
144 int shutdown_size;
145 HANDLE *shutdown_events;
146 CRITICAL_SECTION cs;
147 struct list scheduled_chores;
148 } ThreadScheduler;
149 extern const vtable_ptr ThreadScheduler_vtable;
151 typedef struct {
152 Scheduler *scheduler;
153 } _Scheduler;
155 typedef struct {
156 char empty;
157 } _CurrentScheduler;
159 typedef enum
161 SPINWAIT_INIT,
162 SPINWAIT_SPIN,
163 SPINWAIT_YIELD,
164 SPINWAIT_DONE
165 } SpinWait_state;
167 typedef void (__cdecl *yield_func)(void);
169 typedef struct
171 ULONG spin;
172 ULONG unknown;
173 SpinWait_state state;
174 yield_func yield_func;
175 } SpinWait;
177 #define FINISHED_INITIAL 0x80000000
178 typedef struct
180 void *unk1;
181 unsigned int unk2;
182 void *unk3;
183 Context *context;
184 volatile LONG count;
185 volatile LONG finished;
186 void *exception;
187 void *event;
188 } _StructuredTaskCollection;
190 typedef enum
192 TASK_COLLECTION_SUCCESS = 1,
193 TASK_COLLECTION_CANCELLED
194 } _TaskCollectionStatus;
196 typedef enum
198 STRUCTURED_TASK_COLLECTION_CANCELLED = 0x2,
199 STRUCTURED_TASK_COLLECTION_STATUS_MASK = 0x7
200 } _StructuredTaskCollectionStatusBits;
202 typedef struct _UnrealizedChore
204 const vtable_ptr *vtable;
205 void (__cdecl *chore_proc)(struct _UnrealizedChore*);
206 _StructuredTaskCollection *task_collection;
207 void (__cdecl *chore_wrapper)(struct _UnrealizedChore*);
208 void *unk[6];
209 } _UnrealizedChore;
211 struct scheduled_chore {
212 struct list entry;
213 _UnrealizedChore *chore;
216 /* keep in sync with msvcp90/msvcp90.h */
217 typedef struct cs_queue
219 struct cs_queue *next;
220 #if _MSVCR_VER >= 110
221 LONG free;
222 int unknown;
223 #endif
224 } cs_queue;
226 typedef struct
228 ULONG_PTR unk_thread_id;
229 cs_queue unk_active;
230 #if _MSVCR_VER >= 110
231 void *unknown[2];
232 #else
233 void *unknown[1];
234 #endif
235 cs_queue *head;
236 void *tail;
237 } critical_section;
239 typedef struct
241 critical_section *cs;
242 union {
243 cs_queue q;
244 struct {
245 void *unknown[4];
246 int unknown2[2];
247 } unknown;
248 } lock;
249 } critical_section_scoped_lock;
251 typedef struct
253 critical_section cs;
254 } _NonReentrantPPLLock;
256 typedef struct
258 _NonReentrantPPLLock *lock;
259 union {
260 cs_queue q;
261 struct {
262 void *unknown[4];
263 int unknown2[2];
264 } unknown;
265 } wait;
266 } _NonReentrantPPLLock__Scoped_lock;
268 typedef struct
270 critical_section cs;
271 LONG count;
272 LONG owner;
273 } _ReentrantPPLLock;
275 typedef struct
277 _ReentrantPPLLock *lock;
278 union {
279 cs_queue q;
280 struct {
281 void *unknown[4];
282 int unknown2[2];
283 } unknown;
284 } wait;
285 } _ReentrantPPLLock__Scoped_lock;
287 #define EVT_RUNNING (void*)1
288 #define EVT_WAITING NULL
290 struct thread_wait;
291 typedef struct thread_wait_entry
293 struct thread_wait *wait;
294 struct thread_wait_entry *next;
295 struct thread_wait_entry *prev;
296 } thread_wait_entry;
298 typedef struct thread_wait
300 void *signaled;
301 LONG pending_waits;
302 thread_wait_entry entries[1];
303 } thread_wait;
305 typedef struct
307 thread_wait_entry *waiters;
308 INT_PTR signaled;
309 critical_section cs;
310 } event;
312 #if _MSVCR_VER >= 110
313 #define CV_WAKE (void*)1
314 typedef struct cv_queue {
315 struct cv_queue *next;
316 LONG expired;
317 } cv_queue;
319 typedef struct {
320 /* cv_queue structure is not binary compatible */
321 cv_queue *queue;
322 critical_section lock;
323 } _Condition_variable;
324 #endif
326 typedef struct rwl_queue
328 struct rwl_queue *next;
329 Context *ctx;
330 } rwl_queue;
332 #define WRITER_WAITING 0x80000000
333 /* FIXME: reader_writer_lock structure is not binary compatible
334 * it can't exceed 28/56 bytes */
335 typedef struct
337 LONG count;
338 LONG thread_id;
339 rwl_queue active;
340 rwl_queue *writer_head;
341 rwl_queue *writer_tail;
342 rwl_queue *reader_head;
343 } reader_writer_lock;
345 typedef struct {
346 reader_writer_lock *lock;
347 } reader_writer_lock_scoped_lock;
349 typedef struct {
350 CRITICAL_SECTION cs;
351 } _ReentrantBlockingLock;
353 #define TICKSPERMSEC 10000
354 typedef struct {
355 const vtable_ptr *vtable;
356 TP_TIMER *timer;
357 unsigned int elapse;
358 bool repeat;
359 } _Timer;
360 extern const vtable_ptr _Timer_vtable;
361 #define call__Timer_callback(this) CALL_VTBL_FUNC(this, 4, void, (_Timer*), (this))
363 typedef exception improper_lock;
364 extern const vtable_ptr improper_lock_vtable;
366 typedef exception improper_scheduler_attach;
367 extern const vtable_ptr improper_scheduler_attach_vtable;
369 typedef exception improper_scheduler_detach;
370 extern const vtable_ptr improper_scheduler_detach_vtable;
372 typedef exception invalid_multiple_scheduling;
373 extern const vtable_ptr invalid_multiple_scheduling_vtable;
375 typedef exception invalid_scheduler_policy_key;
376 extern const vtable_ptr invalid_scheduler_policy_key_vtable;
378 typedef exception invalid_scheduler_policy_thread_specification;
379 extern const vtable_ptr invalid_scheduler_policy_thread_specification_vtable;
381 typedef exception invalid_scheduler_policy_value;
382 extern const vtable_ptr invalid_scheduler_policy_value_vtable;
384 typedef exception missing_wait;
385 extern const vtable_ptr missing_wait_vtable;
387 typedef struct {
388 exception e;
389 HRESULT hr;
390 } scheduler_resource_allocation_error;
391 extern const vtable_ptr scheduler_resource_allocation_error_vtable;
393 enum ConcRT_EventType
395 CONCRT_EVENT_GENERIC,
396 CONCRT_EVENT_START,
397 CONCRT_EVENT_END,
398 CONCRT_EVENT_BLOCK,
399 CONCRT_EVENT_UNBLOCK,
400 CONCRT_EVENT_YIELD,
401 CONCRT_EVENT_ATTACH,
402 CONCRT_EVENT_DETACH
405 static DWORD context_tls_index = TLS_OUT_OF_INDEXES;
407 static CRITICAL_SECTION default_scheduler_cs;
408 static CRITICAL_SECTION_DEBUG default_scheduler_cs_debug =
410 0, 0, &default_scheduler_cs,
411 { &default_scheduler_cs_debug.ProcessLocksList, &default_scheduler_cs_debug.ProcessLocksList },
412 0, 0, { (DWORD_PTR)(__FILE__ ": default_scheduler_cs") }
414 static CRITICAL_SECTION default_scheduler_cs = { &default_scheduler_cs_debug, -1, 0, 0, 0, 0 };
415 static SchedulerPolicy default_scheduler_policy;
416 static ThreadScheduler *default_scheduler;
418 static HANDLE keyed_event;
420 static void create_default_scheduler(void);
422 /* ??0improper_lock@Concurrency@@QAE@PBD@Z */
423 /* ??0improper_lock@Concurrency@@QEAA@PEBD@Z */
424 DEFINE_THISCALL_WRAPPER(improper_lock_ctor_str, 8)
425 improper_lock* __thiscall improper_lock_ctor_str(improper_lock *this, const char *str)
427 TRACE("(%p %s)\n", this, str);
428 return __exception_ctor(this, str, &improper_lock_vtable);
431 /* ??0improper_lock@Concurrency@@QAE@XZ */
432 /* ??0improper_lock@Concurrency@@QEAA@XZ */
433 DEFINE_THISCALL_WRAPPER(improper_lock_ctor, 4)
434 improper_lock* __thiscall improper_lock_ctor(improper_lock *this)
436 return improper_lock_ctor_str(this, NULL);
439 DEFINE_THISCALL_WRAPPER(improper_lock_copy_ctor,8)
440 improper_lock * __thiscall improper_lock_copy_ctor(improper_lock *this, const improper_lock *rhs)
442 TRACE("(%p %p)\n", this, rhs);
443 return __exception_copy_ctor(this, rhs, &improper_lock_vtable);
446 /* ??0improper_scheduler_attach@Concurrency@@QAE@PBD@Z */
447 /* ??0improper_scheduler_attach@Concurrency@@QEAA@PEBD@Z */
448 DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor_str, 8)
449 improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor_str(
450 improper_scheduler_attach *this, const char *str)
452 TRACE("(%p %s)\n", this, str);
453 return __exception_ctor(this, str, &improper_scheduler_attach_vtable);
456 /* ??0improper_scheduler_attach@Concurrency@@QAE@XZ */
457 /* ??0improper_scheduler_attach@Concurrency@@QEAA@XZ */
458 DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor, 4)
459 improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor(
460 improper_scheduler_attach *this)
462 return improper_scheduler_attach_ctor_str(this, NULL);
465 DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_copy_ctor,8)
466 improper_scheduler_attach * __thiscall improper_scheduler_attach_copy_ctor(
467 improper_scheduler_attach * _this, const improper_scheduler_attach * rhs)
469 TRACE("(%p %p)\n", _this, rhs);
470 return __exception_copy_ctor(_this, rhs, &improper_scheduler_attach_vtable);
473 /* ??0improper_scheduler_detach@Concurrency@@QAE@PBD@Z */
474 /* ??0improper_scheduler_detach@Concurrency@@QEAA@PEBD@Z */
475 DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor_str, 8)
476 improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor_str(
477 improper_scheduler_detach *this, const char *str)
479 TRACE("(%p %s)\n", this, str);
480 return __exception_ctor(this, str, &improper_scheduler_detach_vtable);
483 /* ??0improper_scheduler_detach@Concurrency@@QAE@XZ */
484 /* ??0improper_scheduler_detach@Concurrency@@QEAA@XZ */
485 DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor, 4)
486 improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor(
487 improper_scheduler_detach *this)
489 return improper_scheduler_detach_ctor_str(this, NULL);
492 DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_copy_ctor,8)
493 improper_scheduler_detach * __thiscall improper_scheduler_detach_copy_ctor(
494 improper_scheduler_detach * _this, const improper_scheduler_detach * rhs)
496 TRACE("(%p %p)\n", _this, rhs);
497 return __exception_copy_ctor(_this, rhs, &improper_scheduler_detach_vtable);
500 /* ??0invalid_multiple_scheduling@Concurrency@@QAA@PBD@Z */
501 /* ??0invalid_multiple_scheduling@Concurrency@@QAE@PBD@Z */
502 /* ??0invalid_multiple_scheduling@Concurrency@@QEAA@PEBD@Z */
503 DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_ctor_str, 8)
504 invalid_multiple_scheduling* __thiscall invalid_multiple_scheduling_ctor_str(
505 invalid_multiple_scheduling *this, const char *str)
507 TRACE("(%p %s)\n", this, str);
508 return __exception_ctor(this, str, &invalid_multiple_scheduling_vtable);
511 /* ??0invalid_multiple_scheduling@Concurrency@@QAA@XZ */
512 /* ??0invalid_multiple_scheduling@Concurrency@@QAE@XZ */
513 /* ??0invalid_multiple_scheduling@Concurrency@@QEAA@XZ */
514 DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_ctor, 4)
515 invalid_multiple_scheduling* __thiscall invalid_multiple_scheduling_ctor(
516 invalid_multiple_scheduling *this)
518 return invalid_multiple_scheduling_ctor_str(this, NULL);
521 DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_copy_ctor,8)
522 invalid_multiple_scheduling * __thiscall invalid_multiple_scheduling_copy_ctor(
523 invalid_multiple_scheduling * _this, const invalid_multiple_scheduling * rhs)
525 TRACE("(%p %p)\n", _this, rhs);
526 return __exception_copy_ctor(_this, rhs, &invalid_multiple_scheduling_vtable);
529 /* ??0invalid_scheduler_policy_key@Concurrency@@QAE@PBD@Z */
530 /* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@PEBD@Z */
531 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor_str, 8)
532 invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor_str(
533 invalid_scheduler_policy_key *this, const char *str)
535 TRACE("(%p %s)\n", this, str);
536 return __exception_ctor(this, str, &invalid_scheduler_policy_key_vtable);
539 /* ??0invalid_scheduler_policy_key@Concurrency@@QAE@XZ */
540 /* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@XZ */
541 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor, 4)
542 invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor(
543 invalid_scheduler_policy_key *this)
545 return invalid_scheduler_policy_key_ctor_str(this, NULL);
548 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_copy_ctor,8)
549 invalid_scheduler_policy_key * __thiscall invalid_scheduler_policy_key_copy_ctor(
550 invalid_scheduler_policy_key * _this, const invalid_scheduler_policy_key * rhs)
552 TRACE("(%p %p)\n", _this, rhs);
553 return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_key_vtable);
556 /* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@PBD@Z */
557 /* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@PEBD@Z */
558 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor_str, 8)
559 invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor_str(
560 invalid_scheduler_policy_thread_specification *this, const char *str)
562 TRACE("(%p %s)\n", this, str);
563 return __exception_ctor(this, str, &invalid_scheduler_policy_thread_specification_vtable);
566 /* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@XZ */
567 /* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@XZ */
568 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor, 4)
569 invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor(
570 invalid_scheduler_policy_thread_specification *this)
572 return invalid_scheduler_policy_thread_specification_ctor_str(this, NULL);
575 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_copy_ctor,8)
576 invalid_scheduler_policy_thread_specification * __thiscall invalid_scheduler_policy_thread_specification_copy_ctor(
577 invalid_scheduler_policy_thread_specification * _this, const invalid_scheduler_policy_thread_specification * rhs)
579 TRACE("(%p %p)\n", _this, rhs);
580 return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_thread_specification_vtable);
583 /* ??0invalid_scheduler_policy_value@Concurrency@@QAE@PBD@Z */
584 /* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@PEBD@Z */
585 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor_str, 8)
586 invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor_str(
587 invalid_scheduler_policy_value *this, const char *str)
589 TRACE("(%p %s)\n", this, str);
590 return __exception_ctor(this, str, &invalid_scheduler_policy_value_vtable);
593 /* ??0invalid_scheduler_policy_value@Concurrency@@QAE@XZ */
594 /* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@XZ */
595 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor, 4)
596 invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor(
597 invalid_scheduler_policy_value *this)
599 return invalid_scheduler_policy_value_ctor_str(this, NULL);
602 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_copy_ctor,8)
603 invalid_scheduler_policy_value * __thiscall invalid_scheduler_policy_value_copy_ctor(
604 invalid_scheduler_policy_value * _this, const invalid_scheduler_policy_value * rhs)
606 TRACE("(%p %p)\n", _this, rhs);
607 return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_value_vtable);
610 /* ??0missing_wait@Concurrency@@QAA@PBD@Z */
611 /* ??0missing_wait@Concurrency@@QAE@PBD@Z */
612 /* ??0missing_wait@Concurrency@@QEAA@PEBD@Z */
613 DEFINE_THISCALL_WRAPPER(missing_wait_ctor_str, 8)
614 missing_wait* __thiscall missing_wait_ctor_str(
615 missing_wait *this, const char *str)
617 TRACE("(%p %p)\n", this, str);
618 return __exception_ctor(this, str, &missing_wait_vtable);
621 /* ??0missing_wait@Concurrency@@QAA@XZ */
622 /* ??0missing_wait@Concurrency@@QAE@XZ */
623 /* ??0missing_wait@Concurrency@@QEAA@XZ */
624 DEFINE_THISCALL_WRAPPER(missing_wait_ctor, 4)
625 missing_wait* __thiscall missing_wait_ctor(missing_wait *this)
627 return missing_wait_ctor_str(this, NULL);
630 DEFINE_THISCALL_WRAPPER(missing_wait_copy_ctor,8)
631 missing_wait * __thiscall missing_wait_copy_ctor(
632 missing_wait * _this, const missing_wait * rhs)
634 TRACE("(%p %p)\n", _this, rhs);
635 return __exception_copy_ctor(_this, rhs, &missing_wait_vtable);
638 /* ??0scheduler_resource_allocation_error@Concurrency@@QAE@PBDJ@Z */
639 /* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@PEBDJ@Z */
640 DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor_name, 12)
641 scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor_name(
642 scheduler_resource_allocation_error *this, const char *name, HRESULT hr)
644 TRACE("(%p %s %lx)\n", this, wine_dbgstr_a(name), hr);
645 __exception_ctor(&this->e, name, &scheduler_resource_allocation_error_vtable);
646 this->hr = hr;
647 return this;
650 /* ??0scheduler_resource_allocation_error@Concurrency@@QAE@J@Z */
651 /* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@J@Z */
652 DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor, 8)
653 scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor(
654 scheduler_resource_allocation_error *this, HRESULT hr)
656 return scheduler_resource_allocation_error_ctor_name(this, NULL, hr);
659 DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_copy_ctor,8)
660 scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_copy_ctor(
661 scheduler_resource_allocation_error *this,
662 const scheduler_resource_allocation_error *rhs)
664 TRACE("(%p,%p)\n", this, rhs);
666 if (!rhs->e.do_free)
667 memcpy(this, rhs, sizeof(*this));
668 else
669 scheduler_resource_allocation_error_ctor_name(this, rhs->e.name, rhs->hr);
670 return this;
673 /* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QBEJXZ */
674 /* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QEBAJXZ */
675 DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_get_error_code, 4)
676 HRESULT __thiscall scheduler_resource_allocation_error_get_error_code(
677 const scheduler_resource_allocation_error *this)
679 TRACE("(%p)\n", this);
680 return this->hr;
683 DEFINE_RTTI_DATA1(improper_lock, 0, &cexception_rtti_base_descriptor,
684 ".?AVimproper_lock@Concurrency@@")
685 DEFINE_RTTI_DATA1(improper_scheduler_attach, 0, &cexception_rtti_base_descriptor,
686 ".?AVimproper_scheduler_attach@Concurrency@@")
687 DEFINE_RTTI_DATA1(improper_scheduler_detach, 0, &cexception_rtti_base_descriptor,
688 ".?AVimproper_scheduler_detach@Concurrency@@")
689 DEFINE_RTTI_DATA1(invalid_multiple_scheduling, 0, &cexception_rtti_base_descriptor,
690 ".?AVinvalid_multiple_scheduling@Concurrency@@")
691 DEFINE_RTTI_DATA1(invalid_scheduler_policy_key, 0, &cexception_rtti_base_descriptor,
692 ".?AVinvalid_scheduler_policy_key@Concurrency@@")
693 DEFINE_RTTI_DATA1(invalid_scheduler_policy_thread_specification, 0, &cexception_rtti_base_descriptor,
694 ".?AVinvalid_scheduler_policy_thread_specification@Concurrency@@")
695 DEFINE_RTTI_DATA1(invalid_scheduler_policy_value, 0, &cexception_rtti_base_descriptor,
696 ".?AVinvalid_scheduler_policy_value@Concurrency@@")
697 DEFINE_RTTI_DATA1(missing_wait, 0, &cexception_rtti_base_descriptor,
698 ".?AVmissing_wait@Concurrency@@")
699 DEFINE_RTTI_DATA1(scheduler_resource_allocation_error, 0, &cexception_rtti_base_descriptor,
700 ".?AVscheduler_resource_allocation_error@Concurrency@@")
702 DEFINE_CXX_DATA1(improper_lock, &cexception_cxx_type_info, cexception_dtor)
703 DEFINE_CXX_DATA1(improper_scheduler_attach, &cexception_cxx_type_info, cexception_dtor)
704 DEFINE_CXX_DATA1(improper_scheduler_detach, &cexception_cxx_type_info, cexception_dtor)
705 DEFINE_CXX_DATA1(invalid_multiple_scheduling, &cexception_cxx_type_info, cexception_dtor)
706 DEFINE_CXX_DATA1(invalid_scheduler_policy_key, &cexception_cxx_type_info, cexception_dtor)
707 DEFINE_CXX_DATA1(invalid_scheduler_policy_thread_specification, &cexception_cxx_type_info, cexception_dtor)
708 DEFINE_CXX_DATA1(invalid_scheduler_policy_value, &cexception_cxx_type_info, cexception_dtor)
709 #if _MSVCR_VER >= 120
710 DEFINE_CXX_DATA1(missing_wait, &cexception_cxx_type_info, cexception_dtor)
711 #endif
712 DEFINE_CXX_DATA1(scheduler_resource_allocation_error, &cexception_cxx_type_info, cexception_dtor)
714 __ASM_BLOCK_BEGIN(concurrency_exception_vtables)
715 __ASM_VTABLE(improper_lock,
716 VTABLE_ADD_FUNC(cexception_vector_dtor)
717 VTABLE_ADD_FUNC(cexception_what));
718 __ASM_VTABLE(improper_scheduler_attach,
719 VTABLE_ADD_FUNC(cexception_vector_dtor)
720 VTABLE_ADD_FUNC(cexception_what));
721 __ASM_VTABLE(improper_scheduler_detach,
722 VTABLE_ADD_FUNC(cexception_vector_dtor)
723 VTABLE_ADD_FUNC(cexception_what));
724 __ASM_VTABLE(invalid_multiple_scheduling,
725 VTABLE_ADD_FUNC(cexception_vector_dtor)
726 VTABLE_ADD_FUNC(cexception_what));
727 __ASM_VTABLE(invalid_scheduler_policy_key,
728 VTABLE_ADD_FUNC(cexception_vector_dtor)
729 VTABLE_ADD_FUNC(cexception_what));
730 __ASM_VTABLE(invalid_scheduler_policy_thread_specification,
731 VTABLE_ADD_FUNC(cexception_vector_dtor)
732 VTABLE_ADD_FUNC(cexception_what));
733 __ASM_VTABLE(invalid_scheduler_policy_value,
734 VTABLE_ADD_FUNC(cexception_vector_dtor)
735 VTABLE_ADD_FUNC(cexception_what));
736 __ASM_VTABLE(missing_wait,
737 VTABLE_ADD_FUNC(cexception_vector_dtor)
738 VTABLE_ADD_FUNC(cexception_what));
739 __ASM_VTABLE(scheduler_resource_allocation_error,
740 VTABLE_ADD_FUNC(cexception_vector_dtor)
741 VTABLE_ADD_FUNC(cexception_what));
742 __ASM_BLOCK_END
744 static Context* try_get_current_context(void)
746 if (context_tls_index == TLS_OUT_OF_INDEXES)
747 return NULL;
748 return TlsGetValue(context_tls_index);
751 static BOOL WINAPI init_context_tls_index(INIT_ONCE *once, void *param, void **context)
753 context_tls_index = TlsAlloc();
754 return context_tls_index != TLS_OUT_OF_INDEXES;
757 static Context* get_current_context(void)
759 static INIT_ONCE init_once = INIT_ONCE_STATIC_INIT;
760 Context *ret;
762 if(!InitOnceExecuteOnce(&init_once, init_context_tls_index, NULL, NULL))
764 scheduler_resource_allocation_error e;
765 scheduler_resource_allocation_error_ctor_name(&e, NULL,
766 HRESULT_FROM_WIN32(GetLastError()));
767 _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
770 ret = TlsGetValue(context_tls_index);
771 if (!ret) {
772 ExternalContextBase *context = operator_new(sizeof(ExternalContextBase));
773 ExternalContextBase_ctor(context);
774 TlsSetValue(context_tls_index, context);
775 ret = &context->context;
777 return ret;
780 static Scheduler* get_scheduler_from_context(Context *ctx)
782 ExternalContextBase *context = (ExternalContextBase*)ctx;
784 if (context->context.vtable != &ExternalContextBase_vtable)
785 return NULL;
786 return context->scheduler.scheduler;
789 static Scheduler* try_get_current_scheduler(void)
791 Context *context = try_get_current_context();
792 Scheduler *ret;
794 if (!context)
795 return NULL;
797 ret = get_scheduler_from_context(context);
798 if (!ret)
799 ERR("unknown context set\n");
800 return ret;
803 static Scheduler* get_current_scheduler(void)
805 Context *context = get_current_context();
806 Scheduler *ret;
808 ret = get_scheduler_from_context(context);
809 if (!ret)
810 ERR("unknown context set\n");
811 return ret;
814 /* ?CurrentContext@Context@Concurrency@@SAPAV12@XZ */
815 /* ?CurrentContext@Context@Concurrency@@SAPEAV12@XZ */
816 Context* __cdecl Context_CurrentContext(void)
818 TRACE("()\n");
819 return get_current_context();
822 /* ?Id@Context@Concurrency@@SAIXZ */
823 unsigned int __cdecl Context_Id(void)
825 Context *ctx = try_get_current_context();
826 TRACE("()\n");
827 return ctx ? call_Context_GetId(ctx) : -1;
830 /* ?Block@Context@Concurrency@@SAXXZ */
831 void __cdecl Context_Block(void)
833 Context *ctx = get_current_context();
834 TRACE("()\n");
835 call_Context_Block(ctx);
838 /* ?Yield@Context@Concurrency@@SAXXZ */
839 /* ?_Yield@_Context@details@Concurrency@@SAXXZ */
840 void __cdecl Context_Yield(void)
842 FIXME("()\n");
845 /* ?_SpinYield@Context@Concurrency@@SAXXZ */
846 void __cdecl Context__SpinYield(void)
848 FIXME("()\n");
851 /* ?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ */
852 bool __cdecl Context_IsCurrentTaskCollectionCanceling(void)
854 FIXME("()\n");
855 return FALSE;
858 /* ?Oversubscribe@Context@Concurrency@@SAX_N@Z */
859 void __cdecl Context_Oversubscribe(bool begin)
861 FIXME("(%x)\n", begin);
864 /* ?ScheduleGroupId@Context@Concurrency@@SAIXZ */
865 unsigned int __cdecl Context_ScheduleGroupId(void)
867 Context *ctx = try_get_current_context();
868 TRACE("()\n");
869 return ctx ? call_Context_GetScheduleGroupId(ctx) : -1;
872 /* ?VirtualProcessorId@Context@Concurrency@@SAIXZ */
873 unsigned int __cdecl Context_VirtualProcessorId(void)
875 Context *ctx = try_get_current_context();
876 TRACE("()\n");
877 return ctx ? call_Context_GetVirtualProcessorId(ctx) : -1;
880 #if _MSVCR_VER > 100
881 /* ?_CurrentContext@_Context@details@Concurrency@@SA?AV123@XZ */
882 _Context *__cdecl _Context__CurrentContext(_Context *ret)
884 TRACE("(%p)\n", ret);
885 ret->context = Context_CurrentContext();
886 return ret;
889 DEFINE_THISCALL_WRAPPER(_Context_IsSynchronouslyBlocked, 4)
890 BOOL __thiscall _Context_IsSynchronouslyBlocked(const _Context *this)
892 TRACE("(%p)\n", this);
893 return call_Context_IsSynchronouslyBlocked(this->context);
895 #endif
897 DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetId, 4)
898 unsigned int __thiscall ExternalContextBase_GetId(const ExternalContextBase *this)
900 TRACE("(%p)->()\n", this);
901 return this->id;
904 DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetVirtualProcessorId, 4)
905 unsigned int __thiscall ExternalContextBase_GetVirtualProcessorId(const ExternalContextBase *this)
907 FIXME("(%p)->() stub\n", this);
908 return -1;
911 DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetScheduleGroupId, 4)
912 unsigned int __thiscall ExternalContextBase_GetScheduleGroupId(const ExternalContextBase *this)
914 FIXME("(%p)->() stub\n", this);
915 return -1;
918 DEFINE_THISCALL_WRAPPER(ExternalContextBase_Unblock, 4)
919 void __thiscall ExternalContextBase_Unblock(ExternalContextBase *this)
921 TRACE("(%p)->()\n", this);
923 /* TODO: throw context_unblock_unbalanced if this->blocked goes below -1 */
924 if (!InterlockedDecrement(&this->blocked))
925 RtlWakeAddressSingle(&this->blocked);
928 DEFINE_THISCALL_WRAPPER(ExternalContextBase_IsSynchronouslyBlocked, 4)
929 bool __thiscall ExternalContextBase_IsSynchronouslyBlocked(const ExternalContextBase *this)
931 TRACE("(%p)->()\n", this);
932 return this->blocked >= 1;
935 DEFINE_THISCALL_WRAPPER(ExternalContextBase_Block, 4)
936 void __thiscall ExternalContextBase_Block(ExternalContextBase *this)
938 LONG blocked;
940 TRACE("(%p)->()\n", this);
942 blocked = InterlockedIncrement(&this->blocked);
943 while (blocked >= 1)
945 RtlWaitOnAddress(&this->blocked, &blocked, sizeof(LONG), NULL);
946 blocked = this->blocked;
950 DEFINE_THISCALL_WRAPPER(ExternalContextBase_Yield, 4)
951 void __thiscall ExternalContextBase_Yield(ExternalContextBase *this)
953 FIXME("(%p)->() stub\n", this);
/* Stub: spin-yield is not implemented. */
DEFINE_THISCALL_WRAPPER(ExternalContextBase_SpinYield, 4)
void __thiscall ExternalContextBase_SpinYield(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
}
/* Stub: oversubscription requests are ignored. */
DEFINE_THISCALL_WRAPPER(ExternalContextBase_Oversubscribe, 8)
void __thiscall ExternalContextBase_Oversubscribe(
        ExternalContextBase *this, bool oversubscribe)
{
    FIXME("(%p)->(%x) stub\n", this, oversubscribe);
}
/* Stub: per-context allocation entry point; always fails with NULL. */
DEFINE_THISCALL_WRAPPER(ExternalContextBase_Alloc, 8)
void* __thiscall ExternalContextBase_Alloc(ExternalContextBase *this, size_t size)
{
    FIXME("(%p)->(%Iu) stub\n", this, size);
    return NULL;
}
/* Stub: counterpart of ExternalContextBase_Alloc; does nothing. */
DEFINE_THISCALL_WRAPPER(ExternalContextBase_Free, 8)
void __thiscall ExternalContextBase_Free(ExternalContextBase *this, void *addr)
{
    FIXME("(%p)->(%p) stub\n", this, addr);
}
/* Stub: critical region tracking is not implemented. */
DEFINE_THISCALL_WRAPPER(ExternalContextBase_EnterCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_EnterCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}
/* Stub: hyper-critical region tracking is not implemented. */
DEFINE_THISCALL_WRAPPER(ExternalContextBase_EnterHyperCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_EnterHyperCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}
/* Stub: critical region tracking is not implemented. */
DEFINE_THISCALL_WRAPPER(ExternalContextBase_ExitCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_ExitCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}
/* Stub: hyper-critical region tracking is not implemented. */
DEFINE_THISCALL_WRAPPER(ExternalContextBase_ExitHyperCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_ExitHyperCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}
/* Stub: always reports "outside any critical region" (0). */
DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetCriticalRegionType, 4)
int __thiscall ExternalContextBase_GetCriticalRegionType(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}
/* Stub: context kind reporting is not implemented; returns 0. */
DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetContextKind, 4)
int __thiscall ExternalContextBase_GetContextKind(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}
1024 static void remove_scheduled_chores(Scheduler *scheduler, const ExternalContextBase *context)
1026 ThreadScheduler *tscheduler = (ThreadScheduler*)scheduler;
1027 struct scheduled_chore *sc, *next;
1029 if (tscheduler->scheduler.vtable != &ThreadScheduler_vtable)
1030 return;
1032 EnterCriticalSection(&tscheduler->cs);
1033 LIST_FOR_EACH_ENTRY_SAFE(sc, next, &tscheduler->scheduled_chores,
1034 struct scheduled_chore, entry) {
1035 if (sc->chore->task_collection->context == &context->context) {
1036 list_remove(&sc->entry);
1037 operator_delete(sc);
1040 LeaveCriticalSection(&tscheduler->cs);
1043 static void ExternalContextBase_dtor(ExternalContextBase *this)
1045 struct scheduler_list *scheduler_cur, *scheduler_next;
1046 union allocator_cache_entry *next, *cur;
1047 int i;
1049 /* TODO: move the allocator cache to scheduler so it can be reused */
1050 for(i=0; i<ARRAY_SIZE(this->allocator_cache); i++) {
1051 for(cur = this->allocator_cache[i]; cur; cur=next) {
1052 next = cur->free.next;
1053 operator_delete(cur);
1057 if (this->scheduler.scheduler) {
1058 remove_scheduled_chores(this->scheduler.scheduler, this);
1059 call_Scheduler_Release(this->scheduler.scheduler);
1061 for(scheduler_cur=this->scheduler.next; scheduler_cur; scheduler_cur=scheduler_next) {
1062 scheduler_next = scheduler_cur->next;
1063 remove_scheduled_chores(scheduler_cur->scheduler, this);
1064 call_Scheduler_Release(scheduler_cur->scheduler);
1065 operator_delete(scheduler_cur);
/* MSVC-ABI vector destructor: flags bit 1 = free the object itself,
 * bit 2 = 'this' points into an array whose length precedes element 0. */
DEFINE_THISCALL_WRAPPER(ExternalContextBase_vector_dtor, 8)
Context* __thiscall ExternalContextBase_vector_dtor(ExternalContextBase *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        /* destroy in reverse construction order */
        for(i=*ptr-1; i>=0; i--)
            ExternalContextBase_dtor(this+i);
        operator_delete(ptr);
    } else {
        ExternalContextBase_dtor(this);
        if(flags & 1)
            operator_delete(this);
    }

    return &this->context;
}
/* Initializes an external context: zeroes all state, assigns a fresh id
 * and takes a reference on the process-wide default scheduler. */
static void ExternalContextBase_ctor(ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);

    memset(this, 0, sizeof(*this));
    this->context.vtable = &ExternalContextBase_vtable;
    this->id = InterlockedIncrement(&context_id);

    /* every context starts out attached to the default scheduler */
    create_default_scheduler();
    this->scheduler.scheduler = &default_scheduler->scheduler;
    call_Scheduler_Reference(&default_scheduler->scheduler);
}
/* ?Alloc@Concurrency@@YAPAXI@Z */
/* ?Alloc@Concurrency@@YAPEAX_K@Z */
/* Suballocator: rounds requests up to power-of-two buckets (16, 32, ...)
 * and recycles freed blocks from the current context's per-bucket cache.
 * bucket == -1 marks blocks that bypass the cache and go straight to
 * operator_delete in Concurrency_Free. */
void * CDECL Concurrency_Alloc(size_t size)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    union allocator_cache_entry *p;

    /* reserve room for the cache-entry header in front of the user data */
    size += FIELD_OFFSET(union allocator_cache_entry, alloc.mem);
    if (size < sizeof(*p))
        size = sizeof(*p);

    if (context->context.vtable != &ExternalContextBase_vtable) {
        /* unknown context type: no cache available */
        p = operator_new(size);
        p->alloc.bucket = -1;
    }else {
        int i;

        /* bucket i serves blocks of up to 1 << (i+4) bytes */
        C_ASSERT(sizeof(union allocator_cache_entry) <= 1 << 4);
        for(i=0; i<ARRAY_SIZE(context->allocator_cache); i++)
            if (1 << (i+4) >= size) break;

        if(i==ARRAY_SIZE(context->allocator_cache)) {
            /* too big for any bucket: plain allocation */
            p = operator_new(size);
            p->alloc.bucket = -1;
        }else if (context->allocator_cache[i]) {
            /* reuse a cached block of the right bucket */
            p = context->allocator_cache[i];
            context->allocator_cache[i] = p->free.next;
            p->alloc.bucket = i;
        }else {
            /* allocate a full bucket-sized block so it can be cached later */
            p = operator_new(1 << (i+4));
            p->alloc.bucket = i;
        }
    }

    TRACE("(%Iu) returning %p\n", size, p->alloc.mem);
    return p->alloc.mem;
}
1141 /* ?Free@Concurrency@@YAXPAX@Z */
1142 /* ?Free@Concurrency@@YAXPEAX@Z */
1143 void CDECL Concurrency_Free(void* mem)
1145 union allocator_cache_entry *p = (union allocator_cache_entry*)((char*)mem-FIELD_OFFSET(union allocator_cache_entry, alloc.mem));
1146 ExternalContextBase *context = (ExternalContextBase*)get_current_context();
1147 int bucket = p->alloc.bucket;
1149 TRACE("(%p)\n", mem);
1151 if (context->context.vtable != &ExternalContextBase_vtable) {
1152 operator_delete(p);
1153 }else {
1154 if(bucket >= 0 && bucket < ARRAY_SIZE(context->allocator_cache) &&
1155 (!context->allocator_cache[bucket] || context->allocator_cache[bucket]->free.depth < 20)) {
1156 p->free.next = context->allocator_cache[bucket];
1157 p->free.depth = p->free.next ? p->free.next->free.depth+1 : 0;
1158 context->allocator_cache[bucket] = p;
1159 }else {
1160 operator_delete(p);
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z */
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QEAAIW4PolicyElementKey@2@I@Z */
/* Sets a single policy element and returns its previous value.
 * Min/MaxConcurrency must go through SetConcurrencyLimits instead, so
 * they (and out-of-range keys) raise invalid_scheduler_policy_key;
 * out-of-range values raise invalid_scheduler_policy_value. */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetPolicyValue, 12)
unsigned int __thiscall SchedulerPolicy_SetPolicyValue(SchedulerPolicy *this,
        PolicyElementKey policy, unsigned int val)
{
    unsigned int ret;

    TRACE("(%p %d %d)\n", this, policy, val);

    /* key validation happens before any value validation */
    if (policy == MinConcurrency) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "MinConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    if (policy == MaxConcurrency) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "MaxConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    if (policy >= last_policy_id) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }

    switch(policy) {
    case SchedulerKind:
        /* only ThreadScheduler (0) is supported */
        if (val) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "SchedulerKind");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case TargetOversubscriptionFactor:
        /* factor of zero is meaningless */
        if (!val) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "TargetOversubscriptionFactor");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case ContextPriority:
        /* any Win32 thread priority, the two extremes, or "inherit" */
        if (((int)val < -7 /* THREAD_PRIORITY_REALTIME_LOWEST */
            || val > 6 /* THREAD_PRIORITY_REALTIME_HIGHEST */)
            && val != THREAD_PRIORITY_IDLE && val != THREAD_PRIORITY_TIME_CRITICAL
            && val != INHERIT_THREAD_PRIORITY) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "ContextPriority");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case SchedulingProtocol:
    case DynamicProgressFeedback:
    case WinRTInitialization:
        /* boolean policies; NOTE: the exception message says
         * "SchedulingProtocol" for all three keys */
        if (val != 0 && val != 1) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "SchedulingProtocol");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    default:
        break;
    }

    ret = this->policy_container->policies[policy];
    this->policy_container->policies[policy] = val;
    return ret;
}
/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z */
/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QEAAXII@Z */
/* Sets Min/MaxConcurrency together; min must not exceed max and max
 * must be non-zero, otherwise the matching policy exception is thrown. */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetConcurrencyLimits, 12)
void __thiscall SchedulerPolicy_SetConcurrencyLimits(SchedulerPolicy *this,
        unsigned int min_concurrency, unsigned int max_concurrency)
{
    TRACE("(%p %d %d)\n", this, min_concurrency, max_concurrency);

    if (min_concurrency > max_concurrency) {
        invalid_scheduler_policy_thread_specification e;
        invalid_scheduler_policy_thread_specification_ctor_str(&e, NULL);
        _CxxThrowException(&e, &invalid_scheduler_policy_thread_specification_exception_type);
    }
    if (!max_concurrency) {
        invalid_scheduler_policy_value e;
        invalid_scheduler_policy_value_ctor_str(&e, "MaxConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
    }

    this->policy_container->policies[MinConcurrency] = min_concurrency;
    this->policy_container->policies[MaxConcurrency] = max_concurrency;
}
/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z */
/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QEBAIW4PolicyElementKey@2@@Z */
/* Reads one policy element; unknown keys raise invalid_scheduler_policy_key. */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_GetPolicyValue, 8)
unsigned int __thiscall SchedulerPolicy_GetPolicyValue(
        const SchedulerPolicy *this, PolicyElementKey policy)
{
    TRACE("(%p %d)\n", this, policy);

    if (policy >= last_policy_id) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    return this->policy_container->policies[policy];
}
1273 /* ??0SchedulerPolicy@Concurrency@@QAE@XZ */
1274 /* ??0SchedulerPolicy@Concurrency@@QEAA@XZ */
1275 DEFINE_THISCALL_WRAPPER(SchedulerPolicy_ctor, 4)
1276 SchedulerPolicy* __thiscall SchedulerPolicy_ctor(SchedulerPolicy *this)
1278 TRACE("(%p)\n", this);
1280 this->policy_container = operator_new(sizeof(*this->policy_container));
1281 /* TODO: default values can probably be affected by CurrentScheduler */
1282 this->policy_container->policies[SchedulerKind] = 0;
1283 this->policy_container->policies[MaxConcurrency] = -1;
1284 this->policy_container->policies[MinConcurrency] = 1;
1285 this->policy_container->policies[TargetOversubscriptionFactor] = 1;
1286 this->policy_container->policies[LocalContextCacheSize] = 8;
1287 this->policy_container->policies[ContextStackSize] = 0;
1288 this->policy_container->policies[ContextPriority] = THREAD_PRIORITY_NORMAL;
1289 this->policy_container->policies[SchedulingProtocol] = 0;
1290 this->policy_container->policies[DynamicProgressFeedback] = 1;
1291 return this;
1294 /* ??0SchedulerPolicy@Concurrency@@QAA@IZZ */
1295 /* ??0SchedulerPolicy@Concurrency@@QEAA@_KZZ */
1296 /* TODO: don't leak policy_container on exception */
1297 SchedulerPolicy* WINAPIV SchedulerPolicy_ctor_policies(
1298 SchedulerPolicy *this, size_t n, ...)
1300 unsigned int min_concurrency, max_concurrency;
1301 va_list valist;
1302 size_t i;
1304 TRACE("(%p %Iu)\n", this, n);
1306 SchedulerPolicy_ctor(this);
1307 min_concurrency = this->policy_container->policies[MinConcurrency];
1308 max_concurrency = this->policy_container->policies[MaxConcurrency];
1310 va_start(valist, n);
1311 for(i=0; i<n; i++) {
1312 PolicyElementKey policy = va_arg(valist, PolicyElementKey);
1313 unsigned int val = va_arg(valist, unsigned int);
1315 if(policy == MinConcurrency)
1316 min_concurrency = val;
1317 else if(policy == MaxConcurrency)
1318 max_concurrency = val;
1319 else
1320 SchedulerPolicy_SetPolicyValue(this, policy, val);
1322 va_end(valist);
1324 SchedulerPolicy_SetConcurrencyLimits(this, min_concurrency, max_concurrency);
1325 return this;
/* ??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z */
/* ??4SchedulerPolicy@Concurrency@@QEAAAEAV01@AEBV01@@Z */
/* Copies all policy values from rhs into this (containers stay distinct). */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_op_assign, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_op_assign(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    memcpy(this->policy_container->policies, rhs->policy_container->policies,
            sizeof(this->policy_container->policies));
    return this;
}
/* ??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z */
/* ??0SchedulerPolicy@Concurrency@@QEAA@AEBV01@@Z */
/* Copy constructor: default-construct, then assign from rhs. */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_copy_ctor, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_copy_ctor(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    SchedulerPolicy_ctor(this);
    return SchedulerPolicy_op_assign(this, rhs);
}
/* ??1SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??1SchedulerPolicy@Concurrency@@QEAA@XZ */
/* Frees the heap-allocated policy container. */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_dtor, 4)
void __thiscall SchedulerPolicy_dtor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);
    operator_delete(this->policy_container);
}
1360 static void ThreadScheduler_dtor(ThreadScheduler *this)
1362 int i;
1363 struct scheduled_chore *sc, *next;
1365 if(this->ref != 0) WARN("ref = %ld\n", this->ref);
1366 SchedulerPolicy_dtor(&this->policy);
1368 for(i=0; i<this->shutdown_count; i++)
1369 SetEvent(this->shutdown_events[i]);
1370 operator_delete(this->shutdown_events);
1372 this->cs.DebugInfo->Spare[0] = 0;
1373 DeleteCriticalSection(&this->cs);
1375 if (!list_empty(&this->scheduled_chores))
1376 ERR("scheduled chore list is not empty\n");
1377 LIST_FOR_EACH_ENTRY_SAFE(sc, next, &this->scheduled_chores,
1378 struct scheduled_chore, entry)
1379 operator_delete(sc);
/* Returns the scheduler's unique id. */
DEFINE_THISCALL_WRAPPER(ThreadScheduler_Id, 4)
unsigned int __thiscall ThreadScheduler_Id(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->id;
}
/* Returns the virtual processor count computed in ThreadScheduler_ctor. */
DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetNumberOfVirtualProcessors, 4)
unsigned int __thiscall ThreadScheduler_GetNumberOfVirtualProcessors(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->virt_proc_no;
}
/* Copy-constructs the scheduler's policy into caller-provided storage. */
DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetPolicy, 8)
SchedulerPolicy* __thiscall ThreadScheduler_GetPolicy(
        const ThreadScheduler *this, SchedulerPolicy *ret)
{
    TRACE("(%p %p)\n", this, ret);
    return SchedulerPolicy_copy_ctor(ret, &this->policy);
}
/* Atomically increments the reference count; returns the new count. */
DEFINE_THISCALL_WRAPPER(ThreadScheduler_Reference, 4)
unsigned int __thiscall ThreadScheduler_Reference(ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return InterlockedIncrement(&this->ref);
}
1411 DEFINE_THISCALL_WRAPPER(ThreadScheduler_Release, 4)
1412 unsigned int __thiscall ThreadScheduler_Release(ThreadScheduler *this)
1414 unsigned int ret = InterlockedDecrement(&this->ref);
1416 TRACE("(%p)\n", this);
1418 if(!ret) {
1419 ThreadScheduler_dtor(this);
1420 operator_delete(this);
1422 return ret;
1425 DEFINE_THISCALL_WRAPPER(ThreadScheduler_RegisterShutdownEvent, 8)
1426 void __thiscall ThreadScheduler_RegisterShutdownEvent(ThreadScheduler *this, HANDLE event)
1428 HANDLE *shutdown_events;
1429 int size;
1431 TRACE("(%p %p)\n", this, event);
1433 EnterCriticalSection(&this->cs);
1435 size = this->shutdown_size ? this->shutdown_size * 2 : 1;
1436 shutdown_events = operator_new(size * sizeof(*shutdown_events));
1437 memcpy(shutdown_events, this->shutdown_events,
1438 this->shutdown_count * sizeof(*shutdown_events));
1439 operator_delete(this->shutdown_events);
1440 this->shutdown_size = size;
1441 this->shutdown_events = shutdown_events;
1442 this->shutdown_events[this->shutdown_count++] = event;
1444 LeaveCriticalSection(&this->cs);
1447 DEFINE_THISCALL_WRAPPER(ThreadScheduler_Attach, 4)
1448 void __thiscall ThreadScheduler_Attach(ThreadScheduler *this)
1450 ExternalContextBase *context = (ExternalContextBase*)get_current_context();
1452 TRACE("(%p)\n", this);
1454 if(context->context.vtable != &ExternalContextBase_vtable) {
1455 ERR("unknown context set\n");
1456 return;
1459 if(context->scheduler.scheduler == &this->scheduler) {
1460 improper_scheduler_attach e;
1461 improper_scheduler_attach_ctor_str(&e, NULL);
1462 _CxxThrowException(&e, &improper_scheduler_attach_exception_type);
1465 if(context->scheduler.scheduler) {
1466 struct scheduler_list *l = operator_new(sizeof(*l));
1467 *l = context->scheduler;
1468 context->scheduler.next = l;
1470 context->scheduler.scheduler = &this->scheduler;
1471 ThreadScheduler_Reference(this);
/* Stub: schedule groups with a location hint are not implemented. */
DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup_loc, 8)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup_loc(
        ThreadScheduler *this, /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return NULL;
}
/* Stub: schedule groups are not implemented. */
DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup, 4)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup(ThreadScheduler *this)
{
    FIXME("(%p) stub\n", this);
    return NULL;
}
/* Heap-allocated argument block handed to schedule_task_proc; owns one
 * reference on 'scheduler' (released by the callback). */
typedef struct
{
    void (__cdecl *proc)(void*);
    void *data;
    ThreadScheduler *scheduler;
} schedule_task_arg;

/* forward declaration; defined later in this file */
void __cdecl CurrentScheduler_Detach(void);
1498 static void WINAPI schedule_task_proc(PTP_CALLBACK_INSTANCE instance, void *context, PTP_WORK work)
1500 schedule_task_arg arg;
1501 BOOL detach = FALSE;
1503 arg = *(schedule_task_arg*)context;
1504 operator_delete(context);
1506 if(&arg.scheduler->scheduler != get_current_scheduler()) {
1507 ThreadScheduler_Attach(arg.scheduler);
1508 detach = TRUE;
1510 ThreadScheduler_Release(arg.scheduler);
1512 arg.proc(arg.data);
1514 if(detach)
1515 CurrentScheduler_Detach();
1518 DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask_loc, 16)
1519 void __thiscall ThreadScheduler_ScheduleTask_loc(ThreadScheduler *this,
1520 void (__cdecl *proc)(void*), void* data, /*location*/void *placement)
1522 schedule_task_arg *arg;
1523 TP_WORK *work;
1525 FIXME("(%p %p %p %p) stub\n", this, proc, data, placement);
1527 arg = operator_new(sizeof(*arg));
1528 arg->proc = proc;
1529 arg->data = data;
1530 arg->scheduler = this;
1531 ThreadScheduler_Reference(this);
1533 work = CreateThreadpoolWork(schedule_task_proc, arg, NULL);
1534 if(!work) {
1535 scheduler_resource_allocation_error e;
1537 ThreadScheduler_Release(this);
1538 operator_delete(arg);
1539 scheduler_resource_allocation_error_ctor_name(&e, NULL,
1540 HRESULT_FROM_WIN32(GetLastError()));
1541 _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
1543 SubmitThreadpoolWork(work);
1544 CloseThreadpoolWork(work);
/* Convenience overload: schedules without a location hint. */
DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask, 12)
void __thiscall ThreadScheduler_ScheduleTask(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data)
{
    FIXME("(%p %p %p) stub\n", this, proc, data);
    ThreadScheduler_ScheduleTask_loc(this, proc, data, NULL);
}
/* Stub: location support is not implemented; nothing is "available". */
DEFINE_THISCALL_WRAPPER(ThreadScheduler_IsAvailableLocation, 8)
bool __thiscall ThreadScheduler_IsAvailableLocation(
        const ThreadScheduler *this, const /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return FALSE;
}
/* MSVC-ABI vector destructor: flags bit 1 = free the object itself,
 * bit 2 = 'this' points into an array whose length precedes element 0. */
DEFINE_THISCALL_WRAPPER(ThreadScheduler_vector_dtor, 8)
Scheduler* __thiscall ThreadScheduler_vector_dtor(ThreadScheduler *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        /* destroy in reverse construction order */
        for(i=*ptr-1; i>=0; i--)
            ThreadScheduler_dtor(this+i);
        operator_delete(ptr);
    } else {
        ThreadScheduler_dtor(this);
        if(flags & 1)
            operator_delete(this);
    }

    return &this->scheduler;
}
1583 static ThreadScheduler* ThreadScheduler_ctor(ThreadScheduler *this,
1584 const SchedulerPolicy *policy)
1586 SYSTEM_INFO si;
1588 TRACE("(%p)->()\n", this);
1590 this->scheduler.vtable = &ThreadScheduler_vtable;
1591 this->ref = 1;
1592 this->id = InterlockedIncrement(&scheduler_id);
1593 SchedulerPolicy_copy_ctor(&this->policy, policy);
1595 GetSystemInfo(&si);
1596 this->virt_proc_no = SchedulerPolicy_GetPolicyValue(&this->policy, MaxConcurrency);
1597 if(this->virt_proc_no > si.dwNumberOfProcessors)
1598 this->virt_proc_no = si.dwNumberOfProcessors;
1600 this->shutdown_count = this->shutdown_size = 0;
1601 this->shutdown_events = NULL;
1603 InitializeCriticalSection(&this->cs);
1604 this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": ThreadScheduler");
1606 list_init(&this->scheduled_chores);
1607 return this;
1610 /* ?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z */
1611 /* ?Create@Scheduler@Concurrency@@SAPEAV12@AEBVSchedulerPolicy@2@@Z */
1612 Scheduler* __cdecl Scheduler_Create(const SchedulerPolicy *policy)
1614 ThreadScheduler *ret;
1616 TRACE("(%p)\n", policy);
1618 ret = operator_new(sizeof(*ret));
1619 return &ThreadScheduler_ctor(ret, policy)->scheduler;
1622 /* ?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ */
1623 void __cdecl Scheduler_ResetDefaultSchedulerPolicy(void)
1625 TRACE("()\n");
1627 EnterCriticalSection(&default_scheduler_cs);
1628 if(default_scheduler_policy.policy_container)
1629 SchedulerPolicy_dtor(&default_scheduler_policy);
1630 SchedulerPolicy_ctor(&default_scheduler_policy);
1631 LeaveCriticalSection(&default_scheduler_cs);
1634 /* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
1635 /* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
1636 void __cdecl Scheduler_SetDefaultSchedulerPolicy(const SchedulerPolicy *policy)
1638 TRACE("(%p)\n", policy);
1640 EnterCriticalSection(&default_scheduler_cs);
1641 if(!default_scheduler_policy.policy_container)
1642 SchedulerPolicy_copy_ctor(&default_scheduler_policy, policy);
1643 else
1644 SchedulerPolicy_op_assign(&default_scheduler_policy, policy);
1645 LeaveCriticalSection(&default_scheduler_cs);
1648 /* ?Create@CurrentScheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
1649 /* ?Create@CurrentScheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
1650 void __cdecl CurrentScheduler_Create(const SchedulerPolicy *policy)
1652 Scheduler *scheduler;
1654 TRACE("(%p)\n", policy);
1656 scheduler = Scheduler_Create(policy);
1657 call_Scheduler_Attach(scheduler);
1660 /* ?Detach@CurrentScheduler@Concurrency@@SAXXZ */
1661 void __cdecl CurrentScheduler_Detach(void)
1663 ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();
1665 TRACE("()\n");
1667 if(!context) {
1668 improper_scheduler_detach e;
1669 improper_scheduler_detach_ctor_str(&e, NULL);
1670 _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
1673 if(context->context.vtable != &ExternalContextBase_vtable) {
1674 ERR("unknown context set\n");
1675 return;
1678 if(!context->scheduler.next) {
1679 improper_scheduler_detach e;
1680 improper_scheduler_detach_ctor_str(&e, NULL);
1681 _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
1684 call_Scheduler_Release(context->scheduler.scheduler);
1685 if(!context->scheduler.next) {
1686 context->scheduler.scheduler = NULL;
1687 }else {
1688 struct scheduler_list *entry = context->scheduler.next;
1689 context->scheduler.scheduler = entry->scheduler;
1690 context->scheduler.next = entry->next;
1691 operator_delete(entry);
1695 static void create_default_scheduler(void)
1697 if(default_scheduler)
1698 return;
1700 EnterCriticalSection(&default_scheduler_cs);
1701 if(!default_scheduler) {
1702 ThreadScheduler *scheduler;
1704 if(!default_scheduler_policy.policy_container)
1705 SchedulerPolicy_ctor(&default_scheduler_policy);
1707 scheduler = operator_new(sizeof(*scheduler));
1708 ThreadScheduler_ctor(scheduler, &default_scheduler_policy);
1709 default_scheduler = scheduler;
1711 LeaveCriticalSection(&default_scheduler_cs);
/* ?Get@CurrentScheduler@Concurrency@@SAPAVScheduler@2@XZ */
/* ?Get@CurrentScheduler@Concurrency@@SAPEAVScheduler@2@XZ */
/* Returns the calling context's current scheduler, creating the default
 * one if necessary (via get_current_scheduler). */
Scheduler* __cdecl CurrentScheduler_Get(void)
{
    TRACE("()\n");
    return get_current_scheduler();
}
#if _MSVCR_VER > 100
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@AAVlocation@2@@Z */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@AEAVlocation@2@@Z */
/* Forwards to the current scheduler's virtual CreateScheduleGroup(loc). */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup_loc(/*location*/void *placement)
{
    TRACE("(%p)\n", placement);
    return call_Scheduler_CreateScheduleGroup_loc(get_current_scheduler(), placement);
}
#endif
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@XZ */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@XZ */
/* Forwards to the current scheduler's virtual CreateScheduleGroup. */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup(void)
{
    TRACE("()\n");
    return call_Scheduler_CreateScheduleGroup(get_current_scheduler());
}
1740 /* ?GetNumberOfVirtualProcessors@CurrentScheduler@Concurrency@@SAIXZ */
1741 unsigned int __cdecl CurrentScheduler_GetNumberOfVirtualProcessors(void)
1743 Scheduler *scheduler = try_get_current_scheduler();
1745 TRACE("()\n");
1747 if(!scheduler)
1748 return -1;
1749 return call_Scheduler_GetNumberOfVirtualProcessors(scheduler);
/* ?GetPolicy@CurrentScheduler@Concurrency@@SA?AVSchedulerPolicy@2@XZ */
/* Copies the current scheduler's policy into caller-provided storage. */
SchedulerPolicy* __cdecl CurrentScheduler_GetPolicy(SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);
    return call_Scheduler_GetPolicy(get_current_scheduler(), policy);
}
1759 /* ?Id@CurrentScheduler@Concurrency@@SAIXZ */
1760 unsigned int __cdecl CurrentScheduler_Id(void)
1762 Scheduler *scheduler = try_get_current_scheduler();
1764 TRACE("()\n");
1766 if(!scheduler)
1767 return -1;
1768 return call_Scheduler_Id(scheduler);
#if _MSVCR_VER > 100
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NABVlocation@2@@Z */
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NAEBVlocation@2@@Z */
/* Forwards to the current scheduler; FALSE when none is attached. */
bool __cdecl CurrentScheduler_IsAvailableLocation(const /*location*/void *placement)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("(%p)\n", placement);

    return scheduler ? call_Scheduler_IsAvailableLocation(scheduler, placement) : FALSE;
}
#endif
/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPAX@Z */
/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPEAX@Z */
/* Registers 'event' to be signaled when the current scheduler shuts down. */
void __cdecl CurrentScheduler_RegisterShutdownEvent(HANDLE event)
{
    TRACE("(%p)\n", event);
    call_Scheduler_RegisterShutdownEvent(get_current_scheduler(), event);
}
#if _MSVCR_VER > 100
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0AAVlocation@2@@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0AEAVlocation@2@@Z */
/* Schedules proc(data) on the current scheduler with a location hint. */
void __cdecl CurrentScheduler_ScheduleTask_loc(void (__cdecl *proc)(void*),
        void *data, /*location*/void *placement)
{
    TRACE("(%p %p %p)\n", proc, data, placement);
    call_Scheduler_ScheduleTask_loc(get_current_scheduler(), proc, data, placement);
}
#endif
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0@Z */
/* Schedules proc(data) on the current scheduler. */
void __cdecl CurrentScheduler_ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    call_Scheduler_ScheduleTask(get_current_scheduler(), proc, data);
}
/* ??0_Scheduler@details@Concurrency@@QAE@PAVScheduler@2@@Z */
/* ??0_Scheduler@details@Concurrency@@QEAA@PEAVScheduler@2@@Z */
/* Wraps an existing Scheduler pointer (no reference is taken here). */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor_sched, 8)
_Scheduler* __thiscall _Scheduler_ctor_sched(_Scheduler *this, Scheduler *scheduler)
{
    TRACE("(%p %p)\n", this, scheduler);

    this->scheduler = scheduler;
    return this;
}
/* ??_F_Scheduler@details@Concurrency@@QAEXXZ */
/* ??_F_Scheduler@details@Concurrency@@QEAAXXZ */
/* Default constructor: wraps a NULL scheduler. */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor, 4)
_Scheduler* __thiscall _Scheduler_ctor(_Scheduler *this)
{
    return _Scheduler_ctor_sched(this, NULL);
}
/* ?_GetScheduler@_Scheduler@details@Concurrency@@QAEPAVScheduler@3@XZ */
/* ?_GetScheduler@_Scheduler@details@Concurrency@@QEAAPEAVScheduler@3@XZ */
/* Returns the wrapped Scheduler pointer. */
DEFINE_THISCALL_WRAPPER(_Scheduler__GetScheduler, 4)
Scheduler* __thiscall _Scheduler__GetScheduler(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return this->scheduler;
}
/* ?_Reference@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Reference@_Scheduler@details@Concurrency@@QEAAIXZ */
/* Forwards to the wrapped scheduler's virtual Reference. */
DEFINE_THISCALL_WRAPPER(_Scheduler__Reference, 4)
unsigned int __thiscall _Scheduler__Reference(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Reference(this->scheduler);
}
/* ?_Release@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Release@_Scheduler@details@Concurrency@@QEAAIXZ */
/* Forwards to the wrapped scheduler's virtual Release. */
DEFINE_THISCALL_WRAPPER(_Scheduler__Release, 4)
unsigned int __thiscall _Scheduler__Release(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Release(this->scheduler);
}
/* ?_Get@_CurrentScheduler@details@Concurrency@@SA?AV_Scheduler@23@XZ */
/* Returns (via 'ret') a _Scheduler wrapping the current scheduler. */
_Scheduler* __cdecl _CurrentScheduler__Get(_Scheduler *ret)
{
    TRACE("()\n");
    return _Scheduler_ctor_sched(ret, get_current_scheduler());
}
/* ?_GetNumberOfVirtualProcessors@_CurrentScheduler@details@Concurrency@@SAIXZ */
/* Like the CurrentScheduler variant, but first forces creation of the
 * default scheduler via get_current_scheduler(). */
unsigned int __cdecl _CurrentScheduler__GetNumberOfVirtualProcessors(void)
{
    TRACE("()\n");
    get_current_scheduler();
    return CurrentScheduler_GetNumberOfVirtualProcessors();
}
/* ?_Id@_CurrentScheduler@details@Concurrency@@SAIXZ */
/* Like CurrentScheduler_Id, but first forces creation of the default
 * scheduler via get_current_scheduler(). */
unsigned int __cdecl _CurrentScheduler__Id(void)
{
    TRACE("()\n");
    get_current_scheduler();
    return CurrentScheduler_Id();
}
/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPEAX@Z0@Z */
/* Forwards to CurrentScheduler_ScheduleTask. */
void __cdecl _CurrentScheduler__ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    CurrentScheduler_ScheduleTask(proc, data);
}
/* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
/* Returns the default spin count: computed once, 4000 on multiprocessor
 * machines and 0 on uniprocessor ones (spinning is pointless there). */
unsigned int __cdecl SpinCount__Value(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors>1 ? 4000 : 0;
    }

    return val;
}
/* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
/* Constructs a _SpinWait<1> (yielding variant): unknown==1 makes
 * SpinWait__DoYield actually call the yield function. */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 1;
    this->yield_func = yf;
    return this;
}
/* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
/* Constructs a _SpinWait<0> (non-yielding variant): unknown==0 makes
 * SpinWait__DoYield a no-op. */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 0;
    this->yield_func = yf;
    return this;
}
1933 /* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
1934 /* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
1935 /* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
1936 /* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
1937 DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
1938 void __thiscall SpinWait_dtor(SpinWait *this)
1940 TRACE("(%p)\n", this);
1943 /* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
1944 /* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
1945 /* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
1946 /* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
1947 DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
1948 void __thiscall SpinWait__DoYield(SpinWait *this)
1950 TRACE("(%p)\n", this);
1952 if(this->unknown)
1953 this->yield_func();
1956 /* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
1957 /* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
1958 /* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
1959 /* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
1960 DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
1961 ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
1963 TRACE("(%p)\n", this);
1964 return 1;
1967 /* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
1968 /* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
1969 /* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
1970 /* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
1971 DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
1972 void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
1974 TRACE("(%p %d)\n", this, spin);
1976 this->spin = spin;
1977 this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
1980 /* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
1981 /* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
1982 /* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
1983 /* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
1984 DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
1985 void __thiscall SpinWait__Reset(SpinWait *this)
1987 SpinWait__SetSpinCount(this, SpinCount__Value());
1990 /* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
1991 /* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
1992 /* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
1993 /* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
1994 DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
1995 bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
1997 TRACE("(%p)\n", this);
1999 this->spin--;
2000 return this->spin > 0;
2003 /* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
2004 /* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
2005 /* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
2006 /* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
2007 DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
2008 bool __thiscall SpinWait__SpinOnce(SpinWait *this)
2010 switch(this->state) {
2011 case SPINWAIT_INIT:
2012 SpinWait__Reset(this);
2013 /* fall through */
2014 case SPINWAIT_SPIN:
2015 InterlockedDecrement((LONG*)&this->spin);
2016 if(!this->spin)
2017 this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
2018 return TRUE;
2019 case SPINWAIT_YIELD:
2020 this->state = SPINWAIT_DONE;
2021 this->yield_func();
2022 return TRUE;
2023 default:
2024 SpinWait__Reset(this);
2025 return FALSE;
#if _MSVCR_VER >= 110

/* ??0_StructuredTaskCollection@details@Concurrency@@QAE@PAV_CancellationTokenState@12@@Z */
/* ??0_StructuredTaskCollection@details@Concurrency@@QEAA@PEAV_CancellationTokenState@12@@Z */
DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection_ctor, 8)
_StructuredTaskCollection* __thiscall _StructuredTaskCollection_ctor(
        _StructuredTaskCollection *this, /*_CancellationTokenState*/void *token)
{
    TRACE("(%p)\n", this);

    if (token)
        FIXME("_StructuredTaskCollection with cancellation token not implemented!\n");

    /* Zero the whole object; FINISHED_INITIAL marks "no chore completed yet". */
    memset(this, 0, sizeof(*this));
    this->finished = FINISHED_INITIAL;
    return this;
}

#endif /* _MSVCR_VER >= 110 */

#if _MSVCR_VER >= 120

/* ??1_StructuredTaskCollection@details@Concurrency@@QAA@XZ */
/* ??1_StructuredTaskCollection@details@Concurrency@@QAE@XZ */
/* ??1_StructuredTaskCollection@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection_dtor, 4)
void __thiscall _StructuredTaskCollection_dtor(_StructuredTaskCollection *this)
{
    FIXME("(%p): stub!\n", this);

    /* Destroying a collection with outstanding chores (and no exception in
     * flight) is a usage error; native throws missing_wait. */
    if (this->count && !__uncaught_exception()) {
        missing_wait e;
        missing_wait_ctor_str(&e, "Missing call to _RunAndWait");
        _CxxThrowException(&e, &missing_wait_exception_type);
    }
}

#endif /* _MSVCR_VER >= 120 */
2067 static ThreadScheduler *get_thread_scheduler_from_context(Context *context)
2069 Scheduler *scheduler = get_scheduler_from_context(context);
2070 if (scheduler && scheduler->vtable == &ThreadScheduler_vtable)
2071 return (ThreadScheduler*)scheduler;
2072 return NULL;
2075 struct execute_chore_data {
2076 _UnrealizedChore *chore;
2077 _StructuredTaskCollection *task_collection;
2080 /* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QAAXXZ */
2081 /* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QAEXXZ */
2082 /* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QEAAXXZ */
2083 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Cancel, 4)
2084 void __thiscall _StructuredTaskCollection__Cancel(
2085 _StructuredTaskCollection *this)
2087 ThreadScheduler *scheduler;
2088 void *prev_exception, *new_exception;
2089 struct scheduled_chore *sc, *next;
2090 LONG removed = 0;
2091 LONG prev_finished, new_finished;
2093 TRACE("(%p)\n", this);
2095 if (!this->context)
2096 this->context = get_current_context();
2097 scheduler = get_thread_scheduler_from_context(this->context);
2098 if (!scheduler)
2099 return;
2101 new_exception = this->exception;
2102 do {
2103 prev_exception = new_exception;
2104 if ((ULONG_PTR)prev_exception & STRUCTURED_TASK_COLLECTION_CANCELLED)
2105 return;
2106 new_exception = (void*)((ULONG_PTR)prev_exception |
2107 STRUCTURED_TASK_COLLECTION_CANCELLED);
2108 } while ((new_exception = InterlockedCompareExchangePointer(
2109 &this->exception, new_exception, prev_exception))
2110 != prev_exception);
2112 EnterCriticalSection(&scheduler->cs);
2113 LIST_FOR_EACH_ENTRY_SAFE(sc, next, &scheduler->scheduled_chores,
2114 struct scheduled_chore, entry) {
2115 if (sc->chore->task_collection != this)
2116 continue;
2117 sc->chore->task_collection = NULL;
2118 list_remove(&sc->entry);
2119 removed++;
2120 operator_delete(sc);
2122 LeaveCriticalSection(&scheduler->cs);
2123 if (!removed)
2124 return;
2126 new_finished = this->finished;
2127 do {
2128 prev_finished = new_finished;
2129 if (prev_finished == FINISHED_INITIAL)
2130 new_finished = removed;
2131 else
2132 new_finished = prev_finished + removed;
2133 } while ((new_finished = InterlockedCompareExchange(&this->finished,
2134 new_finished, prev_finished)) != prev_finished);
2135 RtlWakeAddressAll((LONG*)&this->finished);
2138 static LONG CALLBACK execute_chore_except(EXCEPTION_POINTERS *pexc, void *_data)
2140 struct execute_chore_data *data = _data;
2141 void *prev_exception, *new_exception;
2142 exception_ptr *ptr;
2144 if (pexc->ExceptionRecord->ExceptionCode != CXX_EXCEPTION)
2145 return EXCEPTION_CONTINUE_SEARCH;
2147 _StructuredTaskCollection__Cancel(data->task_collection);
2149 ptr = operator_new(sizeof(*ptr));
2150 __ExceptionPtrCreate(ptr);
2151 exception_ptr_from_record(ptr, pexc->ExceptionRecord);
2153 new_exception = data->task_collection->exception;
2154 do {
2155 if ((ULONG_PTR)new_exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK) {
2156 __ExceptionPtrDestroy(ptr);
2157 operator_delete(ptr);
2158 break;
2160 prev_exception = new_exception;
2161 new_exception = (void*)((ULONG_PTR)new_exception | (ULONG_PTR)ptr);
2162 } while ((new_exception = InterlockedCompareExchangePointer(
2163 &data->task_collection->exception, new_exception,
2164 prev_exception)) != prev_exception);
2165 data->task_collection->event = 0;
2166 return EXCEPTION_EXECUTE_HANDLER;
2169 static void execute_chore(_UnrealizedChore *chore,
2170 _StructuredTaskCollection *task_collection)
2172 struct execute_chore_data data = { chore, task_collection };
2174 TRACE("(%p %p)\n", chore, task_collection);
2176 __TRY
2178 if (!((ULONG_PTR)task_collection->exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK) &&
2179 chore->chore_proc)
2180 chore->chore_proc(chore);
2182 __EXCEPT_CTX(execute_chore_except, &data)
2185 __ENDTRY
2188 static void CALLBACK chore_wrapper_finally(BOOL normal, void *data)
2190 _UnrealizedChore *chore = data;
2191 LONG count, prev_finished, new_finished;
2192 volatile LONG *ptr;
2194 TRACE("(%u %p)\n", normal, data);
2196 if (!chore->task_collection)
2197 return;
2198 ptr = &chore->task_collection->finished;
2199 count = chore->task_collection->count;
2200 chore->task_collection = NULL;
2202 do {
2203 prev_finished = *ptr;
2204 if (prev_finished == FINISHED_INITIAL)
2205 new_finished = 1;
2206 else
2207 new_finished = prev_finished + 1;
2208 } while (InterlockedCompareExchange(ptr, new_finished, prev_finished)
2209 != prev_finished);
2210 if (new_finished >= count)
2211 RtlWakeAddressSingle((LONG*)ptr);
2214 static void __cdecl chore_wrapper(_UnrealizedChore *chore)
2216 __TRY
2218 execute_chore(chore, chore->task_collection);
2220 __FINALLY_CTX(chore_wrapper_finally, chore)
2223 static BOOL pick_and_execute_chore(ThreadScheduler *scheduler)
2225 struct list *entry;
2226 struct scheduled_chore *sc;
2227 _UnrealizedChore *chore;
2229 TRACE("(%p)\n", scheduler);
2231 if (scheduler->scheduler.vtable != &ThreadScheduler_vtable)
2233 ERR("unknown scheduler set\n");
2234 return FALSE;
2237 EnterCriticalSection(&scheduler->cs);
2238 entry = list_head(&scheduler->scheduled_chores);
2239 if (entry)
2240 list_remove(entry);
2241 LeaveCriticalSection(&scheduler->cs);
2242 if (!entry)
2243 return FALSE;
2245 sc = LIST_ENTRY(entry, struct scheduled_chore, entry);
2246 chore = sc->chore;
2247 operator_delete(sc);
2249 chore->chore_wrapper(chore);
2250 return TRUE;
2253 static void __cdecl _StructuredTaskCollection_scheduler_cb(void *data)
2255 pick_and_execute_chore((ThreadScheduler*)get_current_scheduler());
2258 static bool schedule_chore(_StructuredTaskCollection *this,
2259 _UnrealizedChore *chore, Scheduler **pscheduler)
2261 struct scheduled_chore *sc;
2262 ThreadScheduler *scheduler;
2264 if (chore->task_collection) {
2265 invalid_multiple_scheduling e;
2266 invalid_multiple_scheduling_ctor_str(&e, "Chore scheduled multiple times");
2267 _CxxThrowException(&e, &invalid_multiple_scheduling_exception_type);
2268 return FALSE;
2271 if (!this->context)
2272 this->context = get_current_context();
2273 scheduler = get_thread_scheduler_from_context(this->context);
2274 if (!scheduler) {
2275 ERR("unknown context or scheduler set\n");
2276 return FALSE;
2279 sc = operator_new(sizeof(*sc));
2280 sc->chore = chore;
2282 chore->task_collection = this;
2283 chore->chore_wrapper = chore_wrapper;
2284 InterlockedIncrement(&this->count);
2286 EnterCriticalSection(&scheduler->cs);
2287 list_add_head(&scheduler->scheduled_chores, &sc->entry);
2288 LeaveCriticalSection(&scheduler->cs);
2289 *pscheduler = &scheduler->scheduler;
2290 return TRUE;
2293 #if _MSVCR_VER >= 110
2295 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAAXPAV_UnrealizedChore@23@PAVlocation@3@@Z */
2296 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@PAVlocation@3@@Z */
2297 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QEAAXPEAV_UnrealizedChore@23@PEAVlocation@3@@Z */
2298 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Schedule_loc, 12)
2299 void __thiscall _StructuredTaskCollection__Schedule_loc(
2300 _StructuredTaskCollection *this, _UnrealizedChore *chore,
2301 /*location*/void *placement)
2303 Scheduler *scheduler;
2305 TRACE("(%p %p %p)\n", this, chore, placement);
2307 if (schedule_chore(this, chore, &scheduler))
2309 call_Scheduler_ScheduleTask_loc(scheduler,
2310 _StructuredTaskCollection_scheduler_cb, NULL, placement);
2314 #endif /* _MSVCR_VER >= 110 */
2316 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAAXPAV_UnrealizedChore@23@@Z */
2317 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@@Z */
2318 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QEAAXPEAV_UnrealizedChore@23@@Z */
2319 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Schedule, 8)
2320 void __thiscall _StructuredTaskCollection__Schedule(
2321 _StructuredTaskCollection *this, _UnrealizedChore *chore)
2323 Scheduler *scheduler;
2325 TRACE("(%p %p)\n", this, chore);
2327 if (schedule_chore(this, chore, &scheduler))
2329 call_Scheduler_ScheduleTask(scheduler,
2330 _StructuredTaskCollection_scheduler_cb, NULL);
2334 static void CALLBACK exception_ptr_rethrow_finally(BOOL normal, void *data)
2336 exception_ptr *ep = data;
2338 TRACE("(%u %p)\n", normal, data);
2340 __ExceptionPtrDestroy(ep);
2341 operator_delete(ep);
2344 /* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QAA?AW4_TaskCollectionStatus@23@PAV_UnrealizedChore@23@@Z */
2345 /* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QAG?AW4_TaskCollectionStatus@23@PAV_UnrealizedChore@23@@Z */
2346 /* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QEAA?AW4_TaskCollectionStatus@23@PEAV_UnrealizedChore@23@@Z */
2347 _TaskCollectionStatus __stdcall _StructuredTaskCollection__RunAndWait(
2348 _StructuredTaskCollection *this, _UnrealizedChore *chore)
2350 LONG expected, val;
2351 ULONG_PTR exception;
2352 exception_ptr *ep;
2354 TRACE("(%p %p)\n", this, chore);
2356 if (chore) {
2357 if (chore->task_collection) {
2358 invalid_multiple_scheduling e;
2359 invalid_multiple_scheduling_ctor_str(&e, "Chore scheduled multiple times");
2360 _CxxThrowException(&e, &invalid_multiple_scheduling_exception_type);
2362 execute_chore(chore, this);
2365 if (this->context) {
2366 ThreadScheduler *scheduler = get_thread_scheduler_from_context(this->context);
2367 if (scheduler) {
2368 while (pick_and_execute_chore(scheduler)) ;
2372 expected = this->count ? this->count : FINISHED_INITIAL;
2373 while ((val = this->finished) != expected)
2374 RtlWaitOnAddress((LONG*)&this->finished, &val, sizeof(val), NULL);
2376 this->finished = 0;
2377 this->count = 0;
2379 exception = (ULONG_PTR)this->exception;
2380 ep = (exception_ptr*)(exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK);
2381 if (ep) {
2382 this->exception = 0;
2383 __TRY
2385 __ExceptionPtrRethrow(ep);
2387 __FINALLY_CTX(exception_ptr_rethrow_finally, ep)
2389 if (exception & STRUCTURED_TASK_COLLECTION_CANCELLED)
2390 return TASK_COLLECTION_CANCELLED;
2391 return TASK_COLLECTION_SUCCESS;
2394 /* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QAA_NXZ */
2395 /* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QAE_NXZ */
2396 /* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QEAA_NXZ */
2397 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__IsCanceling, 4)
2398 bool __thiscall _StructuredTaskCollection__IsCanceling(
2399 _StructuredTaskCollection *this)
2401 TRACE("(%p)\n", this);
2402 return !!((ULONG_PTR)this->exception & STRUCTURED_TASK_COLLECTION_CANCELLED);
2405 /* ?_CheckTaskCollection@_UnrealizedChore@details@Concurrency@@IAEXXZ */
2406 /* ?_CheckTaskCollection@_UnrealizedChore@details@Concurrency@@IEAAXXZ */
2407 DEFINE_THISCALL_WRAPPER(_UnrealizedChore__CheckTaskCollection, 4)
2408 void __thiscall _UnrealizedChore__CheckTaskCollection(_UnrealizedChore *this)
2410 FIXME("() stub\n");
2413 /* ??0critical_section@Concurrency@@QAE@XZ */
2414 /* ??0critical_section@Concurrency@@QEAA@XZ */
2415 DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
2416 critical_section* __thiscall critical_section_ctor(critical_section *this)
2418 TRACE("(%p)\n", this);
2420 if(!keyed_event) {
2421 HANDLE event;
2423 NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
2424 if(InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
2425 NtClose(event);
2428 this->unk_thread_id = 0;
2429 this->head = this->tail = NULL;
2430 return this;
2433 /* ??1critical_section@Concurrency@@QAE@XZ */
2434 /* ??1critical_section@Concurrency@@QEAA@XZ */
2435 DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
2436 void __thiscall critical_section_dtor(critical_section *this)
2438 TRACE("(%p)\n", this);
2441 static void __cdecl spin_wait_yield(void)
2443 Sleep(0);
2446 static inline void spin_wait_for_next_cs(cs_queue *q)
2448 SpinWait sw;
2450 if(q->next) return;
2452 SpinWait_ctor(&sw, &spin_wait_yield);
2453 SpinWait__Reset(&sw);
2454 while(!q->next)
2455 SpinWait__SpinOnce(&sw);
2456 SpinWait_dtor(&sw);
2459 static inline void cs_set_head(critical_section *cs, cs_queue *q)
2461 cs->unk_thread_id = GetCurrentThreadId();
2462 cs->unk_active.next = q->next;
2463 cs->head = &cs->unk_active;
2466 static inline void cs_lock(critical_section *cs, cs_queue *q)
2468 cs_queue *last;
2470 if(cs->unk_thread_id == GetCurrentThreadId()) {
2471 improper_lock e;
2472 improper_lock_ctor_str(&e, "Already locked");
2473 _CxxThrowException(&e, &improper_lock_exception_type);
2476 memset(q, 0, sizeof(*q));
2477 last = InterlockedExchangePointer(&cs->tail, q);
2478 if(last) {
2479 last->next = q;
2480 NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
2483 cs_set_head(cs, q);
2484 if(InterlockedCompareExchangePointer(&cs->tail, &cs->unk_active, q) != q) {
2485 spin_wait_for_next_cs(q);
2486 cs->unk_active.next = q->next;
2490 /* ?lock@critical_section@Concurrency@@QAEXXZ */
2491 /* ?lock@critical_section@Concurrency@@QEAAXXZ */
2492 DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
2493 void __thiscall critical_section_lock(critical_section *this)
2495 cs_queue q;
2497 TRACE("(%p)\n", this);
2498 cs_lock(this, &q);
2501 /* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
2502 /* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
2503 DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
2504 bool __thiscall critical_section_try_lock(critical_section *this)
2506 cs_queue q;
2508 TRACE("(%p)\n", this);
2510 if(this->unk_thread_id == GetCurrentThreadId())
2511 return FALSE;
2513 memset(&q, 0, sizeof(q));
2514 if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
2515 cs_set_head(this, &q);
2516 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
2517 spin_wait_for_next_cs(&q);
2518 this->unk_active.next = q.next;
2520 return TRUE;
2522 return FALSE;
2525 /* ?unlock@critical_section@Concurrency@@QAEXXZ */
2526 /* ?unlock@critical_section@Concurrency@@QEAAXXZ */
2527 DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
2528 void __thiscall critical_section_unlock(critical_section *this)
2530 TRACE("(%p)\n", this);
2532 this->unk_thread_id = 0;
2533 this->head = NULL;
2534 if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
2535 == &this->unk_active) return;
2536 spin_wait_for_next_cs(&this->unk_active);
2538 #if _MSVCR_VER >= 110
2539 while(1) {
2540 cs_queue *next;
2542 if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
2543 break;
2545 next = this->unk_active.next;
2546 if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
2547 HeapFree(GetProcessHeap(), 0, next);
2548 return;
2550 spin_wait_for_next_cs(next);
2552 this->unk_active.next = next->next;
2553 HeapFree(GetProcessHeap(), 0, next);
2555 #endif
2557 NtReleaseKeyedEvent(keyed_event, this->unk_active.next, 0, NULL);
2560 /* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
2561 /* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
2562 DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
2563 critical_section* __thiscall critical_section_native_handle(critical_section *this)
2565 TRACE("(%p)\n", this);
2566 return this;
2569 #if _MSVCR_VER >= 110
2570 /* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
2571 /* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
2572 DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
2573 bool __thiscall critical_section_try_lock_for(
2574 critical_section *this, unsigned int timeout)
2576 cs_queue *q, *last;
2578 TRACE("(%p %d)\n", this, timeout);
2580 if(this->unk_thread_id == GetCurrentThreadId()) {
2581 improper_lock e;
2582 improper_lock_ctor_str(&e, "Already locked");
2583 _CxxThrowException(&e, &improper_lock_exception_type);
2586 if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
2587 return critical_section_try_lock(this);
2589 last = InterlockedExchangePointer(&this->tail, q);
2590 if(last) {
2591 LARGE_INTEGER to;
2592 NTSTATUS status;
2593 FILETIME ft;
2595 last->next = q;
2596 GetSystemTimeAsFileTime(&ft);
2597 to.QuadPart = ((LONGLONG)ft.dwHighDateTime << 32) +
2598 ft.dwLowDateTime + (LONGLONG)timeout * TICKSPERMSEC;
2599 status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
2600 if(status == STATUS_TIMEOUT) {
2601 if(!InterlockedExchange(&q->free, TRUE))
2602 return FALSE;
2603 /* A thread has signaled the event and is block waiting. */
2604 /* We need to catch the event to wake the thread. */
2605 NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
2609 cs_set_head(this, q);
2610 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
2611 spin_wait_for_next_cs(q);
2612 this->unk_active.next = q->next;
2615 HeapFree(GetProcessHeap(), 0, q);
2616 return TRUE;
2618 #endif
2620 /* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
2621 /* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
2622 DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
2623 critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
2624 critical_section_scoped_lock *this, critical_section *cs)
2626 TRACE("(%p %p)\n", this, cs);
2627 this->cs = cs;
2628 cs_lock(this->cs, &this->lock.q);
2629 return this;
2632 /* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
2633 /* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
2634 DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
2635 void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
2637 TRACE("(%p)\n", this);
2638 critical_section_unlock(this->cs);
2641 /* ??0_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
2642 /* ??0_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2643 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock_ctor, 4)
2644 _NonReentrantPPLLock* __thiscall _NonReentrantPPLLock_ctor(_NonReentrantPPLLock *this)
2646 TRACE("(%p)\n", this);
2648 critical_section_ctor(&this->cs);
2649 return this;
2652 /* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
2653 /* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
2654 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Acquire, 8)
2655 void __thiscall _NonReentrantPPLLock__Acquire(_NonReentrantPPLLock *this, cs_queue *q)
2657 TRACE("(%p %p)\n", this, q);
2658 cs_lock(&this->cs, q);
2661 /* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QAEXXZ */
2662 /* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QEAAXXZ */
2663 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Release, 4)
2664 void __thiscall _NonReentrantPPLLock__Release(_NonReentrantPPLLock *this)
2666 TRACE("(%p)\n", this);
2667 critical_section_unlock(&this->cs);
2670 /* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
2671 /* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
2672 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_ctor, 8)
2673 _NonReentrantPPLLock__Scoped_lock* __thiscall _NonReentrantPPLLock__Scoped_lock_ctor(
2674 _NonReentrantPPLLock__Scoped_lock *this, _NonReentrantPPLLock *lock)
2676 TRACE("(%p %p)\n", this, lock);
2678 this->lock = lock;
2679 _NonReentrantPPLLock__Acquire(this->lock, &this->wait.q);
2680 return this;
2683 /* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
2684 /* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2685 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_dtor, 4)
2686 void __thiscall _NonReentrantPPLLock__Scoped_lock_dtor(_NonReentrantPPLLock__Scoped_lock *this)
2688 TRACE("(%p)\n", this);
2690 _NonReentrantPPLLock__Release(this->lock);
2693 /* ??0_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
2694 /* ??0_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2695 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock_ctor, 4)
2696 _ReentrantPPLLock* __thiscall _ReentrantPPLLock_ctor(_ReentrantPPLLock *this)
2698 TRACE("(%p)\n", this);
2700 critical_section_ctor(&this->cs);
2701 this->count = 0;
2702 this->owner = -1;
2703 return this;
2706 /* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
2707 /* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
2708 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Acquire, 8)
2709 void __thiscall _ReentrantPPLLock__Acquire(_ReentrantPPLLock *this, cs_queue *q)
2711 TRACE("(%p %p)\n", this, q);
2713 if(this->owner == GetCurrentThreadId()) {
2714 this->count++;
2715 return;
2718 cs_lock(&this->cs, q);
2719 this->count++;
2720 this->owner = GetCurrentThreadId();
2723 /* ?_Release@_ReentrantPPLLock@details@Concurrency@@QAEXXZ */
2724 /* ?_Release@_ReentrantPPLLock@details@Concurrency@@QEAAXXZ */
2725 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Release, 4)
2726 void __thiscall _ReentrantPPLLock__Release(_ReentrantPPLLock *this)
2728 TRACE("(%p)\n", this);
2730 this->count--;
2731 if(this->count)
2732 return;
2734 this->owner = -1;
2735 critical_section_unlock(&this->cs);
2738 /* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
2739 /* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
2740 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_ctor, 8)
2741 _ReentrantPPLLock__Scoped_lock* __thiscall _ReentrantPPLLock__Scoped_lock_ctor(
2742 _ReentrantPPLLock__Scoped_lock *this, _ReentrantPPLLock *lock)
2744 TRACE("(%p %p)\n", this, lock);
2746 this->lock = lock;
2747 _ReentrantPPLLock__Acquire(this->lock, &this->wait.q);
2748 return this;
2751 /* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
2752 /* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2753 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_dtor, 4)
2754 void __thiscall _ReentrantPPLLock__Scoped_lock_dtor(_ReentrantPPLLock__Scoped_lock *this)
2756 TRACE("(%p)\n", this);
2758 _ReentrantPPLLock__Release(this->lock);
2761 /* ?_GetConcurrency@details@Concurrency@@YAIXZ */
2762 unsigned int __cdecl _GetConcurrency(void)
2764 static unsigned int val = -1;
2766 TRACE("()\n");
2768 if(val == -1) {
2769 SYSTEM_INFO si;
2771 GetSystemInfo(&si);
2772 val = si.dwNumberOfProcessors;
2775 return val;
2778 static inline PLARGE_INTEGER evt_timeout(PLARGE_INTEGER pTime, unsigned int timeout)
2780 if(timeout == COOPERATIVE_TIMEOUT_INFINITE) return NULL;
2781 pTime->QuadPart = (ULONGLONG)timeout * -TICKSPERMSEC;
2782 return pTime;
2785 static void evt_add_queue(thread_wait_entry **head, thread_wait_entry *entry)
2787 entry->next = *head;
2788 entry->prev = NULL;
2789 if(*head) (*head)->prev = entry;
2790 *head = entry;
2793 static void evt_remove_queue(thread_wait_entry **head, thread_wait_entry *entry)
2795 if(entry == *head)
2796 *head = entry->next;
2797 else if(entry->prev)
2798 entry->prev->next = entry->next;
2799 if(entry->next) entry->next->prev = entry->prev;
2802 static size_t evt_end_wait(thread_wait *wait, event **events, int count)
2804 size_t i, ret = COOPERATIVE_WAIT_TIMEOUT;
2806 for(i = 0; i < count; i++) {
2807 critical_section_lock(&events[i]->cs);
2808 if(events[i] == wait->signaled) ret = i;
2809 evt_remove_queue(&events[i]->waiters, &wait->entries[i]);
2810 critical_section_unlock(&events[i]->cs);
2813 return ret;
2816 static inline int evt_transition(void **state, void *from, void *to)
2818 return InterlockedCompareExchangePointer(state, to, from) == from;
2821 static size_t evt_wait(thread_wait *wait, event **events, int count, bool wait_all, unsigned int timeout)
2823 int i;
2824 NTSTATUS status;
2825 LARGE_INTEGER ntto;
2827 wait->signaled = EVT_RUNNING;
2828 wait->pending_waits = wait_all ? count : 1;
2829 for(i = 0; i < count; i++) {
2830 wait->entries[i].wait = wait;
2832 critical_section_lock(&events[i]->cs);
2833 evt_add_queue(&events[i]->waiters, &wait->entries[i]);
2834 if(events[i]->signaled) {
2835 if(!InterlockedDecrement(&wait->pending_waits)) {
2836 wait->signaled = events[i];
2837 critical_section_unlock(&events[i]->cs);
2839 return evt_end_wait(wait, events, i+1);
2842 critical_section_unlock(&events[i]->cs);
2845 if(!timeout)
2846 return evt_end_wait(wait, events, count);
2848 if(!evt_transition(&wait->signaled, EVT_RUNNING, EVT_WAITING))
2849 return evt_end_wait(wait, events, count);
2851 status = NtWaitForKeyedEvent(keyed_event, wait, 0, evt_timeout(&ntto, timeout));
2853 if(status && !evt_transition(&wait->signaled, EVT_WAITING, EVT_RUNNING))
2854 NtWaitForKeyedEvent(keyed_event, wait, 0, NULL);
2856 return evt_end_wait(wait, events, count);
2859 /* ??0event@Concurrency@@QAE@XZ */
2860 /* ??0event@Concurrency@@QEAA@XZ */
2861 DEFINE_THISCALL_WRAPPER(event_ctor, 4)
2862 event* __thiscall event_ctor(event *this)
2864 TRACE("(%p)\n", this);
2866 this->waiters = NULL;
2867 this->signaled = FALSE;
2868 critical_section_ctor(&this->cs);
2870 return this;
2873 /* ??1event@Concurrency@@QAE@XZ */
2874 /* ??1event@Concurrency@@QEAA@XZ */
2875 DEFINE_THISCALL_WRAPPER(event_dtor, 4)
2876 void __thiscall event_dtor(event *this)
2878 TRACE("(%p)\n", this);
2879 critical_section_dtor(&this->cs);
2880 if(this->waiters)
2881 ERR("there's a wait on destroyed event\n");
2884 /* ?reset@event@Concurrency@@QAEXXZ */
2885 /* ?reset@event@Concurrency@@QEAAXXZ */
2886 DEFINE_THISCALL_WRAPPER(event_reset, 4)
2887 void __thiscall event_reset(event *this)
2889 thread_wait_entry *entry;
2891 TRACE("(%p)\n", this);
2893 critical_section_lock(&this->cs);
2894 if(this->signaled) {
2895 this->signaled = FALSE;
2896 for(entry=this->waiters; entry; entry = entry->next)
2897 InterlockedIncrement(&entry->wait->pending_waits);
2899 critical_section_unlock(&this->cs);
2902 /* ?set@event@Concurrency@@QAEXXZ */
2903 /* ?set@event@Concurrency@@QEAAXXZ */
2904 DEFINE_THISCALL_WRAPPER(event_set, 4)
2905 void __thiscall event_set(event *this)
2907 thread_wait_entry *wakeup = NULL;
2908 thread_wait_entry *entry, *next;
2910 TRACE("(%p)\n", this);
2912 critical_section_lock(&this->cs);
2913 if(!this->signaled) {
2914 this->signaled = TRUE;
2915 for(entry=this->waiters; entry; entry=next) {
2916 next = entry->next;
2917 if(!InterlockedDecrement(&entry->wait->pending_waits)) {
2918 if(InterlockedExchangePointer(&entry->wait->signaled, this) == EVT_WAITING) {
2919 evt_remove_queue(&this->waiters, entry);
2920 evt_add_queue(&wakeup, entry);
2925 critical_section_unlock(&this->cs);
2927 for(entry=wakeup; entry; entry=next) {
2928 next = entry->next;
2929 entry->next = entry->prev = NULL;
2930 NtReleaseKeyedEvent(keyed_event, entry->wait, 0, NULL);
2934 /* ?wait@event@Concurrency@@QAEII@Z */
2935 /* ?wait@event@Concurrency@@QEAA_KI@Z */
2936 DEFINE_THISCALL_WRAPPER(event_wait, 8)
2937 size_t __thiscall event_wait(event *this, unsigned int timeout)
2939 thread_wait wait;
2940 size_t signaled;
2942 TRACE("(%p %u)\n", this, timeout);
2944 critical_section_lock(&this->cs);
2945 signaled = this->signaled;
2946 critical_section_unlock(&this->cs);
2948 if(!timeout) return signaled ? 0 : COOPERATIVE_WAIT_TIMEOUT;
2949 return signaled ? 0 : evt_wait(&wait, &this, 1, FALSE, timeout);
2952 /* ?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z */
2953 /* ?wait_for_multiple@event@Concurrency@@SA_KPEAPEAV12@_K_NI@Z */
2954 int __cdecl event_wait_for_multiple(event **events, size_t count, bool wait_all, unsigned int timeout)
2956 thread_wait *wait;
2957 size_t ret;
2959 TRACE("(%p %Iu %d %u)\n", events, count, wait_all, timeout);
2961 if(count == 0)
2962 return 0;
2964 wait = operator_new(FIELD_OFFSET(thread_wait, entries[count]));
2965 ret = evt_wait(wait, events, count, wait_all, timeout);
2966 operator_delete(wait);
2968 return ret;
2971 #if _MSVCR_VER >= 110
2973 /* ??0_Condition_variable@details@Concurrency@@QAE@XZ */
2974 /* ??0_Condition_variable@details@Concurrency@@QEAA@XZ */
2975 DEFINE_THISCALL_WRAPPER(_Condition_variable_ctor, 4)
2976 _Condition_variable* __thiscall _Condition_variable_ctor(_Condition_variable *this)
2978 TRACE("(%p)\n", this);
2980 this->queue = NULL;
2981 critical_section_ctor(&this->lock);
2982 return this;
2985 /* ??1_Condition_variable@details@Concurrency@@QAE@XZ */
2986 /* ??1_Condition_variable@details@Concurrency@@QEAA@XZ */
2987 DEFINE_THISCALL_WRAPPER(_Condition_variable_dtor, 4)
2988 void __thiscall _Condition_variable_dtor(_Condition_variable *this)
2990 TRACE("(%p)\n", this);
2992 while(this->queue) {
2993 cv_queue *next = this->queue->next;
2994 if(!this->queue->expired)
2995 ERR("there's an active wait\n");
2996 operator_delete(this->queue);
2997 this->queue = next;
2999 critical_section_dtor(&this->lock);
3002 /* ?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z */
3003 /* ?wait@_Condition_variable@details@Concurrency@@QEAAXAEAVcritical_section@3@@Z */
3004 DEFINE_THISCALL_WRAPPER(_Condition_variable_wait, 8)
/* Blocks on the condition variable using a stack-allocated queue node.  The
 * caller's critical section is released while waiting and re-acquired before
 * returning. */
3005 void __thiscall _Condition_variable_wait(_Condition_variable *this, critical_section *cs)
3007 cv_queue q, *next;
3009 TRACE("(%p, %p)\n", this, cs);
3011 critical_section_lock(&this->lock);
/* push our node onto the head of the waiter list */
3012 q.next = this->queue;
3013 q.expired = FALSE;
3014 next = q.next;
3015 this->queue = &q;
3016 critical_section_unlock(&this->lock);
3018 critical_section_unlock(cs);
/* a notifier stores CV_WAKE into q.next; wait on that address until it does */
3019 while (q.next != CV_WAKE)
3020 RtlWaitOnAddress(&q.next, &next, sizeof(next), NULL);
3021 critical_section_lock(cs);
3024 /* ?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z */
3025 /* ?wait_for@_Condition_variable@details@Concurrency@@QEAA_NAEAVcritical_section@3@I@Z */
3026 DEFINE_THISCALL_WRAPPER(_Condition_variable_wait_for, 12)
/* Timed wait: returns FALSE on timeout, TRUE when notified.  The queue node
 * is heap-allocated because a timed-out waiter may return while a concurrent
 * notifier still references the node; whoever wins the 'expired' exchange
 * leaves the node for the other side to free. */
3027 bool __thiscall _Condition_variable_wait_for(_Condition_variable *this,
3028 critical_section *cs, unsigned int timeout)
3030 LARGE_INTEGER to;
3031 NTSTATUS status;
3032 FILETIME ft;
3033 cv_queue *q, *next;
3035 TRACE("(%p %p %d)\n", this, cs, timeout);
3037 q = operator_new(sizeof(cv_queue));
3038 critical_section_lock(&this->lock);
/* push our node onto the head of the waiter list */
3039 q->next = this->queue;
3040 q->expired = FALSE;
3041 next = q->next;
3042 this->queue = q;
3043 critical_section_unlock(&this->lock);
3045 critical_section_unlock(cs);
/* absolute deadline in FILETIME (100ns) units */
3047 GetSystemTimeAsFileTime(&ft);
3048 to.QuadPart = ((LONGLONG)ft.dwHighDateTime << 32) +
3049 ft.dwLowDateTime + (LONGLONG)timeout * TICKSPERMSEC;
3050 while (q->next != CV_WAKE) {
3051 status = RtlWaitOnAddress(&q->next, &next, sizeof(next), &to);
3052 if(status == STATUS_TIMEOUT) {
/* we expired first: a later notify will delete the node */
3053 if(!InterlockedExchange(&q->expired, TRUE)) {
3054 critical_section_lock(cs);
3055 return FALSE;
/* lost the race - a notify already claimed us, so treat it as a wakeup */
3057 break;
3061 operator_delete(q);
3062 critical_section_lock(cs);
3063 return TRUE;
3066 /* ?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ */
3067 /* ?notify_one@_Condition_variable@details@Concurrency@@QEAAXXZ */
3068 DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_one, 4)
/* Wakes a single waiter.  Nodes whose timed wait already expired are freed
 * and skipped until a live waiter is found (or the queue runs out). */
3069 void __thiscall _Condition_variable_notify_one(_Condition_variable *this)
3071 cv_queue *node;
3073 TRACE("(%p)\n", this);
/* unlocked fast path; a waiter racing with us will be caught by a later notify */
3075 if(!this->queue)
3076 return;
3078 while(1) {
3079 critical_section_lock(&this->lock);
3080 node = this->queue;
3081 if(!node) {
3082 critical_section_unlock(&this->lock);
3083 return;
3085 this->queue = node->next;
3086 critical_section_unlock(&this->lock);
/* CV_WAKE in node->next is the wakeup signal the waiter polls for */
3088 node->next = CV_WAKE;
/* first to set 'expired' keeps the node alive for the other side:
 * we won - wake the waiter; we lost - the waiter timed out, free its node */
3089 if(!InterlockedExchange(&node->expired, TRUE)) {
3090 RtlWakeAddressSingle(&node->next);
3091 return;
3092 } else {
3093 operator_delete(node);
3098 /* ?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ */
3099 /* ?notify_all@_Condition_variable@details@Concurrency@@QEAAXXZ */
3100 DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_all, 4)
/* Wakes every queued waiter; nodes whose timed wait already expired are
 * freed instead of woken. */
3101 void __thiscall _Condition_variable_notify_all(_Condition_variable *this)
3103 cv_queue *ptr;
3105 TRACE("(%p)\n", this);
/* unlocked fast path; a waiter racing with us will be caught by a later notify */
3107 if(!this->queue)
3108 return;
/* detach the whole waiter list under the lock, wake outside it */
3110 critical_section_lock(&this->lock);
3111 ptr = this->queue;
3112 this->queue = NULL;
3113 critical_section_unlock(&this->lock);
3115 while(ptr) {
3116 cv_queue *next = ptr->next;
/* CV_WAKE signals the waiter; winner of 'expired' leaves the node to the loser */
3118 ptr->next = CV_WAKE;
3119 if(!InterlockedExchange(&ptr->expired, TRUE))
3120 RtlWakeAddressSingle(&ptr->next);
3121 else
3122 operator_delete(ptr);
3123 ptr = next;
3126 #endif
3128 /* ??0reader_writer_lock@Concurrency@@QAE@XZ */
3129 /* ??0reader_writer_lock@Concurrency@@QEAA@XZ */
3130 DEFINE_THISCALL_WRAPPER(reader_writer_lock_ctor, 4)
3131 reader_writer_lock* __thiscall reader_writer_lock_ctor(reader_writer_lock *this)
3133 TRACE("(%p)\n", this);
3135 memset(this, 0, sizeof(*this));
3136 return this;
3139 /* ??1reader_writer_lock@Concurrency@@QAE@XZ */
3140 /* ??1reader_writer_lock@Concurrency@@QEAA@XZ */
3141 DEFINE_THISCALL_WRAPPER(reader_writer_lock_dtor, 4)
3142 void __thiscall reader_writer_lock_dtor(reader_writer_lock *this)
3144 TRACE("(%p)\n", this);
3146 if (this->thread_id != 0 || this->count)
3147 WARN("destroying locked reader_writer_lock\n");
3150 static inline void spin_wait_for_next_rwl(rwl_queue *q)
3152 SpinWait sw;
3154 if(q->next) return;
3156 SpinWait_ctor(&sw, &spin_wait_yield);
3157 SpinWait__Reset(&sw);
3158 while(!q->next)
3159 SpinWait__SpinOnce(&sw);
3160 SpinWait_dtor(&sw);
3163 /* ?lock@reader_writer_lock@Concurrency@@QAEXXZ */
3164 /* ?lock@reader_writer_lock@Concurrency@@QEAAXXZ */
3165 DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock, 4)
/* Acquires the lock for writing.  Writers form a lock-free queue through
 * writer_tail; the queue head owns the lock.  Throws improper_lock on a
 * recursive write acquisition by the owning thread. */
3166 void __thiscall reader_writer_lock_lock(reader_writer_lock *this)
3168 rwl_queue q = { NULL, get_current_context() }, *last;
3170 TRACE("(%p)\n", this);
3172 if (this->thread_id == GetCurrentThreadId()) {
3173 improper_lock e;
3174 improper_lock_ctor_str(&e, "Already locked");
3175 _CxxThrowException(&e, &improper_lock_exception_type);
/* enqueue our stack node; a non-NULL previous tail means a writer is ahead
 * of us, so block until it hands the lock over */
3178 last = InterlockedExchangePointer((void**)&this->writer_tail, &q);
3179 if (last) {
3180 last->next = &q;
3181 call_Context_Block(q.ctx);
3182 } else {
3183 this->writer_head = &q;
/* flag a pending writer; non-zero count means readers are still active,
 * so block until they drain (see unlock) */
3184 if (InterlockedOr(&this->count, WRITER_WAITING))
3185 call_Context_Block(q.ctx);
/* we own the lock now; swap the stack node for the embedded 'active' node */
3188 this->thread_id = GetCurrentThreadId();
3189 this->writer_head = &this->active;
3190 this->active.next = NULL;
/* if another writer already queued behind us, wait for its link to appear */
3191 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
3192 spin_wait_for_next_rwl(&q);
3193 this->active.next = q.next;
3197 /* ?lock_read@reader_writer_lock@Concurrency@@QAEXXZ */
3198 /* ?lock_read@reader_writer_lock@Concurrency@@QEAAXXZ */
3199 DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock_read, 4)
/* Acquires the lock for reading.  Readers push onto a lock-free stack
 * (reader_head); the thread that pushed onto an empty stack acts as group
 * leader and unblocks the readers stacked behind it.  Throws improper_lock
 * if the calling thread already holds the write lock. */
3200 void __thiscall reader_writer_lock_lock_read(reader_writer_lock *this)
3202 rwl_queue q = { NULL, get_current_context() };
3204 TRACE("(%p)\n", this);
3206 if (this->thread_id == GetCurrentThreadId()) {
3207 improper_lock e;
3208 improper_lock_ctor_str(&e, "Already locked as writer");
3209 _CxxThrowException(&e, &improper_lock_exception_type);
/* push our node onto the reader stack */
3212 do {
3213 q.next = this->reader_head;
3214 } while(InterlockedCompareExchangePointer((void**)&this->reader_head, &q, q.next) != q.next);
/* stack was empty: we are the group leader */
3216 if (!q.next) {
3217 rwl_queue *head;
3218 LONG count;
/* take a reader reference unless a writer is waiting */
3220 while (!((count = this->count) & WRITER_WAITING))
3221 if (InterlockedCompareExchange(&this->count, count+1, count) == count) break;
/* writer pending: block until it releases the lock and wakes us */
3223 if (count & WRITER_WAITING)
3224 call_Context_Block(q.ctx);
/* detach the stack of readers that queued behind us and release each one */
3226 head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
3227 while(head && head != &q) {
3228 rwl_queue *next = head->next;
3229 InterlockedIncrement(&this->count);
3230 call_Context_Unblock(head->ctx);
3231 head = next;
3233 } else {
/* another reader leads the group; wait to be unblocked by it */
3234 call_Context_Block(q.ctx);
3238 /* ?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ */
3239 /* ?try_lock@reader_writer_lock@Concurrency@@QEAA_NXZ */
3240 DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock, 4)
/* Attempts to take the write lock without blocking; returns TRUE on success.
 * A recursive attempt by the writing thread fails instead of throwing. */
3241 bool __thiscall reader_writer_lock_try_lock(reader_writer_lock *this)
3243 rwl_queue q = { NULL };
3245 TRACE("(%p)\n", this);
3247 if (this->thread_id == GetCurrentThreadId())
3248 return FALSE;
/* fail if any writer is already queued */
3250 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &q, NULL))
3251 return FALSE;
3252 this->writer_head = &q;
/* count was zero (no readers): the lock is ours; install the 'active' node */
3253 if (!InterlockedCompareExchange(&this->count, WRITER_WAITING, 0)) {
3254 this->thread_id = GetCurrentThreadId();
3255 this->writer_head = &this->active;
3256 this->active.next = NULL;
/* a writer queued behind our stack node: wait for its link, then adopt it */
3257 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
3258 spin_wait_for_next_rwl(&q);
3259 this->active.next = q.next;
3261 return TRUE;
/* readers hold the lock: back out of the writer queue if we still can */
3264 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, &q) == &q)
3265 return FALSE;
/* another writer queued behind us meanwhile; hand the head over to it */
3266 spin_wait_for_next_rwl(&q);
3267 this->writer_head = q.next;
/* readers drained in the meantime: the lock falls to us after all */
3268 if (!InterlockedOr(&this->count, WRITER_WAITING)) {
3269 this->thread_id = GetCurrentThreadId();
3270 this->writer_head = &this->active;
3271 this->active.next = q.next;
3272 return TRUE;
3274 return FALSE;
3277 /* ?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ */
3278 /* ?try_lock_read@reader_writer_lock@Concurrency@@QEAA_NXZ */
3279 DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock_read, 4)
3280 bool __thiscall reader_writer_lock_try_lock_read(reader_writer_lock *this)
3282 LONG count;
3284 TRACE("(%p)\n", this);
3286 while (!((count = this->count) & WRITER_WAITING))
3287 if (InterlockedCompareExchange(&this->count, count+1, count) == count) return TRUE;
3288 return FALSE;
3291 /* ?unlock@reader_writer_lock@Concurrency@@QAEXXZ */
3292 /* ?unlock@reader_writer_lock@Concurrency@@QEAAXXZ */
3293 DEFINE_THISCALL_WRAPPER(reader_writer_lock_unlock, 4)
/* Releases either a read or a write hold on the lock. */
3294 void __thiscall reader_writer_lock_unlock(reader_writer_lock *this)
3296 LONG count;
3297 rwl_queue *head, *next;
3299 TRACE("(%p)\n", this);
/* reader path: a non-zero count (ignoring the writer flag) means we hold a
 * read lock */
3301 if ((count = this->count) & ~WRITER_WAITING) {
3302 count = InterlockedDecrement(&this->count);
3303 if (count != WRITER_WAITING)
3304 return;
/* last reader out with a writer flagged: wake the waiting writer */
3305 NtReleaseKeyedEvent(keyed_event, this->writer_head, 0, NULL);
3306 return;
/* writer path */
3309 this->thread_id = 0;
3310 next = this->writer_head->next;
3311 if (next) {
/* hand the lock directly to the next queued writer */
3312 call_Context_Unblock(next->ctx);
3313 return;
/* no writer queued: admit any readers that stacked up while we held the lock */
3315 InterlockedAnd(&this->count, ~WRITER_WAITING);
3316 head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
3317 while (head) {
3318 next = head->next;
3319 InterlockedIncrement(&this->count);
3320 call_Context_Unblock(head->ctx);
3321 head = next;
/* try to clear the writer queue; failure means a writer is enqueueing right
 * now - restore the pending-writer flag for it */
3324 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, this->writer_head) == this->writer_head)
3325 return;
3326 InterlockedOr(&this->count, WRITER_WAITING);
3329 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
3330 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
3331 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_ctor, 8)
3332 reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_ctor(
3333 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
3335 TRACE("(%p %p)\n", this, lock);
3337 this->lock = lock;
3338 reader_writer_lock_lock(lock);
3339 return this;
3342 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ */
3343 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QEAA@XZ */
3344 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_dtor, 4)
3345 void __thiscall reader_writer_lock_scoped_lock_dtor(reader_writer_lock_scoped_lock *this)
3347 TRACE("(%p)\n", this);
3348 reader_writer_lock_unlock(this->lock);
3351 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
3352 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
3353 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_ctor, 8)
3354 reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_read_ctor(
3355 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
3357 TRACE("(%p %p)\n", this, lock);
3359 this->lock = lock;
3360 reader_writer_lock_lock_read(lock);
3361 return this;
3364 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ */
3365 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@XZ */
3366 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_dtor, 4)
3367 void __thiscall reader_writer_lock_scoped_lock_read_dtor(reader_writer_lock_scoped_lock *this)
3369 TRACE("(%p)\n", this);
3370 reader_writer_lock_unlock(this->lock);
3373 /* ??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
3374 /* ??0_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
3375 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_ctor, 4)
3376 _ReentrantBlockingLock* __thiscall _ReentrantBlockingLock_ctor(_ReentrantBlockingLock *this)
3378 TRACE("(%p)\n", this);
3380 InitializeCriticalSection(&this->cs);
3381 this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": _ReentrantBlockingLock");
3382 return this;
3385 /* ??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
3386 /* ??1_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
3387 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_dtor, 4)
3388 void __thiscall _ReentrantBlockingLock_dtor(_ReentrantBlockingLock *this)
3390 TRACE("(%p)\n", this);
3392 this->cs.DebugInfo->Spare[0] = 0;
3393 DeleteCriticalSection(&this->cs);
3396 /* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
3397 /* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
3398 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Acquire, 4)
3399 void __thiscall _ReentrantBlockingLock__Acquire(_ReentrantBlockingLock *this)
3401 TRACE("(%p)\n", this);
3402 EnterCriticalSection(&this->cs);
3405 /* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
3406 /* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
3407 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Release, 4)
3408 void __thiscall _ReentrantBlockingLock__Release(_ReentrantBlockingLock *this)
3410 TRACE("(%p)\n", this);
3411 LeaveCriticalSection(&this->cs);
3414 /* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ */
3415 /* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QEAA_NXZ */
3416 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__TryAcquire, 4)
3417 bool __thiscall _ReentrantBlockingLock__TryAcquire(_ReentrantBlockingLock *this)
3419 TRACE("(%p)\n", this);
3420 return TryEnterCriticalSection(&this->cs);
3423 /* ?wait@Concurrency@@YAXI@Z */
3424 void __cdecl Concurrency_wait(unsigned int time)
3426 static int once;
3428 if (!once++) FIXME("(%d) stub!\n", time);
3430 Sleep(time);
3433 #if _MSVCR_VER>=110
3434 /* ?_Trace_agents@Concurrency@@YAXW4Agents_EventType@1@_JZZ */
/* ETW-style tracing hook for the agents library; unimplemented stub. */
3435 void WINAPIV _Trace_agents(/*enum Concurrency::Agents_EventType*/int type, __int64 id, ...)
3437 FIXME("(%d %#I64x)\n", type, id);
3439 #endif
3441 /* ?_Trace_ppl_function@Concurrency@@YAXABU_GUID@@EW4ConcRT_EventType@1@@Z */
3442 /* ?_Trace_ppl_function@Concurrency@@YAXAEBU_GUID@@EW4ConcRT_EventType@1@@Z */
/* PPL tracing hook; unimplemented stub. */
3443 void __cdecl _Trace_ppl_function(const GUID *guid, unsigned char level, enum ConcRT_EventType type)
3445 FIXME("(%s %u %i) stub\n", debugstr_guid(guid), level, type);
3448 /* ??0_Timer@details@Concurrency@@IAE@I_N@Z */
3449 /* ??0_Timer@details@Concurrency@@IEAA@I_N@Z */
3450 DEFINE_THISCALL_WRAPPER(_Timer_ctor, 12)
3451 _Timer* __thiscall _Timer_ctor(_Timer *this, unsigned int elapse, bool repeat)
3453 TRACE("(%p %u %x)\n", this, elapse, repeat);
3455 this->vtable = &_Timer_vtable;
3456 this->timer = NULL;
3457 this->elapse = elapse;
3458 this->repeat = repeat;
3459 return this;
3462 static void WINAPI timer_callback(TP_CALLBACK_INSTANCE *instance, void *ctx, TP_TIMER *timer)
3464 _Timer *this = ctx;
3465 TRACE("calling _Timer(%p) callback\n", this);
3466 call__Timer_callback(this);
3469 /* ?_Start@_Timer@details@Concurrency@@IAEXXZ */
3470 /* ?_Start@_Timer@details@Concurrency@@IEAAXXZ */
3471 DEFINE_THISCALL_WRAPPER(_Timer__Start, 4)
/* Creates the threadpool timer and arms it with the configured due time. */
3472 void __thiscall _Timer__Start(_Timer *this)
3474 LONGLONG ll;
3475 FILETIME ft;
3477 TRACE("(%p)\n", this);
3479 this->timer = CreateThreadpoolTimer(timer_callback, this, NULL);
3480 if (!this->timer)
3482 FIXME("throw exception?\n");
3483 return;
/* negative FILETIME value = due time relative to now, in 100ns ticks */
3486 ll = -(LONGLONG)this->elapse * TICKSPERMSEC;
3487 ft.dwLowDateTime = ll & 0xffffffff;
3488 ft.dwHighDateTime = ll >> 32;
/* period in ms when repeating, 0 for a one-shot timer */
3489 SetThreadpoolTimer(this->timer, &ft, this->repeat ? this->elapse : 0, 0);
3492 /* ?_Stop@_Timer@details@Concurrency@@IAEXXZ */
3493 /* ?_Stop@_Timer@details@Concurrency@@IEAAXXZ */
3494 DEFINE_THISCALL_WRAPPER(_Timer__Stop, 4)
/* Disarms the timer, waits for any in-flight callback, then destroys it. */
3495 void __thiscall _Timer__Stop(_Timer *this)
3497 TRACE("(%p)\n", this);
3499 SetThreadpoolTimer(this->timer, NULL, 0, 0);
3500 WaitForThreadpoolTimerCallbacks(this->timer, TRUE);
3501 CloseThreadpoolTimer(this->timer);
3502 this->timer = NULL;
3505 /* ??1_Timer@details@Concurrency@@MAE@XZ */
3506 /* ??1_Timer@details@Concurrency@@MEAA@XZ */
3507 DEFINE_THISCALL_WRAPPER(_Timer_dtor, 4)
/* Destructor: stops the timer if it is still running. */
3510 void __thiscall _Timer_dtor(_Timer *this)
3510 TRACE("(%p)\n", this);
3512 if (this->timer)
3513 _Timer__Stop(this);
3516 DEFINE_THISCALL_WRAPPER(_Timer_vector_dtor, 8)
/* MSVC scalar/vector deleting destructor: flags bit 1 selects the array
 * form, bit 0 requests freeing the storage afterwards. */
3517 _Timer* __thiscall _Timer_vector_dtor(_Timer *this, unsigned int flags)
3519 TRACE("(%p %x)\n", this, flags);
3520 if (flags & 2) {
3521 /* we have an array, with the number of elements stored before the first object */
3522 INT_PTR i, *ptr = (INT_PTR *)this-1;
/* destroy in reverse construction order */
3524 for (i=*ptr-1; i>=0; i--)
3525 _Timer_dtor(this+i);
3526 operator_delete(ptr);
3527 } else {
3528 _Timer_dtor(this);
3529 if (flags & 1)
3530 operator_delete(this);
3533 return this;
3536 #ifdef __ASM_USE_THISCALL_WRAPPER
/* 32-bit x86 thunks for calling thiscall virtual methods: pop the return
 * address into %eax, pop 'this' from the stack into %ecx, push the return
 * address back, then jump through the vtable slot at byte offset 'off'. */
3538 #define DEFINE_VTBL_WRAPPER(off) \
3539 __ASM_GLOBAL_FUNC(vtbl_wrapper_ ## off, \
3540 "popl %eax\n\t" \
3541 "popl %ecx\n\t" \
3542 "pushl %eax\n\t" \
3543 "movl 0(%ecx), %eax\n\t" \
3544 "jmp *" #off "(%eax)\n\t")
/* one wrapper per vtable slot offset used by the vtables below */
3546 DEFINE_VTBL_WRAPPER(0);
3547 DEFINE_VTBL_WRAPPER(4);
3548 DEFINE_VTBL_WRAPPER(8);
3549 DEFINE_VTBL_WRAPPER(12);
3550 DEFINE_VTBL_WRAPPER(16);
3551 DEFINE_VTBL_WRAPPER(20);
3552 DEFINE_VTBL_WRAPPER(24);
3553 DEFINE_VTBL_WRAPPER(28);
3554 DEFINE_VTBL_WRAPPER(32);
3555 DEFINE_VTBL_WRAPPER(36);
3556 DEFINE_VTBL_WRAPPER(40);
3557 DEFINE_VTBL_WRAPPER(44);
3558 DEFINE_VTBL_WRAPPER(48);
3560 #endif
/* RTTI descriptors for the Concurrency class hierarchies exposed here. */
3562 DEFINE_RTTI_DATA0(Context, 0, ".?AVContext@Concurrency@@")
3563 DEFINE_RTTI_DATA1(ContextBase, 0, &Context_rtti_base_descriptor, ".?AVContextBase@details@Concurrency@@")
3564 DEFINE_RTTI_DATA2(ExternalContextBase, 0, &ContextBase_rtti_base_descriptor,
3565 &Context_rtti_base_descriptor, ".?AVExternalContextBase@details@Concurrency@@")
3566 DEFINE_RTTI_DATA0(Scheduler, 0, ".?AVScheduler@Concurrency@@")
3567 DEFINE_RTTI_DATA1(SchedulerBase, 0, &Scheduler_rtti_base_descriptor, ".?AVSchedulerBase@details@Concurrency@@")
3568 DEFINE_RTTI_DATA2(ThreadScheduler, 0, &SchedulerBase_rtti_base_descriptor,
3569 &Scheduler_rtti_base_descriptor, ".?AVThreadScheduler@details@Concurrency@@")
3570 DEFINE_RTTI_DATA0(_Timer, 0, ".?AV_Timer@details@Concurrency@@");
/* Hand-built vtables; slot order must match the call_* macros above and the
 * MSVC vtable layout of the corresponding classes. */
3572 __ASM_BLOCK_BEGIN(concurrency_vtables)
3573 __ASM_VTABLE(ExternalContextBase,
3574 VTABLE_ADD_FUNC(ExternalContextBase_GetId)
3575 VTABLE_ADD_FUNC(ExternalContextBase_GetVirtualProcessorId)
3576 VTABLE_ADD_FUNC(ExternalContextBase_GetScheduleGroupId)
3577 VTABLE_ADD_FUNC(ExternalContextBase_Unblock)
3578 VTABLE_ADD_FUNC(ExternalContextBase_IsSynchronouslyBlocked)
3579 VTABLE_ADD_FUNC(ExternalContextBase_vector_dtor)
3580 VTABLE_ADD_FUNC(ExternalContextBase_Block)
3581 VTABLE_ADD_FUNC(ExternalContextBase_Yield)
3582 VTABLE_ADD_FUNC(ExternalContextBase_SpinYield)
3583 VTABLE_ADD_FUNC(ExternalContextBase_Oversubscribe)
3584 VTABLE_ADD_FUNC(ExternalContextBase_Alloc)
3585 VTABLE_ADD_FUNC(ExternalContextBase_Free)
3586 VTABLE_ADD_FUNC(ExternalContextBase_EnterCriticalRegionHelper)
3587 VTABLE_ADD_FUNC(ExternalContextBase_EnterHyperCriticalRegionHelper)
3588 VTABLE_ADD_FUNC(ExternalContextBase_ExitCriticalRegionHelper)
3589 VTABLE_ADD_FUNC(ExternalContextBase_ExitHyperCriticalRegionHelper)
3590 VTABLE_ADD_FUNC(ExternalContextBase_GetCriticalRegionType)
3591 VTABLE_ADD_FUNC(ExternalContextBase_GetContextKind));
3592 __ASM_VTABLE(ThreadScheduler,
3593 VTABLE_ADD_FUNC(ThreadScheduler_vector_dtor)
3594 VTABLE_ADD_FUNC(ThreadScheduler_Id)
3595 VTABLE_ADD_FUNC(ThreadScheduler_GetNumberOfVirtualProcessors)
3596 VTABLE_ADD_FUNC(ThreadScheduler_GetPolicy)
3597 VTABLE_ADD_FUNC(ThreadScheduler_Reference)
3598 VTABLE_ADD_FUNC(ThreadScheduler_Release)
3599 VTABLE_ADD_FUNC(ThreadScheduler_RegisterShutdownEvent)
3600 VTABLE_ADD_FUNC(ThreadScheduler_Attach)
3601 #if _MSVCR_VER > 100
3602 VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup_loc)
3603 #endif
3604 VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup)
3605 #if _MSVCR_VER > 100
3606 VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask_loc)
3607 #endif
3608 VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask)
3609 #if _MSVCR_VER > 100
3610 VTABLE_ADD_FUNC(ThreadScheduler_IsAvailableLocation)
3611 #endif
3613 __ASM_VTABLE(_Timer,
3614 VTABLE_ADD_FUNC(_Timer_vector_dtor));
3615 __ASM_BLOCK_END
/* One-time initialization of the concurrency runtime's RTTI and C++
 * exception metadata.  The fixups take the module base because x86-64 RTTI
 * stores image-relative offsets; on other architectures the data is
 * position-independent and nothing needs patching. */
3617 void msvcrt_init_concurrency(void *base)
3619 #ifdef __x86_64__
3620 init_cexception_rtti(base);
3621 init_improper_lock_rtti(base);
3622 init_improper_scheduler_attach_rtti(base);
3623 init_improper_scheduler_detach_rtti(base);
3624 init_invalid_multiple_scheduling_rtti(base);
3625 init_invalid_scheduler_policy_key_rtti(base);
3626 init_invalid_scheduler_policy_thread_specification_rtti(base);
3627 init_invalid_scheduler_policy_value_rtti(base);
3628 init_missing_wait_rtti(base);
3629 init_scheduler_resource_allocation_error_rtti(base);
3630 init_Context_rtti(base);
3631 init_ContextBase_rtti(base);
3632 init_ExternalContextBase_rtti(base);
3633 init_Scheduler_rtti(base);
3634 init_SchedulerBase_rtti(base);
3635 init_ThreadScheduler_rtti(base);
3636 init__Timer_rtti(base);
3638 init_cexception_cxx_type_info(base);
3639 init_improper_lock_cxx(base);
3640 init_improper_scheduler_attach_cxx(base);
3641 init_improper_scheduler_detach_cxx(base);
3642 init_invalid_multiple_scheduling_cxx(base);
3643 init_invalid_scheduler_policy_key_cxx(base);
3644 init_invalid_scheduler_policy_thread_specification_cxx(base);
3645 init_invalid_scheduler_policy_value_cxx(base);
/* missing_wait only exists in msvcr120 and later */
3646 #if _MSVCR_VER >= 120
3647 init_missing_wait_cxx(base);
3648 #endif
3649 init_scheduler_resource_allocation_error_cxx(base);
3650 #endif
/* Process-shutdown cleanup: releases the context TLS slot, the default
 * scheduler policy and scheduler, and the keyed event used for blocking. */
3653 void msvcrt_free_concurrency(void)
3655 if (context_tls_index != TLS_OUT_OF_INDEXES)
3656 TlsFree(context_tls_index);
3657 if(default_scheduler_policy.policy_container)
3658 SchedulerPolicy_dtor(&default_scheduler_policy);
3659 if(default_scheduler) {
3660 ThreadScheduler_dtor(default_scheduler);
3661 operator_delete(default_scheduler);
3664 if(keyed_event)
3665 NtClose(keyed_event);
/* Per-thread cleanup: destroys the thread's context, if one was created. */
3668 void msvcrt_free_scheduler_thread(void)
3670 Context *context = try_get_current_context();
3671 if (!context) return;
/* flag 1 = free the object's memory as well */
3672 call_Context_dtor(context, 1);
3675 #endif /* _MSVCR_VER >= 100 */