/*
 * Concurrency namespace implementation
 *
 * Copyright 2017 Piotr Caban
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include <stdarg.h>
#include <stdbool.h>

#include "windef.h"
#include "winternl.h"
#include "wine/debug.h"
#include "wine/exception.h"
#include "wine/list.h"
#include "msvcrt.h"
#include "cppexcept.h"

#if _MSVCR_VER >= 100

WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);

typedef exception cexception;
CREATE_EXCEPTION_OBJECT(cexception)
DEFINE_CXX_TYPE_INFO(cexception)

static LONG context_id = -1;
static LONG scheduler_id = -1;

typedef enum {
    SchedulerKind,
    MaxConcurrency,
    MinConcurrency,
    TargetOversubscriptionFactor,
    LocalContextCacheSize,
    ContextStackSize,
    ContextPriority,
    SchedulingProtocol,
    DynamicProgressFeedback,
    WinRTInitialization,
    last_policy_id
} PolicyElementKey;

typedef struct {
    struct _policy_container {
        unsigned int policies[last_policy_id];
    } *policy_container;
} SchedulerPolicy;

typedef struct {
    const vtable_ptr *vtable;
} Context;
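
/* Call Context methods through the C++ vtable from C; the numeric
 * argument is the slot offset handed to CALL_VTBL_FUNC (slots are
 * numbered in steps of 4), matching the native vtable layout. */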
#define call_Context_GetId(this) CALL_VTBL_FUNC(this, 0, \
        unsigned int, (const Context*), (this))
#define call_Context_GetVirtualProcessorId(this) CALL_VTBL_FUNC(this, 4, \
        unsigned int, (const Context*), (this))
#define call_Context_GetScheduleGroupId(this) CALL_VTBL_FUNC(this, 8, \
        unsigned int, (const Context*), (this))
#define call_Context_Unblock(this) CALL_VTBL_FUNC(this, 12, \
        void, (Context*), (this))
#define call_Context_IsSynchronouslyBlocked(this) CALL_VTBL_FUNC(this, 16, \
        bool, (const Context*), (this))
#define call_Context_dtor(this, flags) CALL_VTBL_FUNC(this, 20, \
        Context*, (Context*, unsigned int), (this, flags))
#define call_Context_Block(this) CALL_VTBL_FUNC(this, 24, \
        void, (Context*), (this))

typedef struct {
    Context *context;
} _Context;
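
/* Per-context small-allocation cache entry. A block is in one of two
 * states sharing the same storage: on a free list ("free", with depth
 * used to cap the list length) or handed out ("alloc", where bucket
 * records which cache bucket it came from, -1 for plain operator_new).
 * See Concurrency_Alloc/Concurrency_Free below. */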
union allocator_cache_entry {
    struct _free {
        int depth;
        union allocator_cache_entry *next;
    } free;
    struct _alloc {
        int bucket;
        char mem[1];
    } alloc;
};

struct scheduler_list {
    struct Scheduler *scheduler;
    struct scheduler_list *next;
};

struct beacon {
    LONG cancelling;
    struct list entry;
    struct _StructuredTaskCollection *task_collection;
};

typedef struct {
    Context context;
    struct scheduler_list scheduler;
    unsigned int id;
    union allocator_cache_entry *allocator_cache[8];
    LONG blocked;
    struct _StructuredTaskCollection *task_collection;
    CRITICAL_SECTION beacons_cs;
    struct list beacons;
} ExternalContextBase;
extern const vtable_ptr ExternalContextBase_vtable;
static void ExternalContextBase_ctor(ExternalContextBase*);

typedef struct Scheduler {
    const vtable_ptr *vtable;
} Scheduler;
#define call_Scheduler_Id(this) CALL_VTBL_FUNC(this, 4, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetNumberOfVirtualProcessors(this) CALL_VTBL_FUNC(this, 8, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetPolicy(this,policy) CALL_VTBL_FUNC(this, 12, \
        SchedulerPolicy*, (Scheduler*,SchedulerPolicy*), (this,policy))
#define call_Scheduler_Reference(this) CALL_VTBL_FUNC(this, 16, unsigned int, (Scheduler*), (this))
#define call_Scheduler_Release(this) CALL_VTBL_FUNC(this, 20, unsigned int, (Scheduler*), (this))
#define call_Scheduler_RegisterShutdownEvent(this,event) CALL_VTBL_FUNC(this, 24, void, (Scheduler*,HANDLE), (this,event))
#define call_Scheduler_Attach(this) CALL_VTBL_FUNC(this, 28, void, (Scheduler*), (this))
#if _MSVCR_VER > 100
#define call_Scheduler_CreateScheduleGroup_loc(this,placement) CALL_VTBL_FUNC(this, 32, \
        /*ScheduleGroup*/void*, (Scheduler*,/*location*/void*), (this,placement))
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 36, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask_loc(this,proc,data,placement) CALL_VTBL_FUNC(this, 40, \
        void, (Scheduler*,void (__cdecl*)(void*),void*,/*location*/void*), (this,proc,data,placement))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 44, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#define call_Scheduler_IsAvailableLocation(this,placement) CALL_VTBL_FUNC(this, 48, \
        bool, (Scheduler*,const /*location*/void*), (this,placement))
#else
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 32, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 36, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#endif

typedef struct {
    Scheduler scheduler;
    LONG ref;
    unsigned int id;
    unsigned int virt_proc_no;
    SchedulerPolicy policy;
    int shutdown_count;
    int shutdown_size;
    HANDLE *shutdown_events;
    CRITICAL_SECTION cs;
    struct list scheduled_chores;
} ThreadScheduler;
extern const vtable_ptr ThreadScheduler_vtable;

typedef struct {
    Scheduler *scheduler;
} _Scheduler;

typedef struct {
    char empty;
} _CurrentScheduler;

typedef enum
{
    SPINWAIT_INIT,
    SPINWAIT_SPIN,
    SPINWAIT_YIELD,
    SPINWAIT_DONE
} SpinWait_state;

typedef void (__cdecl *yield_func)(void);

typedef struct
{
    ULONG spin;
    ULONG unknown;
    SpinWait_state state;
    yield_func yield_func;
} SpinWait;

#define FINISHED_INITIAL 0x80000000
typedef struct _StructuredTaskCollection
{
    void *unk1;
    unsigned int unk2;
    void *unk3;
    Context *context;
    volatile LONG count;
    volatile LONG finished;
    void *exception;
    Context *event;
} _StructuredTaskCollection;

bool __thiscall _StructuredTaskCollection__IsCanceling(_StructuredTaskCollection*);

typedef enum
{
    TASK_COLLECTION_SUCCESS = 1,
    TASK_COLLECTION_CANCELLED
} _TaskCollectionStatus;

typedef enum
{
    STRUCTURED_TASK_COLLECTION_CANCELLED = 0x2,
    STRUCTURED_TASK_COLLECTION_STATUS_MASK = 0x7
} _StructuredTaskCollectionStatusBits;

typedef struct _UnrealizedChore
{
    const vtable_ptr *vtable;
    void (__cdecl *chore_proc)(struct _UnrealizedChore*);
    _StructuredTaskCollection *task_collection;
    void (__cdecl *chore_wrapper)(struct _UnrealizedChore*);
    void *unk[6];
} _UnrealizedChore;

struct scheduled_chore {
    struct list entry;
    _UnrealizedChore *chore;
};

/* keep in sync with msvcp90/msvcp90.h */
typedef struct cs_queue
{
    Context *ctx;
    struct cs_queue *next;
#if _MSVCR_VER >= 110
    LONG free;
    int unknown;
#endif
} cs_queue;
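
/* Presumably a fair queue lock: waiting contexts append cs_queue nodes
 * (with unk_active doubling as the embedded node for the owner) and
 * block until they reach the front; head/tail bound the wait queue. */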
typedef struct
{
    cs_queue unk_active;
#if _MSVCR_VER >= 110
    void *unknown[2];
#else
    void *unknown[1];
#endif
    cs_queue *head;
    void *tail;
} critical_section;

typedef struct
{
    critical_section *cs;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } lock;
} critical_section_scoped_lock;

typedef struct
{
    critical_section cs;
} _NonReentrantPPLLock;

typedef struct
{
    _NonReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _NonReentrantPPLLock__Scoped_lock;

typedef struct
{
    critical_section cs;
    LONG count;
    LONG owner;
} _ReentrantPPLLock;

typedef struct
{
    _ReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _ReentrantPPLLock__Scoped_lock;

typedef struct
{
    LONG state;
    LONG count;
} _ReaderWriterLock;

#define EVT_RUNNING (void*)1
#define EVT_WAITING NULL

struct thread_wait;
typedef struct thread_wait_entry
{
    struct thread_wait *wait;
    struct thread_wait_entry *next;
    struct thread_wait_entry *prev;
} thread_wait_entry;

typedef struct thread_wait
{
    Context *ctx;
    void *signaled;
    LONG pending_waits;
    thread_wait_entry entries[1];
} thread_wait;
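
/* thread_wait is presumably allocated with extra entries following the
 * struct, one per event being waited on; entries[1] is a one-element
 * placeholder for that variable tail (same idiom as alloc.mem above). */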

typedef struct
{
    thread_wait_entry *waiters;
    INT_PTR signaled;
    critical_section cs;
} event;

#if _MSVCR_VER >= 110
#define CV_WAKE (void*)1
typedef struct cv_queue {
    Context *ctx;
    struct cv_queue *next;
    LONG expired;
} cv_queue;

typedef struct {
    struct beacon *beacon;
} _Cancellation_beacon;

typedef struct {
    /* cv_queue structure is not binary compatible */
    cv_queue *queue;
    critical_section lock;
} _Condition_variable;
#endif

typedef struct rwl_queue
{
    struct rwl_queue *next;
    Context *ctx;
} rwl_queue;

#define WRITER_WAITING 0x80000000
/* FIXME: reader_writer_lock structure is not binary compatible
 * it can't exceed 28/56 bytes */
typedef struct
{
    LONG count;
    LONG thread_id;
    rwl_queue active;
    rwl_queue *writer_head;
    rwl_queue *writer_tail;
    rwl_queue *reader_head;
} reader_writer_lock;

typedef struct {
    reader_writer_lock *lock;
} reader_writer_lock_scoped_lock;

typedef struct {
    CRITICAL_SECTION cs;
} _ReentrantBlockingLock;

#define TICKSPERMSEC 10000
typedef struct {
    const vtable_ptr *vtable;
    TP_TIMER *timer;
    unsigned int elapse;
    bool repeat;
} _Timer;
extern const vtable_ptr _Timer_vtable;
#define call__Timer_callback(this) CALL_VTBL_FUNC(this, 4, void, (_Timer*), (this))

typedef exception improper_lock;
extern const vtable_ptr improper_lock_vtable;

typedef exception improper_scheduler_attach;
extern const vtable_ptr improper_scheduler_attach_vtable;

typedef exception improper_scheduler_detach;
extern const vtable_ptr improper_scheduler_detach_vtable;

typedef exception invalid_multiple_scheduling;
extern const vtable_ptr invalid_multiple_scheduling_vtable;

typedef exception invalid_scheduler_policy_key;
extern const vtable_ptr invalid_scheduler_policy_key_vtable;

typedef exception invalid_scheduler_policy_thread_specification;
extern const vtable_ptr invalid_scheduler_policy_thread_specification_vtable;

typedef exception invalid_scheduler_policy_value;
extern const vtable_ptr invalid_scheduler_policy_value_vtable;

typedef exception missing_wait;
extern const vtable_ptr missing_wait_vtable;

typedef struct {
    exception e;
    HRESULT hr;
} scheduler_resource_allocation_error;
extern const vtable_ptr scheduler_resource_allocation_error_vtable;

enum ConcRT_EventType
{
    CONCRT_EVENT_GENERIC,
    CONCRT_EVENT_START,
    CONCRT_EVENT_END,
    CONCRT_EVENT_BLOCK,
    CONCRT_EVENT_UNBLOCK,
    CONCRT_EVENT_YIELD,
    CONCRT_EVENT_ATTACH,
    CONCRT_EVENT_DETACH
};

static DWORD context_tls_index = TLS_OUT_OF_INDEXES;

static CRITICAL_SECTION default_scheduler_cs;
static CRITICAL_SECTION_DEBUG default_scheduler_cs_debug =
{
    0, 0, &default_scheduler_cs,
    { &default_scheduler_cs_debug.ProcessLocksList, &default_scheduler_cs_debug.ProcessLocksList },
      0, 0, { (DWORD_PTR)(__FILE__ ": default_scheduler_cs") }
};
static CRITICAL_SECTION default_scheduler_cs = { &default_scheduler_cs_debug, -1, 0, 0, 0, 0 };
static SchedulerPolicy default_scheduler_policy;
static ThreadScheduler *default_scheduler;

static void create_default_scheduler(void);

/* ??0improper_lock@Concurrency@@QAE@PBD@Z */
/* ??0improper_lock@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_lock_ctor_str, 8)
improper_lock* __thiscall improper_lock_ctor_str(improper_lock *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &improper_lock_vtable);
}

/* ??0improper_lock@Concurrency@@QAE@XZ */
/* ??0improper_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_lock_ctor, 4)
improper_lock* __thiscall improper_lock_ctor(improper_lock *this)
{
    return improper_lock_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_lock_copy_ctor,8)
improper_lock * __thiscall improper_lock_copy_ctor(improper_lock *this, const improper_lock *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    return __exception_copy_ctor(this, rhs, &improper_lock_vtable);
}

/* ??0improper_scheduler_attach@Concurrency@@QAE@PBD@Z */
/* ??0improper_scheduler_attach@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor_str, 8)
improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor_str(
        improper_scheduler_attach *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &improper_scheduler_attach_vtable);
}

/* ??0improper_scheduler_attach@Concurrency@@QAE@XZ */
/* ??0improper_scheduler_attach@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor, 4)
improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor(
        improper_scheduler_attach *this)
{
    return improper_scheduler_attach_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_copy_ctor,8)
improper_scheduler_attach * __thiscall improper_scheduler_attach_copy_ctor(
        improper_scheduler_attach * _this, const improper_scheduler_attach * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &improper_scheduler_attach_vtable);
}

/* ??0improper_scheduler_detach@Concurrency@@QAE@PBD@Z */
/* ??0improper_scheduler_detach@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor_str, 8)
improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor_str(
        improper_scheduler_detach *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &improper_scheduler_detach_vtable);
}

/* ??0improper_scheduler_detach@Concurrency@@QAE@XZ */
/* ??0improper_scheduler_detach@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor, 4)
improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor(
        improper_scheduler_detach *this)
{
    return improper_scheduler_detach_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_copy_ctor,8)
improper_scheduler_detach * __thiscall improper_scheduler_detach_copy_ctor(
        improper_scheduler_detach * _this, const improper_scheduler_detach * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &improper_scheduler_detach_vtable);
}

/* ??0invalid_multiple_scheduling@Concurrency@@QAA@PBD@Z */
/* ??0invalid_multiple_scheduling@Concurrency@@QAE@PBD@Z */
/* ??0invalid_multiple_scheduling@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_ctor_str, 8)
invalid_multiple_scheduling* __thiscall invalid_multiple_scheduling_ctor_str(
        invalid_multiple_scheduling *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_multiple_scheduling_vtable);
}

/* ??0invalid_multiple_scheduling@Concurrency@@QAA@XZ */
/* ??0invalid_multiple_scheduling@Concurrency@@QAE@XZ */
/* ??0invalid_multiple_scheduling@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_ctor, 4)
invalid_multiple_scheduling* __thiscall invalid_multiple_scheduling_ctor(
        invalid_multiple_scheduling *this)
{
    return invalid_multiple_scheduling_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_copy_ctor,8)
invalid_multiple_scheduling * __thiscall invalid_multiple_scheduling_copy_ctor(
        invalid_multiple_scheduling * _this, const invalid_multiple_scheduling * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_multiple_scheduling_vtable);
}

/* ??0invalid_scheduler_policy_key@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor_str, 8)
invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor_str(
        invalid_scheduler_policy_key *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_key_vtable);
}

/* ??0invalid_scheduler_policy_key@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor, 4)
invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor(
        invalid_scheduler_policy_key *this)
{
    return invalid_scheduler_policy_key_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_copy_ctor,8)
invalid_scheduler_policy_key * __thiscall invalid_scheduler_policy_key_copy_ctor(
        invalid_scheduler_policy_key * _this, const invalid_scheduler_policy_key * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_key_vtable);
}

/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor_str, 8)
invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor_str(
        invalid_scheduler_policy_thread_specification *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_thread_specification_vtable);
}

/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor, 4)
invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor(
        invalid_scheduler_policy_thread_specification *this)
{
    return invalid_scheduler_policy_thread_specification_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_copy_ctor,8)
invalid_scheduler_policy_thread_specification * __thiscall invalid_scheduler_policy_thread_specification_copy_ctor(
        invalid_scheduler_policy_thread_specification * _this, const invalid_scheduler_policy_thread_specification * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_thread_specification_vtable);
}

/* ??0invalid_scheduler_policy_value@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor_str, 8)
invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor_str(
        invalid_scheduler_policy_value *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_value_vtable);
}

/* ??0invalid_scheduler_policy_value@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor, 4)
invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor(
        invalid_scheduler_policy_value *this)
{
    return invalid_scheduler_policy_value_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_copy_ctor,8)
invalid_scheduler_policy_value * __thiscall invalid_scheduler_policy_value_copy_ctor(
        invalid_scheduler_policy_value * _this, const invalid_scheduler_policy_value * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_value_vtable);
}

/* ??0missing_wait@Concurrency@@QAA@PBD@Z */
/* ??0missing_wait@Concurrency@@QAE@PBD@Z */
/* ??0missing_wait@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(missing_wait_ctor_str, 8)
missing_wait* __thiscall missing_wait_ctor_str(
        missing_wait *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &missing_wait_vtable);
}

/* ??0missing_wait@Concurrency@@QAA@XZ */
/* ??0missing_wait@Concurrency@@QAE@XZ */
/* ??0missing_wait@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(missing_wait_ctor, 4)
missing_wait* __thiscall missing_wait_ctor(missing_wait *this)
{
    return missing_wait_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(missing_wait_copy_ctor,8)
missing_wait * __thiscall missing_wait_copy_ctor(
        missing_wait * _this, const missing_wait * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &missing_wait_vtable);
}

/* ??0scheduler_resource_allocation_error@Concurrency@@QAE@PBDJ@Z */
/* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@PEBDJ@Z */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor_name, 12)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor_name(
        scheduler_resource_allocation_error *this, const char *name, HRESULT hr)
{
    TRACE("(%p %s %lx)\n", this, wine_dbgstr_a(name), hr);
    __exception_ctor(&this->e, name, &scheduler_resource_allocation_error_vtable);
    this->hr = hr;
    return this;
}

/* ??0scheduler_resource_allocation_error@Concurrency@@QAE@J@Z */
/* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@J@Z */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor, 8)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor(
        scheduler_resource_allocation_error *this, HRESULT hr)
{
    return scheduler_resource_allocation_error_ctor_name(this, NULL, hr);
}

DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_copy_ctor,8)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_copy_ctor(
        scheduler_resource_allocation_error *this,
        const scheduler_resource_allocation_error *rhs)
{
    TRACE("(%p,%p)\n", this, rhs);

    if (!rhs->e.do_free)
        memcpy(this, rhs, sizeof(*this));
    else
        scheduler_resource_allocation_error_ctor_name(this, rhs->e.name, rhs->hr);
    return this;
}

/* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QBEJXZ */
/* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QEBAJXZ */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_get_error_code, 4)
HRESULT __thiscall scheduler_resource_allocation_error_get_error_code(
        const scheduler_resource_allocation_error *this)
{
    TRACE("(%p)\n", this);
    return this->hr;
}

DEFINE_RTTI_DATA1(improper_lock, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_lock@Concurrency@@")
DEFINE_RTTI_DATA1(improper_scheduler_attach, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_scheduler_attach@Concurrency@@")
DEFINE_RTTI_DATA1(improper_scheduler_detach, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_scheduler_detach@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_multiple_scheduling, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_multiple_scheduling@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_key, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_key@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_thread_specification, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_thread_specification@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_value, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_value@Concurrency@@")
DEFINE_RTTI_DATA1(missing_wait, 0, &cexception_rtti_base_descriptor,
        ".?AVmissing_wait@Concurrency@@")
DEFINE_RTTI_DATA1(scheduler_resource_allocation_error, 0, &cexception_rtti_base_descriptor,
        ".?AVscheduler_resource_allocation_error@Concurrency@@")

DEFINE_CXX_DATA1(improper_lock, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(improper_scheduler_attach, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(improper_scheduler_detach, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_multiple_scheduling, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_key, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_thread_specification, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_value, &cexception_cxx_type_info, cexception_dtor)
#if _MSVCR_VER >= 120
DEFINE_CXX_DATA1(missing_wait, &cexception_cxx_type_info, cexception_dtor)
#endif
DEFINE_CXX_DATA1(scheduler_resource_allocation_error, &cexception_cxx_type_info, cexception_dtor)

__ASM_BLOCK_BEGIN(concurrency_exception_vtables)
    __ASM_VTABLE(improper_lock,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(improper_scheduler_attach,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(improper_scheduler_detach,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_multiple_scheduling,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_key,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_thread_specification,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_value,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(missing_wait,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(scheduler_resource_allocation_error,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
__ASM_BLOCK_END

static Context* try_get_current_context(void)
{
    if (context_tls_index == TLS_OUT_OF_INDEXES)
        return NULL;
    return TlsGetValue(context_tls_index);
}

static BOOL WINAPI init_context_tls_index(INIT_ONCE *once, void *param, void **context)
{
    context_tls_index = TlsAlloc();
    return context_tls_index != TLS_OUT_OF_INDEXES;
}
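
/* Returns the calling thread's context, lazily allocating an
 * ExternalContextBase in TLS on first use; the TLS slot itself is
 * allocated exactly once via InitOnceExecuteOnce. */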
static Context* get_current_context(void)
{
    static INIT_ONCE init_once = INIT_ONCE_STATIC_INIT;
    Context *ret;

    if(!InitOnceExecuteOnce(&init_once, init_context_tls_index, NULL, NULL))
    {
        scheduler_resource_allocation_error e;
        scheduler_resource_allocation_error_ctor_name(&e, NULL,
                HRESULT_FROM_WIN32(GetLastError()));
        _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
    }

    ret = TlsGetValue(context_tls_index);
    if (!ret) {
        ExternalContextBase *context = operator_new(sizeof(ExternalContextBase));
        ExternalContextBase_ctor(context);
        TlsSetValue(context_tls_index, context);
        ret = &context->context;
    }
    return ret;
}

static Scheduler* get_scheduler_from_context(Context *ctx)
{
    ExternalContextBase *context = (ExternalContextBase*)ctx;

    if (context->context.vtable != &ExternalContextBase_vtable)
        return NULL;
    return context->scheduler.scheduler;
}

static Scheduler* try_get_current_scheduler(void)
{
    Context *context = try_get_current_context();
    Scheduler *ret;

    if (!context)
        return NULL;

    ret = get_scheduler_from_context(context);
    if (!ret)
        ERR("unknown context set\n");
    return ret;
}

static Scheduler* get_current_scheduler(void)
{
    Context *context = get_current_context();
    Scheduler *ret;

    ret = get_scheduler_from_context(context);
    if (!ret)
        ERR("unknown context set\n");
    return ret;
}

/* ?CurrentContext@Context@Concurrency@@SAPAV12@XZ */
/* ?CurrentContext@Context@Concurrency@@SAPEAV12@XZ */
Context* __cdecl Context_CurrentContext(void)
{
    TRACE("()\n");
    return get_current_context();
}

/* ?Id@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_Id(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetId(ctx) : -1;
}

/* ?Block@Context@Concurrency@@SAXXZ */
void __cdecl Context_Block(void)
{
    Context *ctx = get_current_context();
    TRACE("()\n");
    call_Context_Block(ctx);
}

/* ?Yield@Context@Concurrency@@SAXXZ */
/* ?_Yield@_Context@details@Concurrency@@SAXXZ */
void __cdecl Context_Yield(void)
{
    FIXME("()\n");
}

/* ?_SpinYield@Context@Concurrency@@SAXXZ */
void __cdecl Context__SpinYield(void)
{
    FIXME("()\n");
}

/* ?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ */
bool __cdecl Context_IsCurrentTaskCollectionCanceling(void)
{
    ExternalContextBase *ctx = (ExternalContextBase*)try_get_current_context();

    TRACE("()\n");

    if (ctx && ctx->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return FALSE;
    }

    if (ctx && ctx->task_collection)
        return _StructuredTaskCollection__IsCanceling(ctx->task_collection);
    return FALSE;
}

/* ?Oversubscribe@Context@Concurrency@@SAX_N@Z */
void __cdecl Context_Oversubscribe(bool begin)
{
    FIXME("(%x)\n", begin);
}

/* ?ScheduleGroupId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_ScheduleGroupId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetScheduleGroupId(ctx) : -1;
}

/* ?VirtualProcessorId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_VirtualProcessorId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetVirtualProcessorId(ctx) : -1;
}

#if _MSVCR_VER > 100
/* ?_CurrentContext@_Context@details@Concurrency@@SA?AV123@XZ */
_Context *__cdecl _Context__CurrentContext(_Context *ret)
{
    TRACE("(%p)\n", ret);
    ret->context = Context_CurrentContext();
    return ret;
}

DEFINE_THISCALL_WRAPPER(_Context_IsSynchronouslyBlocked, 4)
BOOL __thiscall _Context_IsSynchronouslyBlocked(const _Context *this)
{
    TRACE("(%p)\n", this);
    return call_Context_IsSynchronouslyBlocked(this->context);
}
#endif

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetId, 4)
unsigned int __thiscall ExternalContextBase_GetId(const ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetVirtualProcessorId, 4)
unsigned int __thiscall ExternalContextBase_GetVirtualProcessorId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetScheduleGroupId, 4)
unsigned int __thiscall ExternalContextBase_GetScheduleGroupId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}
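
/* Block/Unblock pair up through the interlocked "blocked" counter:
 * Block() increments it and sleeps on its address while it stays >= 1,
 * Unblock() decrements it and wakes one sleeper, so an Unblock arriving
 * before the matching Block simply cancels it out. */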
DEFINE_THISCALL_WRAPPER(ExternalContextBase_Unblock, 4)
void __thiscall ExternalContextBase_Unblock(ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);

    /* TODO: throw context_unblock_unbalanced if this->blocked goes below -1 */
    if (!InterlockedDecrement(&this->blocked))
        RtlWakeAddressSingle(&this->blocked);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_IsSynchronouslyBlocked, 4)
bool __thiscall ExternalContextBase_IsSynchronouslyBlocked(const ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);
    return this->blocked >= 1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Block, 4)
void __thiscall ExternalContextBase_Block(ExternalContextBase *this)
{
    LONG blocked;

    TRACE("(%p)->()\n", this);

    blocked = InterlockedIncrement(&this->blocked);
    while (blocked >= 1)
    {
        RtlWaitOnAddress(&this->blocked, &blocked, sizeof(LONG), NULL);
        blocked = this->blocked;
    }
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Yield, 4)
void __thiscall ExternalContextBase_Yield(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_SpinYield, 4)
void __thiscall ExternalContextBase_SpinYield(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Oversubscribe, 8)
void __thiscall ExternalContextBase_Oversubscribe(
        ExternalContextBase *this, bool oversubscribe)
{
    FIXME("(%p)->(%x) stub\n", this, oversubscribe);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Alloc, 8)
void* __thiscall ExternalContextBase_Alloc(ExternalContextBase *this, size_t size)
{
    FIXME("(%p)->(%Iu) stub\n", this, size);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Free, 8)
void __thiscall ExternalContextBase_Free(ExternalContextBase *this, void *addr)
{
    FIXME("(%p)->(%p) stub\n", this, addr);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_EnterCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_EnterCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_EnterHyperCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_EnterHyperCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_ExitCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_ExitCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_ExitHyperCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_ExitHyperCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetCriticalRegionType, 4)
int __thiscall ExternalContextBase_GetCriticalRegionType(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetContextKind, 4)
int __thiscall ExternalContextBase_GetContextKind(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}
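
/* Drop any not-yet-run chores that were queued on this scheduler by
 * task collections belonging to the given context. */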
static void remove_scheduled_chores(Scheduler *scheduler, const ExternalContextBase *context)
{
    ThreadScheduler *tscheduler = (ThreadScheduler*)scheduler;
    struct scheduled_chore *sc, *next;

    if (tscheduler->scheduler.vtable != &ThreadScheduler_vtable)
        return;

    EnterCriticalSection(&tscheduler->cs);
    LIST_FOR_EACH_ENTRY_SAFE(sc, next, &tscheduler->scheduled_chores,
            struct scheduled_chore, entry) {
        if (sc->chore->task_collection->context == &context->context) {
            list_remove(&sc->entry);
            operator_delete(sc);
        }
    }
    LeaveCriticalSection(&tscheduler->cs);
}

static void ExternalContextBase_dtor(ExternalContextBase *this)
{
    struct scheduler_list *scheduler_cur, *scheduler_next;
    union allocator_cache_entry *next, *cur;
    int i;

    /* TODO: move the allocator cache to scheduler so it can be reused */
    for(i=0; i<ARRAY_SIZE(this->allocator_cache); i++) {
        for(cur = this->allocator_cache[i]; cur; cur=next) {
            next = cur->free.next;
            operator_delete(cur);
        }
    }

    if (this->scheduler.scheduler) {
        remove_scheduled_chores(this->scheduler.scheduler, this);
        call_Scheduler_Release(this->scheduler.scheduler);
    }
    for(scheduler_cur=this->scheduler.next; scheduler_cur; scheduler_cur=scheduler_next) {
        scheduler_next = scheduler_cur->next;
        remove_scheduled_chores(scheduler_cur->scheduler, this);
        call_Scheduler_Release(scheduler_cur->scheduler);
        operator_delete(scheduler_cur);
    }

    DeleteCriticalSection(&this->beacons_cs);
    if (!list_empty(&this->beacons))
        ERR("beacons list is not empty - expect crash\n");
}
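
/* MSVC "vector deleting destructor" convention: bit 0 of flags means
 * free the object's memory, bit 1 means array form, with the element
 * count stored in an INT_PTR just before the first object. */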
DEFINE_THISCALL_WRAPPER(ExternalContextBase_vector_dtor, 8)
Context* __thiscall ExternalContextBase_vector_dtor(ExternalContextBase *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ExternalContextBase_dtor(this+i);
        operator_delete(ptr);
    } else {
        ExternalContextBase_dtor(this);
        if(flags & 1)
            operator_delete(this);
    }

    return &this->context;
}

static void ExternalContextBase_ctor(ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);

    memset(this, 0, sizeof(*this));
    this->context.vtable = &ExternalContextBase_vtable;
    this->id = InterlockedIncrement(&context_id);
    InitializeCriticalSection(&this->beacons_cs);
    list_init(&this->beacons);

    create_default_scheduler();
    this->scheduler.scheduler = &default_scheduler->scheduler;
    call_Scheduler_Reference(&default_scheduler->scheduler);
}
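
/* Concurrency::Alloc/Free keep a per-context cache of small blocks in
 * power-of-two buckets: bucket i holds blocks of 1 << (i+4) bytes, and
 * bucket -1 marks blocks that went straight through operator_new. Free
 * returns a block to its bucket unless the free list is ~20 deep. */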
/* ?Alloc@Concurrency@@YAPAXI@Z */
/* ?Alloc@Concurrency@@YAPEAX_K@Z */
void * CDECL Concurrency_Alloc(size_t size)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    union allocator_cache_entry *p;

    size += FIELD_OFFSET(union allocator_cache_entry, alloc.mem);
    if (size < sizeof(*p))
        size = sizeof(*p);

    if (context->context.vtable != &ExternalContextBase_vtable) {
        p = operator_new(size);
        p->alloc.bucket = -1;
    }else {
        int i;

        C_ASSERT(sizeof(union allocator_cache_entry) <= 1 << 4);
        for(i=0; i<ARRAY_SIZE(context->allocator_cache); i++)
            if (1 << (i+4) >= size) break;

        if(i==ARRAY_SIZE(context->allocator_cache)) {
            p = operator_new(size);
            p->alloc.bucket = -1;
        }else if (context->allocator_cache[i]) {
            p = context->allocator_cache[i];
            context->allocator_cache[i] = p->free.next;
            p->alloc.bucket = i;
        }else {
            p = operator_new(1 << (i+4));
            p->alloc.bucket = i;
        }
    }

    TRACE("(%Iu) returning %p\n", size, p->alloc.mem);
    return p->alloc.mem;
}

/* ?Free@Concurrency@@YAXPAX@Z */
/* ?Free@Concurrency@@YAXPEAX@Z */
void CDECL Concurrency_Free(void* mem)
{
    union allocator_cache_entry *p = (union allocator_cache_entry*)((char*)mem-FIELD_OFFSET(union allocator_cache_entry, alloc.mem));
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    int bucket = p->alloc.bucket;

    TRACE("(%p)\n", mem);

    if (context->context.vtable != &ExternalContextBase_vtable) {
        operator_delete(p);
    }else {
        if(bucket >= 0 && bucket < ARRAY_SIZE(context->allocator_cache) &&
            (!context->allocator_cache[bucket] || context->allocator_cache[bucket]->free.depth < 20)) {
            p->free.next = context->allocator_cache[bucket];
            p->free.depth = p->free.next ? p->free.next->free.depth+1 : 0;
            context->allocator_cache[bucket] = p;
        }else {
            operator_delete(p);
        }
    }
}

/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z */
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QEAAIW4PolicyElementKey@2@I@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetPolicyValue, 12)
unsigned int __thiscall SchedulerPolicy_SetPolicyValue(SchedulerPolicy *this,
        PolicyElementKey policy, unsigned int val)
{
    unsigned int ret;

    TRACE("(%p %d %d)\n", this, policy, val);

    if (policy == MinConcurrency) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "MinConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    if (policy == MaxConcurrency) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "MaxConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    if (policy >= last_policy_id) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }

    switch(policy) {
    case SchedulerKind:
        if (val) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "SchedulerKind");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case TargetOversubscriptionFactor:
        if (!val) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "TargetOversubscriptionFactor");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case ContextPriority:
        if (((int)val < -7 /* THREAD_PRIORITY_REALTIME_LOWEST */
                    || val > 6 /* THREAD_PRIORITY_REALTIME_HIGHEST */)
                && val != THREAD_PRIORITY_IDLE && val != THREAD_PRIORITY_TIME_CRITICAL
                && val != INHERIT_THREAD_PRIORITY) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "ContextPriority");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case SchedulingProtocol:
    case DynamicProgressFeedback:
    case WinRTInitialization:
        if (val != 0 && val != 1) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "SchedulingProtocol");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    default:
        break;
    }

    ret = this->policy_container->policies[policy];
    this->policy_container->policies[policy] = val;
    return ret;
}

/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z */
/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QEAAXII@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetConcurrencyLimits, 12)
void __thiscall SchedulerPolicy_SetConcurrencyLimits(SchedulerPolicy *this,
        unsigned int min_concurrency, unsigned int max_concurrency)
{
    TRACE("(%p %d %d)\n", this, min_concurrency, max_concurrency);

    if (min_concurrency > max_concurrency) {
        invalid_scheduler_policy_thread_specification e;
        invalid_scheduler_policy_thread_specification_ctor_str(&e, NULL);
        _CxxThrowException(&e, &invalid_scheduler_policy_thread_specification_exception_type);
    }
    if (!max_concurrency) {
        invalid_scheduler_policy_value e;
        invalid_scheduler_policy_value_ctor_str(&e, "MaxConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
    }

    this->policy_container->policies[MinConcurrency] = min_concurrency;
    this->policy_container->policies[MaxConcurrency] = max_concurrency;
}

/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z */
/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QEBAIW4PolicyElementKey@2@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_GetPolicyValue, 8)
unsigned int __thiscall SchedulerPolicy_GetPolicyValue(
        const SchedulerPolicy *this, PolicyElementKey policy)
{
    TRACE("(%p %d)\n", this, policy);

    if (policy >= last_policy_id) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    return this->policy_container->policies[policy];
}

/* ??0SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_ctor, 4)
SchedulerPolicy* __thiscall SchedulerPolicy_ctor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);

    this->policy_container = operator_new(sizeof(*this->policy_container));
    /* TODO: default values can probably be affected by CurrentScheduler */
    this->policy_container->policies[SchedulerKind] = 0;
    this->policy_container->policies[MaxConcurrency] = -1;
    this->policy_container->policies[MinConcurrency] = 1;
    this->policy_container->policies[TargetOversubscriptionFactor] = 1;
    this->policy_container->policies[LocalContextCacheSize] = 8;
    this->policy_container->policies[ContextStackSize] = 0;
    this->policy_container->policies[ContextPriority] = THREAD_PRIORITY_NORMAL;
    this->policy_container->policies[SchedulingProtocol] = 0;
    this->policy_container->policies[DynamicProgressFeedback] = 1;
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAA@IZZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@_KZZ */
/* TODO: don't leak policy_container on exception */
SchedulerPolicy* WINAPIV SchedulerPolicy_ctor_policies(
        SchedulerPolicy *this, size_t n, ...)
{
    unsigned int min_concurrency, max_concurrency;
    va_list valist;
    size_t i;

    TRACE("(%p %Iu)\n", this, n);

    SchedulerPolicy_ctor(this);
    min_concurrency = this->policy_container->policies[MinConcurrency];
    max_concurrency = this->policy_container->policies[MaxConcurrency];

    va_start(valist, n);
    for(i=0; i<n; i++) {
        PolicyElementKey policy = va_arg(valist, PolicyElementKey);
        unsigned int val = va_arg(valist, unsigned int);

        if(policy == MinConcurrency)
            min_concurrency = val;
        else if(policy == MaxConcurrency)
            max_concurrency = val;
        else
            SchedulerPolicy_SetPolicyValue(this, policy, val);
    }
    va_end(valist);

    SchedulerPolicy_SetConcurrencyLimits(this, min_concurrency, max_concurrency);
    return this;
}

/* ??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z */
/* ??4SchedulerPolicy@Concurrency@@QEAAAEAV01@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_op_assign, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_op_assign(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    memcpy(this->policy_container->policies, rhs->policy_container->policies,
            sizeof(this->policy_container->policies));
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z */
/* ??0SchedulerPolicy@Concurrency@@QEAA@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_copy_ctor, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_copy_ctor(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    SchedulerPolicy_ctor(this);
    return SchedulerPolicy_op_assign(this, rhs);
}

/* ??1SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??1SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_dtor, 4)
void __thiscall SchedulerPolicy_dtor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);
    operator_delete(this->policy_container);
}

static void ThreadScheduler_dtor(ThreadScheduler *this)
{
    int i;
    struct scheduled_chore *sc, *next;

    if(this->ref != 0) WARN("ref = %ld\n", this->ref);
    SchedulerPolicy_dtor(&this->policy);

    for(i=0; i<this->shutdown_count; i++)
        SetEvent(this->shutdown_events[i]);
    operator_delete(this->shutdown_events);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);

    if (!list_empty(&this->scheduled_chores))
        ERR("scheduled chore list is not empty\n");
    LIST_FOR_EACH_ENTRY_SAFE(sc, next, &this->scheduled_chores,
            struct scheduled_chore, entry)
        operator_delete(sc);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Id, 4)
unsigned int __thiscall ThreadScheduler_Id(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetNumberOfVirtualProcessors, 4)
unsigned int __thiscall ThreadScheduler_GetNumberOfVirtualProcessors(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->virt_proc_no;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetPolicy, 8)
SchedulerPolicy* __thiscall ThreadScheduler_GetPolicy(
        const ThreadScheduler *this, SchedulerPolicy *ret)
{
    TRACE("(%p %p)\n", this, ret);
    return SchedulerPolicy_copy_ctor(ret, &this->policy);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Reference, 4)
unsigned int __thiscall ThreadScheduler_Reference(ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return InterlockedIncrement(&this->ref);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Release, 4)
unsigned int __thiscall ThreadScheduler_Release(ThreadScheduler *this)
{
    unsigned int ret = InterlockedDecrement(&this->ref);

    TRACE("(%p)\n", this);

    if(!ret) {
        ThreadScheduler_dtor(this);
        operator_delete(this);
    }
    return ret;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_RegisterShutdownEvent, 8)
void __thiscall ThreadScheduler_RegisterShutdownEvent(ThreadScheduler *this, HANDLE event)
{
    HANDLE *shutdown_events;
    int size;

    TRACE("(%p %p)\n", this, event);

    EnterCriticalSection(&this->cs);

    size = this->shutdown_size ? this->shutdown_size * 2 : 1;
    shutdown_events = operator_new(size * sizeof(*shutdown_events));
    memcpy(shutdown_events, this->shutdown_events,
            this->shutdown_count * sizeof(*shutdown_events));
    operator_delete(this->shutdown_events);
    this->shutdown_size = size;
    this->shutdown_events = shutdown_events;
    this->shutdown_events[this->shutdown_count++] = event;

    LeaveCriticalSection(&this->cs);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Attach, 4)
void __thiscall ThreadScheduler_Attach(ThreadScheduler *this)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    TRACE("(%p)\n", this);

    if(context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(context->scheduler.scheduler == &this->scheduler) {
        improper_scheduler_attach e;
        improper_scheduler_attach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_attach_exception_type);
    }

    if(context->scheduler.scheduler) {
        struct scheduler_list *l = operator_new(sizeof(*l));
        *l = context->scheduler;
        context->scheduler.next = l;
    }
    context->scheduler.scheduler = &this->scheduler;
    ThreadScheduler_Reference(this);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup_loc, 8)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup_loc(
        ThreadScheduler *this, /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup, 4)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup(ThreadScheduler *this)
{
    FIXME("(%p) stub\n", this);
    return NULL;
}

typedef struct
{
    void (__cdecl *proc)(void*);
    void *data;
    ThreadScheduler *scheduler;
} schedule_task_arg;

void __cdecl CurrentScheduler_Detach(void);
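
/* Tasks are run on the Win32 threadpool: ScheduleTask packs proc/data
 * into a schedule_task_arg, and schedule_task_proc temporarily attaches
 * the target scheduler on the worker thread if it is not current. */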
static void WINAPI schedule_task_proc(PTP_CALLBACK_INSTANCE instance, void *context, PTP_WORK work)
{
    schedule_task_arg arg;
    BOOL detach = FALSE;

    arg = *(schedule_task_arg*)context;
    operator_delete(context);

    if(&arg.scheduler->scheduler != get_current_scheduler()) {
        ThreadScheduler_Attach(arg.scheduler);
        detach = TRUE;
    }
    ThreadScheduler_Release(arg.scheduler);

    arg.proc(arg.data);

    if(detach)
        CurrentScheduler_Detach();
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask_loc, 16)
void __thiscall ThreadScheduler_ScheduleTask_loc(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data, /*location*/void *placement)
{
    schedule_task_arg *arg;
    TP_WORK *work;

    FIXME("(%p %p %p %p) stub\n", this, proc, data, placement);

    arg = operator_new(sizeof(*arg));
    arg->proc = proc;
    arg->data = data;
    arg->scheduler = this;
    ThreadScheduler_Reference(this);

    work = CreateThreadpoolWork(schedule_task_proc, arg, NULL);
    if(!work) {
        scheduler_resource_allocation_error e;

        ThreadScheduler_Release(this);
        operator_delete(arg);
        scheduler_resource_allocation_error_ctor_name(&e, NULL,
                HRESULT_FROM_WIN32(GetLastError()));
        _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
    }
    SubmitThreadpoolWork(work);
    CloseThreadpoolWork(work);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask, 12)
void __thiscall ThreadScheduler_ScheduleTask(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data)
{
    FIXME("(%p %p %p) stub\n", this, proc, data);
    ThreadScheduler_ScheduleTask_loc(this, proc, data, NULL);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_IsAvailableLocation, 8)
bool __thiscall ThreadScheduler_IsAvailableLocation(
        const ThreadScheduler *this, const /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return FALSE;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_vector_dtor, 8)
Scheduler* __thiscall ThreadScheduler_vector_dtor(ThreadScheduler *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ThreadScheduler_dtor(this+i);
        operator_delete(ptr);
    } else {
        ThreadScheduler_dtor(this);
        if(flags & 1)
            operator_delete(this);
    }

    return &this->scheduler;
}

static ThreadScheduler* ThreadScheduler_ctor(ThreadScheduler *this,
        const SchedulerPolicy *policy)
{
    SYSTEM_INFO si;

    TRACE("(%p)->()\n", this);

    this->scheduler.vtable = &ThreadScheduler_vtable;
    this->ref = 1;
    this->id = InterlockedIncrement(&scheduler_id);
    SchedulerPolicy_copy_ctor(&this->policy, policy);

    GetSystemInfo(&si);
    this->virt_proc_no = SchedulerPolicy_GetPolicyValue(&this->policy, MaxConcurrency);
    if(this->virt_proc_no > si.dwNumberOfProcessors)
        this->virt_proc_no = si.dwNumberOfProcessors;

    this->shutdown_count = this->shutdown_size = 0;
    this->shutdown_events = NULL;

    InitializeCriticalSectionEx(&this->cs, 0, RTL_CRITICAL_SECTION_FLAG_FORCE_DEBUG_INFO);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": ThreadScheduler");

    list_init(&this->scheduled_chores);
    return this;
}

/* ?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z */
/* ?Create@Scheduler@Concurrency@@SAPEAV12@AEBVSchedulerPolicy@2@@Z */
Scheduler* __cdecl Scheduler_Create(const SchedulerPolicy *policy)
{
    ThreadScheduler *ret;

    TRACE("(%p)\n", policy);

    ret = operator_new(sizeof(*ret));
    return &ThreadScheduler_ctor(ret, policy)->scheduler;
}

/* ?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ */
void __cdecl Scheduler_ResetDefaultSchedulerPolicy(void)
{
    TRACE("()\n");

    EnterCriticalSection(&default_scheduler_cs);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    SchedulerPolicy_ctor(&default_scheduler_policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl Scheduler_SetDefaultSchedulerPolicy(const SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler_policy.policy_container)
        SchedulerPolicy_copy_ctor(&default_scheduler_policy, policy);
    else
        SchedulerPolicy_op_assign(&default_scheduler_policy, policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Create@CurrentScheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?Create@CurrentScheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl CurrentScheduler_Create(const SchedulerPolicy *policy)
{
    Scheduler *scheduler;

    TRACE("(%p)\n", policy);

    scheduler = Scheduler_Create(policy);
    call_Scheduler_Attach(scheduler);
}

/* ?Detach@CurrentScheduler@Concurrency@@SAXXZ */
void __cdecl CurrentScheduler_Detach(void)
{
    ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();

    TRACE("()\n");

    if(!context) {
        improper_scheduler_detach e;
        improper_scheduler_detach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
    }

    if(context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(!context->scheduler.next) {
        improper_scheduler_detach e;
        improper_scheduler_detach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
    }

    call_Scheduler_Release(context->scheduler.scheduler);
    if(!context->scheduler.next) {
        context->scheduler.scheduler = NULL;
    }else {
        struct scheduler_list *entry = context->scheduler.next;
        context->scheduler.scheduler = entry->scheduler;
        context->scheduler.next = entry->next;
        operator_delete(entry);
    }
}
1733 static void create_default_scheduler(void)
1735 if(default_scheduler)
1736 return;
1738 EnterCriticalSection(&default_scheduler_cs);
1739 if(!default_scheduler) {
1740 ThreadScheduler *scheduler;
1742 if(!default_scheduler_policy.policy_container)
1743 SchedulerPolicy_ctor(&default_scheduler_policy);
1745 scheduler = operator_new(sizeof(*scheduler));
1746 ThreadScheduler_ctor(scheduler, &default_scheduler_policy);
1747 default_scheduler = scheduler;
1749 LeaveCriticalSection(&default_scheduler_cs);
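/* create_default_scheduler() above uses double-checked locking: the
 * unsynchronized read of default_scheduler is only a fast path, and the
 * test is repeated under default_scheduler_cs before construction so two
 * racing threads cannot both create the default scheduler. */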
1752 /* ?Get@CurrentScheduler@Concurrency@@SAPAVScheduler@2@XZ */
1753 /* ?Get@CurrentScheduler@Concurrency@@SAPEAVScheduler@2@XZ */
1754 Scheduler* __cdecl CurrentScheduler_Get(void)
1756 TRACE("()\n");
1757 return get_current_scheduler();
1760 #if _MSVCR_VER > 100
1761 /* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@AAVlocation@2@@Z */
1762 /* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@AEAVlocation@2@@Z */
1763 /*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup_loc(/*location*/void *placement)
1765 TRACE("(%p)\n", placement);
1766 return call_Scheduler_CreateScheduleGroup_loc(get_current_scheduler(), placement);
1768 #endif
1770 /* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@XZ */
1771 /* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@XZ */
1772 /*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup(void)
1774 TRACE("()\n");
1775 return call_Scheduler_CreateScheduleGroup(get_current_scheduler());
1778 /* ?GetNumberOfVirtualProcessors@CurrentScheduler@Concurrency@@SAIXZ */
1779 unsigned int __cdecl CurrentScheduler_GetNumberOfVirtualProcessors(void)
1781 Scheduler *scheduler = try_get_current_scheduler();
1783 TRACE("()\n");
1785 if(!scheduler)
1786 return -1;
1787 return call_Scheduler_GetNumberOfVirtualProcessors(scheduler);
1790 /* ?GetPolicy@CurrentScheduler@Concurrency@@SA?AVSchedulerPolicy@2@XZ */
1791 SchedulerPolicy* __cdecl CurrentScheduler_GetPolicy(SchedulerPolicy *policy)
1793 TRACE("(%p)\n", policy);
1794 return call_Scheduler_GetPolicy(get_current_scheduler(), policy);
1797 /* ?Id@CurrentScheduler@Concurrency@@SAIXZ */
1798 unsigned int __cdecl CurrentScheduler_Id(void)
1800 Scheduler *scheduler = try_get_current_scheduler();
1802 TRACE("()\n");
1804 if(!scheduler)
1805 return -1;
1806 return call_Scheduler_Id(scheduler);
1809 #if _MSVCR_VER > 100
1810 /* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NABVlocation@2@@Z */
1811 /* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NAEBVlocation@2@@Z */
1812 bool __cdecl CurrentScheduler_IsAvailableLocation(const /*location*/void *placement)
1814 Scheduler *scheduler = try_get_current_scheduler();
1816 TRACE("(%p)\n", placement);
1818 if(!scheduler)
1819 return FALSE;
1820 return call_Scheduler_IsAvailableLocation(scheduler, placement);
1822 #endif
1824 /* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPAX@Z */
1825 /* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPEAX@Z */
1826 void __cdecl CurrentScheduler_RegisterShutdownEvent(HANDLE event)
1828 TRACE("(%p)\n", event);
1829 call_Scheduler_RegisterShutdownEvent(get_current_scheduler(), event);
1832 #if _MSVCR_VER > 100
1833 /* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0AAVlocation@2@@Z */
1834 /* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0AEAVlocation@2@@Z */
1835 void __cdecl CurrentScheduler_ScheduleTask_loc(void (__cdecl *proc)(void*),
1836 void *data, /*location*/void *placement)
1838 TRACE("(%p %p %p)\n", proc, data, placement);
1839 call_Scheduler_ScheduleTask_loc(get_current_scheduler(), proc, data, placement);
1841 #endif
1843 /* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0@Z */
1844 /* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0@Z */
1845 void __cdecl CurrentScheduler_ScheduleTask(void (__cdecl *proc)(void*), void *data)
1847 TRACE("(%p %p)\n", proc, data);
1848 call_Scheduler_ScheduleTask(get_current_scheduler(), proc, data);
1851 /* ??0_Scheduler@details@Concurrency@@QAE@PAVScheduler@2@@Z */
1852 /* ??0_Scheduler@details@Concurrency@@QEAA@PEAVScheduler@2@@Z */
1853 DEFINE_THISCALL_WRAPPER(_Scheduler_ctor_sched, 8)
1854 _Scheduler* __thiscall _Scheduler_ctor_sched(_Scheduler *this, Scheduler *scheduler)
1856 TRACE("(%p %p)\n", this, scheduler);
1858 this->scheduler = scheduler;
1859 return this;
1862 /* ??_F_Scheduler@details@Concurrency@@QAEXXZ */
1863 /* ??_F_Scheduler@details@Concurrency@@QEAAXXZ */
1864 DEFINE_THISCALL_WRAPPER(_Scheduler_ctor, 4)
1865 _Scheduler* __thiscall _Scheduler_ctor(_Scheduler *this)
1867 return _Scheduler_ctor_sched(this, NULL);
1870 /* ?_GetScheduler@_Scheduler@details@Concurrency@@QAEPAVScheduler@3@XZ */
1871 /* ?_GetScheduler@_Scheduler@details@Concurrency@@QEAAPEAVScheduler@3@XZ */
1872 DEFINE_THISCALL_WRAPPER(_Scheduler__GetScheduler, 4)
1873 Scheduler* __thiscall _Scheduler__GetScheduler(_Scheduler *this)
1875 TRACE("(%p)\n", this);
1876 return this->scheduler;
1879 /* ?_Reference@_Scheduler@details@Concurrency@@QAEIXZ */
1880 /* ?_Reference@_Scheduler@details@Concurrency@@QEAAIXZ */
1881 DEFINE_THISCALL_WRAPPER(_Scheduler__Reference, 4)
1882 unsigned int __thiscall _Scheduler__Reference(_Scheduler *this)
1884 TRACE("(%p)\n", this);
1885 return call_Scheduler_Reference(this->scheduler);
1888 /* ?_Release@_Scheduler@details@Concurrency@@QAEIXZ */
1889 /* ?_Release@_Scheduler@details@Concurrency@@QEAAIXZ */
1890 DEFINE_THISCALL_WRAPPER(_Scheduler__Release, 4)
1891 unsigned int __thiscall _Scheduler__Release(_Scheduler *this)
1893 TRACE("(%p)\n", this);
1894 return call_Scheduler_Release(this->scheduler);
1897 /* ?_Get@_CurrentScheduler@details@Concurrency@@SA?AV_Scheduler@23@XZ */
1898 _Scheduler* __cdecl _CurrentScheduler__Get(_Scheduler *ret)
1900 TRACE("()\n");
1901 return _Scheduler_ctor_sched(ret, get_current_scheduler());
1904 /* ?_GetNumberOfVirtualProcessors@_CurrentScheduler@details@Concurrency@@SAIXZ */
1905 unsigned int __cdecl _CurrentScheduler__GetNumberOfVirtualProcessors(void)
1907 TRACE("()\n");
1908 get_current_scheduler();
1909 return CurrentScheduler_GetNumberOfVirtualProcessors();
1912 /* ?_Id@_CurrentScheduler@details@Concurrency@@SAIXZ */
1913 unsigned int __cdecl _CurrentScheduler__Id(void)
1915 TRACE("()\n");
1916 get_current_scheduler();
1917 return CurrentScheduler_Id();
1920 /* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPAX@Z0@Z */
1921 /* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPEAX@Z0@Z */
1922 void __cdecl _CurrentScheduler__ScheduleTask(void (__cdecl *proc)(void*), void *data)
1924 TRACE("(%p %p)\n", proc, data);
1925 CurrentScheduler_ScheduleTask(proc, data);
1928 /* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
1929 unsigned int __cdecl SpinCount__Value(void)
1931 static unsigned int val = -1;
1933 TRACE("()\n");
1935 if(val == -1) {
1936 SYSTEM_INFO si;
1938 GetSystemInfo(&si);
1939 val = si.dwNumberOfProcessors>1 ? 4000 : 0;
1942 return val;
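/* Rationale for the value above: spinning only helps when another CPU can
 * release the awaited resource in the meantime, so uniprocessor machines
 * get a spin count of 0 (yield immediately) while multiprocessor machines
 * spin 4000 iterations before yielding. */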
1945 /* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
1946 /* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
1947 DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
1948 SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
1950 TRACE("(%p %p)\n", this, yf);
1952 this->state = SPINWAIT_INIT;
1953 this->unknown = 1;
1954 this->yield_func = yf;
1955 return this;
1958 /* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
1959 /* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
1960 DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
1961 SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
1963 TRACE("(%p %p)\n", this, yf);
1965 this->state = SPINWAIT_INIT;
1966 this->unknown = 0;
1967 this->yield_func = yf;
1968 return this;
1971 /* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
1972 /* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
1973 /* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
1974 /* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
1975 DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
1976 void __thiscall SpinWait_dtor(SpinWait *this)
1978 TRACE("(%p)\n", this);
1981 /* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
1982 /* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
1983 /* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
1984 /* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
1985 DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
1986 void __thiscall SpinWait__DoYield(SpinWait *this)
1988 TRACE("(%p)\n", this);
1990 if(this->unknown)
1991 this->yield_func();
1994 /* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
1995 /* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
1996 /* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
1997 /* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
1998 DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
1999 ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
2001 TRACE("(%p)\n", this);
2002 return 1;
2005 /* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
2006 /* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
2007 /* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
2008 /* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
2009 DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
2010 void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
2012 TRACE("(%p %d)\n", this, spin);
2014 this->spin = spin;
2015 this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
2018 /* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
2019 /* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
2020 /* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
2021 /* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
2022 DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
2023 void __thiscall SpinWait__Reset(SpinWait *this)
2025 SpinWait__SetSpinCount(this, SpinCount__Value());
2028 /* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
2029 /* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
2030 /* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
2031 /* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
2032 DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
2033 bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
2035 TRACE("(%p)\n", this);
2037 this->spin--;
2038 return this->spin > 0;
2041 /* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
2042 /* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
2043 /* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
2044 /* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
2045 DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
2046 bool __thiscall SpinWait__SpinOnce(SpinWait *this)
2048 switch(this->state) {
2049 case SPINWAIT_INIT:
2050 SpinWait__Reset(this);
2051 /* fall through */
2052 case SPINWAIT_SPIN:
2053 InterlockedDecrement((LONG*)&this->spin);
2054 if(!this->spin)
2055 this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
2056 return TRUE;
2057 case SPINWAIT_YIELD:
2058 this->state = SPINWAIT_DONE;
2059 this->yield_func();
2060 return TRUE;
2061 default:
2062 SpinWait__Reset(this);
2063 return FALSE;
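/* Typical driver loop for the spin-wait state machine above (illustrative
 * sketch only; spin_wait_for_next_cs() below uses exactly this shape,
 * with "condition" standing in for whatever is being awaited):
 *
 *     SpinWait sw;
 *
 *     SpinWait_ctor(&sw, &spin_wait_yield);
 *     SpinWait__Reset(&sw);          // arm with _SpinCount::_Value()
 *     while(!condition)              // condition: whatever is awaited
 *         SpinWait__SpinOnce(&sw);   // spins, then yields, then restarts
 *     SpinWait_dtor(&sw);
 */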
2067 #if _MSVCR_VER >= 110
2069 /* ??0_StructuredTaskCollection@details@Concurrency@@QAE@PAV_CancellationTokenState@12@@Z */
2070 /* ??0_StructuredTaskCollection@details@Concurrency@@QEAA@PEAV_CancellationTokenState@12@@Z */
2071 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection_ctor, 8)
2072 _StructuredTaskCollection* __thiscall _StructuredTaskCollection_ctor(
2073 _StructuredTaskCollection *this, /*_CancellationTokenState*/void *token)
2075 TRACE("(%p)\n", this);
2077 if (token)
2078 FIXME("_StructuredTaskCollection with cancellation token not implemented!\n");
2080 memset(this, 0, sizeof(*this));
2081 this->finished = FINISHED_INITIAL;
2082 return this;
2085 #endif /* _MSVCR_VER >= 110 */
2087 #if _MSVCR_VER >= 120
2089 /* ??1_StructuredTaskCollection@details@Concurrency@@QAA@XZ */
2090 /* ??1_StructuredTaskCollection@details@Concurrency@@QAE@XZ */
2091 /* ??1_StructuredTaskCollection@details@Concurrency@@QEAA@XZ */
2092 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection_dtor, 4)
2093 void __thiscall _StructuredTaskCollection_dtor(_StructuredTaskCollection *this)
2095 FIXME("(%p): stub!\n", this);
2096 if (this->count && !__uncaught_exception()) {
2097 missing_wait e;
2098 missing_wait_ctor_str(&e, "Missing call to _RunAndWait");
2099 _CxxThrowException(&e, &missing_wait_exception_type);
2103 #endif /* _MSVCR_VER >= 120 */
2105 static ThreadScheduler *get_thread_scheduler_from_context(Context *context)
2107 Scheduler *scheduler = get_scheduler_from_context(context);
2108 if (scheduler && scheduler->vtable == &ThreadScheduler_vtable)
2109 return (ThreadScheduler*)scheduler;
2110 return NULL;
2113 struct execute_chore_data {
2114 _UnrealizedChore *chore;
2115 _StructuredTaskCollection *task_collection;
2118 /* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QAAXXZ */
2119 /* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QAEXXZ */
2120 /* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QEAAXXZ */
2121 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Cancel, 4)
2122 void __thiscall _StructuredTaskCollection__Cancel(
2123 _StructuredTaskCollection *this)
2125 ThreadScheduler *scheduler;
2126 void *prev_exception, *new_exception;
2127 struct scheduled_chore *sc, *next;
2128 LONG removed = 0, finished = 1;
2129 struct beacon *beacon;
2131 TRACE("(%p)\n", this);
2133 if (!this->context)
2134 this->context = get_current_context();
2135 scheduler = get_thread_scheduler_from_context(this->context);
2136 if (!scheduler)
2137 return;
2139 new_exception = this->exception;
2140 do {
2141 prev_exception = new_exception;
2142 if ((ULONG_PTR)prev_exception & STRUCTURED_TASK_COLLECTION_CANCELLED)
2143 return;
2144 new_exception = (void*)((ULONG_PTR)prev_exception |
2145 STRUCTURED_TASK_COLLECTION_CANCELLED);
2146 } while ((new_exception = InterlockedCompareExchangePointer(
2147 &this->exception, new_exception, prev_exception))
2148 != prev_exception);
2150 EnterCriticalSection(&((ExternalContextBase*)this->context)->beacons_cs);
2151 LIST_FOR_EACH_ENTRY(beacon, &((ExternalContextBase*)this->context)->beacons, struct beacon, entry) {
2152 if (beacon->task_collection == this)
2153 InterlockedIncrement(&beacon->cancelling);
2155 LeaveCriticalSection(&((ExternalContextBase*)this->context)->beacons_cs);
2157 EnterCriticalSection(&scheduler->cs);
2158 LIST_FOR_EACH_ENTRY_SAFE(sc, next, &scheduler->scheduled_chores,
2159 struct scheduled_chore, entry) {
2160 if (sc->chore->task_collection != this)
2161 continue;
2162 sc->chore->task_collection = NULL;
2163 list_remove(&sc->entry);
2164 removed++;
2165 operator_delete(sc);
2167 LeaveCriticalSection(&scheduler->cs);
2168 if (!removed)
2169 return;
2171 if (InterlockedCompareExchange(&this->finished, removed, FINISHED_INITIAL) != FINISHED_INITIAL)
2172 finished = InterlockedAdd(&this->finished, removed);
2173 if (!finished)
2174 call_Context_Unblock(this->event);
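/* The "exception" field manipulated above is a tagged pointer: its low
 * bits carry the STRUCTURED_TASK_COLLECTION_* status flags while the
 * remaining bits hold an exception_ptr* (assuming, as the code does, that
 * heap pointers are aligned past the flag bits). Minimal unpacking
 * sketch, mirroring what _StructuredTaskCollection__RunAndWait() does
 * below:
 *
 *     ULONG_PTR raw = (ULONG_PTR)this->exception;
 *     exception_ptr *ep = (exception_ptr*)(raw & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK);
 *     bool cancelled = raw & STRUCTURED_TASK_COLLECTION_CANCELLED;
 */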
2177 static LONG CALLBACK execute_chore_except(EXCEPTION_POINTERS *pexc, void *_data)
2179 struct execute_chore_data *data = _data;
2180 void *prev_exception, *new_exception;
2181 exception_ptr *ptr;
2183 if (pexc->ExceptionRecord->ExceptionCode != CXX_EXCEPTION)
2184 return EXCEPTION_CONTINUE_SEARCH;
2186 _StructuredTaskCollection__Cancel(data->task_collection);
2188 ptr = operator_new(sizeof(*ptr));
2189 __ExceptionPtrCreate(ptr);
2190 exception_ptr_from_record(ptr, pexc->ExceptionRecord);
2192 new_exception = data->task_collection->exception;
2193 do {
2194 if ((ULONG_PTR)new_exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK) {
2195 __ExceptionPtrDestroy(ptr);
2196 operator_delete(ptr);
2197 break;
2199 prev_exception = new_exception;
2200 new_exception = (void*)((ULONG_PTR)new_exception | (ULONG_PTR)ptr);
2201 } while ((new_exception = InterlockedCompareExchangePointer(
2202 &data->task_collection->exception, new_exception,
2203 prev_exception)) != prev_exception);
2204 data->task_collection->event = 0;
2205 return EXCEPTION_EXECUTE_HANDLER;
2208 static void CALLBACK execute_chore_finally(BOOL normal, void *data)
2210 ExternalContextBase *ctx = (ExternalContextBase*)try_get_current_context();
2211 _StructuredTaskCollection *old_collection = data;
2213 if (ctx && ctx->context.vtable == &ExternalContextBase_vtable)
2214 ctx->task_collection = old_collection;
2217 static void execute_chore(_UnrealizedChore *chore,
2218 _StructuredTaskCollection *task_collection)
2220 ExternalContextBase *ctx = (ExternalContextBase*)try_get_current_context();
2221 struct execute_chore_data data = { chore, task_collection };
2222 _StructuredTaskCollection *old_collection;
2224 TRACE("(%p %p)\n", chore, task_collection);
2226 if (ctx && ctx->context.vtable == &ExternalContextBase_vtable)
2228 old_collection = ctx->task_collection;
2229 ctx->task_collection = task_collection;
2232 __TRY
2234 __TRY
2236 if (!((ULONG_PTR)task_collection->exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK) &&
2237 chore->chore_proc)
2238 chore->chore_proc(chore);
2240 __EXCEPT_CTX(execute_chore_except, &data)
2243 __ENDTRY
2245 __FINALLY_CTX(execute_chore_finally, old_collection)
2248 static void CALLBACK chore_wrapper_finally(BOOL normal, void *data)
2250 _UnrealizedChore *chore = data;
2251 struct _StructuredTaskCollection *task_collection = chore->task_collection;
2252 LONG finished = 1;
2254 TRACE("(%u %p)\n", normal, data);
2256 if (!task_collection)
2257 return;
2258 chore->task_collection = NULL;
2260 if (InterlockedCompareExchange(&task_collection->finished, 1, FINISHED_INITIAL) != FINISHED_INITIAL)
2261 finished = InterlockedIncrement(&task_collection->finished);
2262 if (!finished)
2263 call_Context_Unblock(task_collection->event);
2266 static void __cdecl chore_wrapper(_UnrealizedChore *chore)
2268 __TRY
2270 execute_chore(chore, chore->task_collection);
2272 __FINALLY_CTX(chore_wrapper_finally, chore)
2275 static BOOL pick_and_execute_chore(ThreadScheduler *scheduler)
2277 struct list *entry;
2278 struct scheduled_chore *sc;
2279 _UnrealizedChore *chore;
2281 TRACE("(%p)\n", scheduler);
2283 if (scheduler->scheduler.vtable != &ThreadScheduler_vtable)
2285 ERR("unknown scheduler set\n");
2286 return FALSE;
2289 EnterCriticalSection(&scheduler->cs);
2290 entry = list_head(&scheduler->scheduled_chores);
2291 if (entry)
2292 list_remove(entry);
2293 LeaveCriticalSection(&scheduler->cs);
2294 if (!entry)
2295 return FALSE;
2297 sc = LIST_ENTRY(entry, struct scheduled_chore, entry);
2298 chore = sc->chore;
2299 operator_delete(sc);
2301 chore->chore_wrapper(chore);
2302 return TRUE;
2305 static void __cdecl _StructuredTaskCollection_scheduler_cb(void *data)
2307 pick_and_execute_chore((ThreadScheduler*)get_current_scheduler());
2310 static bool schedule_chore(_StructuredTaskCollection *this,
2311 _UnrealizedChore *chore, Scheduler **pscheduler)
2313 struct scheduled_chore *sc;
2314 ThreadScheduler *scheduler;
2316 if (chore->task_collection) {
2317 invalid_multiple_scheduling e;
2318 invalid_multiple_scheduling_ctor_str(&e, "Chore scheduled multiple times");
2319 _CxxThrowException(&e, &invalid_multiple_scheduling_exception_type);
2320 return FALSE;
2323 if (!this->context)
2324 this->context = get_current_context();
2325 scheduler = get_thread_scheduler_from_context(this->context);
2326 if (!scheduler) {
2327 ERR("unknown context or scheduler set\n");
2328 return FALSE;
2331 sc = operator_new(sizeof(*sc));
2332 sc->chore = chore;
2334 chore->task_collection = this;
2335 chore->chore_wrapper = chore_wrapper;
2336 InterlockedIncrement(&this->count);
2338 EnterCriticalSection(&scheduler->cs);
2339 list_add_head(&scheduler->scheduled_chores, &sc->entry);
2340 LeaveCriticalSection(&scheduler->cs);
2341 *pscheduler = &scheduler->scheduler;
2342 return TRUE;
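/* schedule_chore() only queues the chore and hands the owning scheduler
 * back to the caller; the actual call_Scheduler_ScheduleTask*() call is
 * made by the caller so the _Schedule and _Schedule_loc entry points
 * below can reuse this helper with and without a placement argument. */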
2345 #if _MSVCR_VER >= 110
2347 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAAXPAV_UnrealizedChore@23@PAVlocation@3@@Z */
2348 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@PAVlocation@3@@Z */
2349 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QEAAXPEAV_UnrealizedChore@23@PEAVlocation@3@@Z */
2350 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Schedule_loc, 12)
2351 void __thiscall _StructuredTaskCollection__Schedule_loc(
2352 _StructuredTaskCollection *this, _UnrealizedChore *chore,
2353 /*location*/void *placement)
2355 Scheduler *scheduler;
2357 TRACE("(%p %p %p)\n", this, chore, placement);
2359 if (schedule_chore(this, chore, &scheduler))
2361 call_Scheduler_ScheduleTask_loc(scheduler,
2362 _StructuredTaskCollection_scheduler_cb, NULL, placement);
2366 #endif /* _MSVCR_VER >= 110 */
2368 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAAXPAV_UnrealizedChore@23@@Z */
2369 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@@Z */
2370 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QEAAXPEAV_UnrealizedChore@23@@Z */
2371 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Schedule, 8)
2372 void __thiscall _StructuredTaskCollection__Schedule(
2373 _StructuredTaskCollection *this, _UnrealizedChore *chore)
2375 Scheduler *scheduler;
2377 TRACE("(%p %p)\n", this, chore);
2379 if (schedule_chore(this, chore, &scheduler))
2381 call_Scheduler_ScheduleTask(scheduler,
2382 _StructuredTaskCollection_scheduler_cb, NULL);
2386 static void CALLBACK exception_ptr_rethrow_finally(BOOL normal, void *data)
2388 exception_ptr *ep = data;
2390 TRACE("(%u %p)\n", normal, data);
2392 __ExceptionPtrDestroy(ep);
2393 operator_delete(ep);
2396 /* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QAA?AW4_TaskCollectionStatus@23@PAV_UnrealizedChore@23@@Z */
2397 /* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QAG?AW4_TaskCollectionStatus@23@PAV_UnrealizedChore@23@@Z */
2398 /* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QEAA?AW4_TaskCollectionStatus@23@PEAV_UnrealizedChore@23@@Z */
2399 _TaskCollectionStatus __stdcall _StructuredTaskCollection__RunAndWait(
2400 _StructuredTaskCollection *this, _UnrealizedChore *chore)
2402 ULONG_PTR exception;
2403 exception_ptr *ep;
2404 LONG count;
2406 TRACE("(%p %p)\n", this, chore);
2408 if (chore) {
2409 if (chore->task_collection) {
2410 invalid_multiple_scheduling e;
2411 invalid_multiple_scheduling_ctor_str(&e, "Chore scheduled multiple times");
2412 _CxxThrowException(&e, &invalid_multiple_scheduling_exception_type);
2414 execute_chore(chore, this);
2417 if (this->context) {
2418 ThreadScheduler *scheduler = get_thread_scheduler_from_context(this->context);
2419 if (scheduler) {
2420 while (pick_and_execute_chore(scheduler)) ;
2424 this->event = get_current_context();
2425 InterlockedCompareExchange(&this->finished, 0, FINISHED_INITIAL);
2427 while (this->count != 0) {
2428 count = this->count;
2429 InterlockedAdd(&this->count, -count);
2430 count = InterlockedAdd(&this->finished, -count);
2432 if (count < 0)
2433 call_Context_Block(this->event);
2436 exception = (ULONG_PTR)this->exception;
2437 ep = (exception_ptr*)(exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK);
2438 if (ep) {
2439 this->exception = 0;
2440 __TRY
2442 __ExceptionPtrRethrow(ep);
2444 __FINALLY_CTX(exception_ptr_rethrow_finally, ep)
2446 if (exception & STRUCTURED_TASK_COLLECTION_CANCELLED)
2447 return TASK_COLLECTION_CANCELLED;
2448 return TASK_COLLECTION_SUCCESS;
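/* Wait protocol sketch for _RunAndWait() above: "count" counts scheduled
 * chores and "finished" counts completions, with "finished" parked at
 * FINISHED_INITIAL until either the first completion or a waiter arms it.
 * The waiter repeatedly transfers -count into "finished"; a negative
 * running total means chores are still executing, so the context blocks
 * until chore_wrapper_finally() increments "finished" back to zero and
 * unblocks this->event. */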
2451 /* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QAA_NXZ */
2452 /* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QAE_NXZ */
2453 /* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QEAA_NXZ */
2454 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__IsCanceling, 4)
2455 bool __thiscall _StructuredTaskCollection__IsCanceling(
2456 _StructuredTaskCollection *this)
2458 TRACE("(%p)\n", this);
2459 return !!((ULONG_PTR)this->exception & STRUCTURED_TASK_COLLECTION_CANCELLED);
2462 /* ?_CheckTaskCollection@_UnrealizedChore@details@Concurrency@@IAEXXZ */
2463 /* ?_CheckTaskCollection@_UnrealizedChore@details@Concurrency@@IEAAXXZ */
2464 DEFINE_THISCALL_WRAPPER(_UnrealizedChore__CheckTaskCollection, 4)
2465 void __thiscall _UnrealizedChore__CheckTaskCollection(_UnrealizedChore *this)
2467 FIXME("() stub\n");
2470 /* ??0critical_section@Concurrency@@QAE@XZ */
2471 /* ??0critical_section@Concurrency@@QEAA@XZ */
2472 DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
2473 critical_section* __thiscall critical_section_ctor(critical_section *this)
2475 TRACE("(%p)\n", this);
2477 this->unk_active.ctx = NULL;
2478 this->head = this->tail = NULL;
2479 return this;
2482 /* ??1critical_section@Concurrency@@QAE@XZ */
2483 /* ??1critical_section@Concurrency@@QEAA@XZ */
2484 DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
2485 void __thiscall critical_section_dtor(critical_section *this)
2487 TRACE("(%p)\n", this);
2490 static void __cdecl spin_wait_yield(void)
2492 Sleep(0);
2495 static inline void spin_wait_for_next_cs(cs_queue *q)
2497 SpinWait sw;
2499 if(q->next) return;
2501 SpinWait_ctor(&sw, &spin_wait_yield);
2502 SpinWait__Reset(&sw);
2503 while(!q->next)
2504 SpinWait__SpinOnce(&sw);
2505 SpinWait_dtor(&sw);
2508 static inline void cs_set_head(critical_section *cs, cs_queue *q)
2510 cs->unk_active.ctx = get_current_context();
2511 cs->unk_active.next = q->next;
2512 cs->head = &cs->unk_active;
2515 static inline void cs_lock(critical_section *cs, cs_queue *q)
2517 cs_queue *last;
2519 if(cs->unk_active.ctx == get_current_context()) {
2520 improper_lock e;
2521 improper_lock_ctor_str(&e, "Already locked");
2522 _CxxThrowException(&e, &improper_lock_exception_type);
2525 memset(q, 0, sizeof(*q));
2526 q->ctx = get_current_context();
2527 last = InterlockedExchangePointer(&cs->tail, q);
2528 if(last) {
2529 last->next = q;
2530 call_Context_Block(q->ctx);
2533 cs_set_head(cs, q);
2534 if(InterlockedCompareExchangePointer(&cs->tail, &cs->unk_active, q) != q) {
2535 spin_wait_for_next_cs(q);
2536 cs->unk_active.next = q->next;
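/* cs_lock() above is an MCS-style queue lock: each waiter publishes a
 * cs_queue node by swapping itself into cs->tail, blocks until its
 * predecessor hands the lock over, and on acquisition copies its node
 * into cs->unk_active (cs_set_head) so that stack-allocated nodes, like
 * the one in critical_section_lock() below, may safely go out of scope
 * while the lock is still held. */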
2540 /* ?lock@critical_section@Concurrency@@QAEXXZ */
2541 /* ?lock@critical_section@Concurrency@@QEAAXXZ */
2542 DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
2543 void __thiscall critical_section_lock(critical_section *this)
2545 cs_queue q;
2547 TRACE("(%p)\n", this);
2548 cs_lock(this, &q);
2551 /* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
2552 /* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
2553 DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
2554 bool __thiscall critical_section_try_lock(critical_section *this)
2556 cs_queue q;
2558 TRACE("(%p)\n", this);
2560 if(this->unk_active.ctx == get_current_context())
2561 return FALSE;
2563 memset(&q, 0, sizeof(q));
2564 if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
2565 cs_set_head(this, &q);
2566 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
2567 spin_wait_for_next_cs(&q);
2568 this->unk_active.next = q.next;
2570 return TRUE;
2572 return FALSE;
2575 /* ?unlock@critical_section@Concurrency@@QAEXXZ */
2576 /* ?unlock@critical_section@Concurrency@@QEAAXXZ */
2577 DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
2578 void __thiscall critical_section_unlock(critical_section *this)
2580 TRACE("(%p)\n", this);
2582 this->unk_active.ctx = NULL;
2583 this->head = NULL;
2584 if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
2585 == &this->unk_active) return;
2586 spin_wait_for_next_cs(&this->unk_active);
2588 #if _MSVCR_VER >= 110
2589 while(1) {
2590 cs_queue *next;
2592 if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
2593 break;
2595 next = this->unk_active.next;
2596 if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
2597 HeapFree(GetProcessHeap(), 0, next);
2598 return;
2600 spin_wait_for_next_cs(next);
2602 this->unk_active.next = next->next;
2603 HeapFree(GetProcessHeap(), 0, next);
2605 #endif
2607 call_Context_Unblock(this->unk_active.next->ctx);
2610 /* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
2611 /* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
2612 DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
2613 critical_section* __thiscall critical_section_native_handle(critical_section *this)
2615 TRACE("(%p)\n", this);
2616 return this;
2619 static void set_timeout(FILETIME *ft, unsigned int timeout)
2621 LARGE_INTEGER to;
2623 GetSystemTimeAsFileTime(ft);
2624 to.QuadPart = ((LONGLONG)ft->dwHighDateTime << 32) +
2625 ft->dwLowDateTime + (LONGLONG)timeout * TICKSPERMSEC;
2626 ft->dwHighDateTime = to.QuadPart >> 32;
2627 ft->dwLowDateTime = to.QuadPart;
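/* set_timeout() converts a relative timeout in milliseconds into an
 * absolute FILETIME. FILETIME counts 100 ns ticks, so TICKSPERMSEC is
 * assumed to be 10000 here; e.g. a 500 ms timeout adds
 * 500 * 10000 = 5000000 ticks to the current system time. */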
2630 struct timeout_unlock
2632 Context *ctx;
2633 BOOL timed_out;
2636 static void WINAPI timeout_unlock(TP_CALLBACK_INSTANCE *instance, void *ctx, TP_TIMER *timer)
2638 struct timeout_unlock *tu = ctx;
2639 tu->timed_out = TRUE;
2640 call_Context_Unblock(tu->ctx);
2643 /* returns TRUE if wait has timed out */
2644 static BOOL block_context_for(Context *ctx, unsigned int timeout)
2646 struct timeout_unlock tu = { ctx };
2647 TP_TIMER *tp_timer;
2648 FILETIME ft;
2650 if(timeout == COOPERATIVE_TIMEOUT_INFINITE) {
2651 call_Context_Block(ctx);
2652 return FALSE;
2655 tp_timer = CreateThreadpoolTimer(timeout_unlock, &tu, NULL);
2656 if(!tp_timer) {
2657 FIXME("throw exception?\n");
2658 return TRUE;
2660 set_timeout(&ft, timeout);
2661 SetThreadpoolTimer(tp_timer, &ft, 0, 0);
2663 call_Context_Block(ctx);
2665 SetThreadpoolTimer(tp_timer, NULL, 0, 0);
2666 WaitForThreadpoolTimerCallbacks(tp_timer, TRUE);
2667 CloseThreadpoolTimer(tp_timer);
2668 return tu.timed_out;
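/* Note on the helper above: the threadpool timer and a regular
 * call_Context_Unblock() may race, so a context that timed out can still
 * receive the "real" unblock afterwards. Callers resolve the race with an
 * interlocked flag and, when they lose it, absorb the pending wakeup with
 * one extra call_Context_Block(); see critical_section_try_lock_for()
 * below and _Condition_variable_wait_for() further down. */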
2671 #if _MSVCR_VER >= 110
2672 /* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
2673 /* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
2674 DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
2675 bool __thiscall critical_section_try_lock_for(
2676 critical_section *this, unsigned int timeout)
2678 Context *ctx = get_current_context();
2679 cs_queue *q, *last;
2681 TRACE("(%p %d)\n", this, timeout);
2683 if(this->unk_active.ctx == ctx) {
2684 improper_lock e;
2685 improper_lock_ctor_str(&e, "Already locked");
2686 _CxxThrowException(&e, &improper_lock_exception_type);
2689 if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
2690 return critical_section_try_lock(this);
2691 q->ctx = ctx;
2693 last = InterlockedExchangePointer(&this->tail, q);
2694 if(last) {
2695 last->next = q;
2697 if(block_context_for(q->ctx, timeout))
2699 if(!InterlockedExchange(&q->free, TRUE))
2700 return FALSE;
2701 /* Context was unblocked by both the timeout and an unlock operation; absorb the extra wakeup before taking over the lock */
2702 call_Context_Block(ctx);
2706 cs_set_head(this, q);
2707 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
2708 spin_wait_for_next_cs(q);
2709 this->unk_active.next = q->next;
2712 HeapFree(GetProcessHeap(), 0, q);
2713 return TRUE;
2715 #endif
2717 /* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
2718 /* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
2719 DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
2720 critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
2721 critical_section_scoped_lock *this, critical_section *cs)
2723 TRACE("(%p %p)\n", this, cs);
2724 this->cs = cs;
2725 cs_lock(this->cs, &this->lock.q);
2726 return this;
2729 /* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
2730 /* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
2731 DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
2732 void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
2734 TRACE("(%p)\n", this);
2735 critical_section_unlock(this->cs);
2738 /* ??0_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
2739 /* ??0_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2740 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock_ctor, 4)
2741 _NonReentrantPPLLock* __thiscall _NonReentrantPPLLock_ctor(_NonReentrantPPLLock *this)
2743 TRACE("(%p)\n", this);
2745 critical_section_ctor(&this->cs);
2746 return this;
2749 /* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
2750 /* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
2751 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Acquire, 8)
2752 void __thiscall _NonReentrantPPLLock__Acquire(_NonReentrantPPLLock *this, cs_queue *q)
2754 TRACE("(%p %p)\n", this, q);
2755 cs_lock(&this->cs, q);
2758 /* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QAEXXZ */
2759 /* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QEAAXXZ */
2760 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Release, 4)
2761 void __thiscall _NonReentrantPPLLock__Release(_NonReentrantPPLLock *this)
2763 TRACE("(%p)\n", this);
2764 critical_section_unlock(&this->cs);
2767 /* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
2768 /* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
2769 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_ctor, 8)
2770 _NonReentrantPPLLock__Scoped_lock* __thiscall _NonReentrantPPLLock__Scoped_lock_ctor(
2771 _NonReentrantPPLLock__Scoped_lock *this, _NonReentrantPPLLock *lock)
2773 TRACE("(%p %p)\n", this, lock);
2775 this->lock = lock;
2776 _NonReentrantPPLLock__Acquire(this->lock, &this->wait.q);
2777 return this;
2780 /* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
2781 /* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2782 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_dtor, 4)
2783 void __thiscall _NonReentrantPPLLock__Scoped_lock_dtor(_NonReentrantPPLLock__Scoped_lock *this)
2785 TRACE("(%p)\n", this);
2787 _NonReentrantPPLLock__Release(this->lock);
2790 /* ??0_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
2791 /* ??0_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2792 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock_ctor, 4)
2793 _ReentrantPPLLock* __thiscall _ReentrantPPLLock_ctor(_ReentrantPPLLock *this)
2795 TRACE("(%p)\n", this);
2797 critical_section_ctor(&this->cs);
2798 this->count = 0;
2799 this->owner = -1;
2800 return this;
2803 /* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
2804 /* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
2805 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Acquire, 8)
2806 void __thiscall _ReentrantPPLLock__Acquire(_ReentrantPPLLock *this, cs_queue *q)
2808 TRACE("(%p %p)\n", this, q);
2810 if(this->owner == GetCurrentThreadId()) {
2811 this->count++;
2812 return;
2815 cs_lock(&this->cs, q);
2816 this->count++;
2817 this->owner = GetCurrentThreadId();
2820 /* ?_Release@_ReentrantPPLLock@details@Concurrency@@QAEXXZ */
2821 /* ?_Release@_ReentrantPPLLock@details@Concurrency@@QEAAXXZ */
2822 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Release, 4)
2823 void __thiscall _ReentrantPPLLock__Release(_ReentrantPPLLock *this)
2825 TRACE("(%p)\n", this);
2827 this->count--;
2828 if(this->count)
2829 return;
2831 this->owner = -1;
2832 critical_section_unlock(&this->cs);
2835 /* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
2836 /* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
2837 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_ctor, 8)
2838 _ReentrantPPLLock__Scoped_lock* __thiscall _ReentrantPPLLock__Scoped_lock_ctor(
2839 _ReentrantPPLLock__Scoped_lock *this, _ReentrantPPLLock *lock)
2841 TRACE("(%p %p)\n", this, lock);
2843 this->lock = lock;
2844 _ReentrantPPLLock__Acquire(this->lock, &this->wait.q);
2845 return this;
2848 /* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
2849 /* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2850 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_dtor, 4)
2851 void __thiscall _ReentrantPPLLock__Scoped_lock_dtor(_ReentrantPPLLock__Scoped_lock *this)
2853 TRACE("(%p)\n", this);
2855 _ReentrantPPLLock__Release(this->lock);
2858 /* ?_GetConcurrency@details@Concurrency@@YAIXZ */
2859 unsigned int __cdecl _GetConcurrency(void)
2861 static unsigned int val = -1;
2863 TRACE("()\n");
2865 if(val == -1) {
2866 SYSTEM_INFO si;
2868 GetSystemInfo(&si);
2869 val = si.dwNumberOfProcessors;
2872 return val;
2875 static void evt_add_queue(thread_wait_entry **head, thread_wait_entry *entry)
2877 entry->next = *head;
2878 entry->prev = NULL;
2879 if(*head) (*head)->prev = entry;
2880 *head = entry;
2883 static void evt_remove_queue(thread_wait_entry **head, thread_wait_entry *entry)
2885 if(entry == *head)
2886 *head = entry->next;
2887 else if(entry->prev)
2888 entry->prev->next = entry->next;
2889 if(entry->next) entry->next->prev = entry->prev;
2892 static size_t evt_end_wait(thread_wait *wait, event **events, int count)
2894 size_t i, ret = COOPERATIVE_WAIT_TIMEOUT;
2896 for(i = 0; i < count; i++) {
2897 critical_section_lock(&events[i]->cs);
2898 if(events[i] == wait->signaled) ret = i;
2899 evt_remove_queue(&events[i]->waiters, &wait->entries[i]);
2900 critical_section_unlock(&events[i]->cs);
2903 return ret;
2906 static inline int evt_transition(void **state, void *from, void *to)
2908 return InterlockedCompareExchangePointer(state, to, from) == from;
2911 static size_t evt_wait(thread_wait *wait, event **events, int count, bool wait_all, unsigned int timeout)
2913 int i;
2915 wait->signaled = EVT_RUNNING;
2916 wait->pending_waits = wait_all ? count : 1;
2917 for(i = 0; i < count; i++) {
2918 wait->entries[i].wait = wait;
2920 critical_section_lock(&events[i]->cs);
2921 evt_add_queue(&events[i]->waiters, &wait->entries[i]);
2922 if(events[i]->signaled) {
2923 if(!InterlockedDecrement(&wait->pending_waits)) {
2924 wait->signaled = events[i];
2925 critical_section_unlock(&events[i]->cs);
2927 return evt_end_wait(wait, events, i+1);
2930 critical_section_unlock(&events[i]->cs);
2933 if(!timeout)
2934 return evt_end_wait(wait, events, count);
2936 if(!evt_transition(&wait->signaled, EVT_RUNNING, EVT_WAITING))
2937 return evt_end_wait(wait, events, count);
2939 if(block_context_for(wait->ctx, timeout) &&
2940 !evt_transition(&wait->signaled, EVT_WAITING, EVT_RUNNING))
2941 call_Context_Block(wait->ctx);
2943 return evt_end_wait(wait, events, count);
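/* In evt_wait() above, wait->signaled doubles as a small state machine:
 * EVT_RUNNING while the entries are being registered, EVT_WAITING once
 * the waiter commits to blocking, and finally the event* that woke it
 * (stored by event_set()). pending_waits starts at "count" for wait-all
 * and at 1 for wait-any, so the waiter is released exactly when the
 * required number of events have been set. */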
2946 /* ??0event@Concurrency@@QAE@XZ */
2947 /* ??0event@Concurrency@@QEAA@XZ */
2948 DEFINE_THISCALL_WRAPPER(event_ctor, 4)
2949 event* __thiscall event_ctor(event *this)
2951 TRACE("(%p)\n", this);
2953 this->waiters = NULL;
2954 this->signaled = FALSE;
2955 critical_section_ctor(&this->cs);
2957 return this;
2960 /* ??1event@Concurrency@@QAE@XZ */
2961 /* ??1event@Concurrency@@QEAA@XZ */
2962 DEFINE_THISCALL_WRAPPER(event_dtor, 4)
2963 void __thiscall event_dtor(event *this)
2965 TRACE("(%p)\n", this);
2966 critical_section_dtor(&this->cs);
2967 if(this->waiters)
2968 ERR("there's a wait on a destroyed event\n");
2971 /* ?reset@event@Concurrency@@QAEXXZ */
2972 /* ?reset@event@Concurrency@@QEAAXXZ */
2973 DEFINE_THISCALL_WRAPPER(event_reset, 4)
2974 void __thiscall event_reset(event *this)
2976 thread_wait_entry *entry;
2978 TRACE("(%p)\n", this);
2980 critical_section_lock(&this->cs);
2981 if(this->signaled) {
2982 this->signaled = FALSE;
2983 for(entry=this->waiters; entry; entry = entry->next)
2984 InterlockedIncrement(&entry->wait->pending_waits);
2986 critical_section_unlock(&this->cs);
2989 /* ?set@event@Concurrency@@QAEXXZ */
2990 /* ?set@event@Concurrency@@QEAAXXZ */
2991 DEFINE_THISCALL_WRAPPER(event_set, 4)
2992 void __thiscall event_set(event *this)
2994 thread_wait_entry *wakeup = NULL;
2995 thread_wait_entry *entry, *next;
2997 TRACE("(%p)\n", this);
2999 critical_section_lock(&this->cs);
3000 if(!this->signaled) {
3001 this->signaled = TRUE;
3002 for(entry=this->waiters; entry; entry=next) {
3003 next = entry->next;
3004 if(!InterlockedDecrement(&entry->wait->pending_waits)) {
3005 if(InterlockedExchangePointer(&entry->wait->signaled, this) == EVT_WAITING) {
3006 evt_remove_queue(&this->waiters, entry);
3007 evt_add_queue(&wakeup, entry);
3012 critical_section_unlock(&this->cs);
3014 for(entry=wakeup; entry; entry=next) {
3015 next = entry->next;
3016 entry->next = entry->prev = NULL;
3017 call_Context_Unblock(entry->wait->ctx);
3021 /* ?wait@event@Concurrency@@QAEII@Z */
3022 /* ?wait@event@Concurrency@@QEAA_KI@Z */
3023 DEFINE_THISCALL_WRAPPER(event_wait, 8)
3024 size_t __thiscall event_wait(event *this, unsigned int timeout)
3026 thread_wait wait;
3027 size_t signaled;
3029 TRACE("(%p %u)\n", this, timeout);
3031 critical_section_lock(&this->cs);
3032 signaled = this->signaled;
3033 critical_section_unlock(&this->cs);
3035 if(!timeout) return signaled ? 0 : COOPERATIVE_WAIT_TIMEOUT;
3036 wait.ctx = get_current_context();
3037 return signaled ? 0 : evt_wait(&wait, &this, 1, FALSE, timeout);
3040 /* ?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z */
3041 /* ?wait_for_multiple@event@Concurrency@@SA_KPEAPEAV12@_K_NI@Z */
3042 size_t __cdecl event_wait_for_multiple(event **events, size_t count, bool wait_all, unsigned int timeout)
3044 thread_wait *wait;
3045 size_t ret;
3047 TRACE("(%p %Iu %d %u)\n", events, count, wait_all, timeout);
3049 if(count == 0)
3050 return 0;
3052 wait = operator_new(FIELD_OFFSET(thread_wait, entries[count]));
3053 wait->ctx = get_current_context();
3054 ret = evt_wait(wait, events, count, wait_all, timeout);
3055 operator_delete(wait);
3057 return ret;
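/* Illustrative caller sketch (not part of the build): wait up to 100 ms
 * for either of two events and distinguish timeout from success:
 *
 *     event *evts[2] = { &e1, &e2 };
 *     size_t idx = event_wait_for_multiple(evts, 2, FALSE, 100);
 *     if(idx == COOPERATIVE_WAIT_TIMEOUT)
 *         ...;    // neither event was set in time
 *     else
 *         ...;    // evts[idx] is the event that was signaled
 */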
3060 #if _MSVCR_VER >= 110
3062 /* ??0_Cancellation_beacon@details@Concurrency@@QAE@XZ */
3063 /* ??0_Cancellation_beacon@details@Concurrency@@QEAA@XZ */
3064 DEFINE_THISCALL_WRAPPER(_Cancellation_beacon_ctor, 4)
3065 _Cancellation_beacon* __thiscall _Cancellation_beacon_ctor(_Cancellation_beacon *this)
3067 ExternalContextBase *ctx = (ExternalContextBase*)get_current_context();
3068 _StructuredTaskCollection *task_collection = NULL;
3069 struct beacon *beacon;
3071 TRACE("(%p)\n", this);
3073 if (ctx->context.vtable == &ExternalContextBase_vtable) {
3074 task_collection = ctx->task_collection;
3075 if (task_collection)
3076 ctx = (ExternalContextBase*)task_collection->context;
3079 if (ctx->context.vtable != &ExternalContextBase_vtable) {
3080 ERR("unknown context\n");
3081 return NULL;
3084 beacon = malloc(sizeof(*beacon));
3085 beacon->cancelling = Context_IsCurrentTaskCollectionCanceling();
3086 beacon->task_collection = task_collection;
3088 if (task_collection) {
3089 EnterCriticalSection(&ctx->beacons_cs);
3090 list_add_head(&ctx->beacons, &beacon->entry);
3091 LeaveCriticalSection(&ctx->beacons_cs);
3094 this->beacon = beacon;
3095 return this;
3098 /* ??1_Cancellation_beacon@details@Concurrency@@QAE@XZ */
3099 /* ??1_Cancellation_beacon@details@Concurrency@@QEAA@XZ */
3100 DEFINE_THISCALL_WRAPPER(_Cancellation_beacon_dtor, 4)
3101 void __thiscall _Cancellation_beacon_dtor(_Cancellation_beacon *this)
3103 TRACE("(%p)\n", this);
3105 if (this->beacon->task_collection) {
3106 ExternalContextBase *ctx = (ExternalContextBase*)this->beacon->task_collection->context;
3108 EnterCriticalSection(&ctx->beacons_cs);
3109 list_remove(&this->beacon->entry);
3110 LeaveCriticalSection(&ctx->beacons_cs);
3113 free(this->beacon);
3116 /* ?_Confirm_cancel@_Cancellation_beacon@details@Concurrency@@QAA_NXZ */
3117 /* ?_Confirm_cancel@_Cancellation_beacon@details@Concurrency@@QAE_NXZ */
3118 /* ?_Confirm_cancel@_Cancellation_beacon@details@Concurrency@@QEAA_NXZ */
3119 DEFINE_THISCALL_WRAPPER(_Cancellation_beacon__Confirm_cancel, 4)
3120 bool __thiscall _Cancellation_beacon__Confirm_cancel(_Cancellation_beacon *this)
3122 bool ret;
3124 TRACE("(%p)\n", this);
3126 ret = Context_IsCurrentTaskCollectionCanceling();
3127 if (!ret)
3128 InterlockedDecrement(&this->beacon->cancelling);
3129 return ret;
3132 /* ??0_Condition_variable@details@Concurrency@@QAE@XZ */
3133 /* ??0_Condition_variable@details@Concurrency@@QEAA@XZ */
3134 DEFINE_THISCALL_WRAPPER(_Condition_variable_ctor, 4)
3135 _Condition_variable* __thiscall _Condition_variable_ctor(_Condition_variable *this)
3137 TRACE("(%p)\n", this);
3139 this->queue = NULL;
3140 critical_section_ctor(&this->lock);
3141 return this;
3144 /* ??1_Condition_variable@details@Concurrency@@QAE@XZ */
3145 /* ??1_Condition_variable@details@Concurrency@@QEAA@XZ */
3146 DEFINE_THISCALL_WRAPPER(_Condition_variable_dtor, 4)
3147 void __thiscall _Condition_variable_dtor(_Condition_variable *this)
3149 TRACE("(%p)\n", this);
3151 while(this->queue) {
3152 cv_queue *next = this->queue->next;
3153 if(!this->queue->expired)
3154 ERR("there's an active wait\n");
3155 operator_delete(this->queue);
3156 this->queue = next;
3158 critical_section_dtor(&this->lock);
3161 /* ?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z */
3162 /* ?wait@_Condition_variable@details@Concurrency@@QEAAXAEAVcritical_section@3@@Z */
3163 DEFINE_THISCALL_WRAPPER(_Condition_variable_wait, 8)
3164 void __thiscall _Condition_variable_wait(_Condition_variable *this, critical_section *cs)
3166 cv_queue q;
3168 TRACE("(%p, %p)\n", this, cs);
3170 q.ctx = get_current_context();
3171 q.expired = FALSE;
3172 critical_section_lock(&this->lock);
3173 q.next = this->queue;
3174 this->queue = &q;
3175 critical_section_unlock(&this->lock);
3177 critical_section_unlock(cs);
3178 call_Context_Block(q.ctx);
3179 critical_section_lock(cs);
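/* Usage sketch (illustrative): as with any condition variable, the
 * predicate should be re-checked under the lock after each wakeup:
 *
 *     critical_section_lock(&cs);
 *     while(!ready)
 *         _Condition_variable_wait(&cv, &cs);   // unlocks cs while blocked
 *     critical_section_unlock(&cs);
 */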
3182 /* ?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z */
3183 /* ?wait_for@_Condition_variable@details@Concurrency@@QEAA_NAEAVcritical_section@3@I@Z */
3184 DEFINE_THISCALL_WRAPPER(_Condition_variable_wait_for, 12)
3185 bool __thiscall _Condition_variable_wait_for(_Condition_variable *this,
3186 critical_section *cs, unsigned int timeout)
3188 cv_queue *q;
3190 TRACE("(%p %p %d)\n", this, cs, timeout);
3192 q = operator_new(sizeof(cv_queue));
3193 q->ctx = get_current_context();
3194 q->expired = FALSE;
3195 critical_section_lock(&this->lock);
3196 q->next = this->queue;
3197 this->queue = q;
3198 critical_section_unlock(&this->lock);
3200 critical_section_unlock(cs);
3202 if(block_context_for(q->ctx, timeout)) {
3203 if(!InterlockedExchange(&q->expired, TRUE)) {
3204 critical_section_lock(cs);
3205 return FALSE;
3207 call_Context_Block(q->ctx);
3210 operator_delete(q);
3211 critical_section_lock(cs);
3212 return TRUE;
3215 /* ?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ */
3216 /* ?notify_one@_Condition_variable@details@Concurrency@@QEAAXXZ */
3217 DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_one, 4)
3218 void __thiscall _Condition_variable_notify_one(_Condition_variable *this)
3220 cv_queue *node;
3222 TRACE("(%p)\n", this);
3224 if(!this->queue)
3225 return;
3227 while(1) {
3228 critical_section_lock(&this->lock);
3229 node = this->queue;
3230 if(!node) {
3231 critical_section_unlock(&this->lock);
3232 return;
3234 this->queue = node->next;
3235 critical_section_unlock(&this->lock);
3237 node->next = CV_WAKE;
3238 if(!InterlockedExchange(&node->expired, TRUE)) {
3239 call_Context_Unblock(node->ctx);
3240 return;
3241 } else {
3242 operator_delete(node);
3247 /* ?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ */
3248 /* ?notify_all@_Condition_variable@details@Concurrency@@QEAAXXZ */
3249 DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_all, 4)
3250 void __thiscall _Condition_variable_notify_all(_Condition_variable *this)
3252 cv_queue *ptr;
3254 TRACE("(%p)\n", this);
3256 if(!this->queue)
3257 return;
3259 critical_section_lock(&this->lock);
3260 ptr = this->queue;
3261 this->queue = NULL;
3262 critical_section_unlock(&this->lock);
3264 while(ptr) {
3265 cv_queue *next = ptr->next;
3267 ptr->next = CV_WAKE;
3268 if(!InterlockedExchange(&ptr->expired, TRUE))
3269 call_Context_Unblock(ptr->ctx);
3270 else
3271 operator_delete(ptr);
3272 ptr = next;
3275 #endif
3277 /* ??0reader_writer_lock@Concurrency@@QAE@XZ */
3278 /* ??0reader_writer_lock@Concurrency@@QEAA@XZ */
3279 DEFINE_THISCALL_WRAPPER(reader_writer_lock_ctor, 4)
3280 reader_writer_lock* __thiscall reader_writer_lock_ctor(reader_writer_lock *this)
3282 TRACE("(%p)\n", this);
3284 memset(this, 0, sizeof(*this));
3285 return this;
3288 /* ??1reader_writer_lock@Concurrency@@QAE@XZ */
3289 /* ??1reader_writer_lock@Concurrency@@QEAA@XZ */
3290 DEFINE_THISCALL_WRAPPER(reader_writer_lock_dtor, 4)
3291 void __thiscall reader_writer_lock_dtor(reader_writer_lock *this)
3293 TRACE("(%p)\n", this);
3295 if (this->thread_id != 0 || this->count)
3296 WARN("destroying locked reader_writer_lock\n");
3299 static inline void spin_wait_for_next_rwl(rwl_queue *q)
3301 SpinWait sw;
3303 if(q->next) return;
3305 SpinWait_ctor(&sw, &spin_wait_yield);
3306 SpinWait__Reset(&sw);
3307 while(!q->next)
3308 SpinWait__SpinOnce(&sw);
3309 SpinWait_dtor(&sw);
3312 /* ?lock@reader_writer_lock@Concurrency@@QAEXXZ */
3313 /* ?lock@reader_writer_lock@Concurrency@@QEAAXXZ */
3314 DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock, 4)
3315 void __thiscall reader_writer_lock_lock(reader_writer_lock *this)
3317 rwl_queue q = { NULL, get_current_context() }, *last;
3319 TRACE("(%p)\n", this);
3321 if (this->thread_id == GetCurrentThreadId()) {
3322 improper_lock e;
3323 improper_lock_ctor_str(&e, "Already locked");
3324 _CxxThrowException(&e, &improper_lock_exception_type);
3327 last = InterlockedExchangePointer((void**)&this->writer_tail, &q);
3328 if (last) {
3329 last->next = &q;
3330 call_Context_Block(q.ctx);
3331 } else {
3332 this->writer_head = &q;
3333 if (InterlockedOr(&this->count, WRITER_WAITING))
3334 call_Context_Block(q.ctx);
3337 this->thread_id = GetCurrentThreadId();
3338 this->writer_head = &this->active;
3339 this->active.next = NULL;
3340 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
3341 spin_wait_for_next_rwl(&q);
3342 this->active.next = q.next;
3346 /* ?lock_read@reader_writer_lock@Concurrency@@QAEXXZ */
3347 /* ?lock_read@reader_writer_lock@Concurrency@@QEAAXXZ */
3348 DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock_read, 4)
3349 void __thiscall reader_writer_lock_lock_read(reader_writer_lock *this)
3351 rwl_queue q = { NULL, get_current_context() };
3353 TRACE("(%p)\n", this);
3355 if (this->thread_id == GetCurrentThreadId()) {
3356 improper_lock e;
3357 improper_lock_ctor_str(&e, "Already locked as writer");
3358 _CxxThrowException(&e, &improper_lock_exception_type);
3361 do {
3362 q.next = this->reader_head;
3363 } while(InterlockedCompareExchangePointer((void**)&this->reader_head, &q, q.next) != q.next);
3365 if (!q.next) {
3366 rwl_queue *head;
3367 LONG count;
3369 while (!((count = this->count) & WRITER_WAITING))
3370 if (InterlockedCompareExchange(&this->count, count+1, count) == count) break;
3372 if (count & WRITER_WAITING)
3373 call_Context_Block(q.ctx);
3375 head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
3376 while(head && head != &q) {
3377 rwl_queue *next = head->next;
3378 InterlockedIncrement(&this->count);
3379 call_Context_Unblock(head->ctx);
3380 head = next;
3382 } else {
3383 call_Context_Block(q.ctx);
3387 /* ?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ */
3388 /* ?try_lock@reader_writer_lock@Concurrency@@QEAA_NXZ */
3389 DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock, 4)
3390 bool __thiscall reader_writer_lock_try_lock(reader_writer_lock *this)
3392 rwl_queue q = { NULL };
3394 TRACE("(%p)\n", this);
3396 if (this->thread_id == GetCurrentThreadId())
3397 return FALSE;
3399 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &q, NULL))
3400 return FALSE;
3401 this->writer_head = &q;
3402 if (!InterlockedCompareExchange(&this->count, WRITER_WAITING, 0)) {
3403 this->thread_id = GetCurrentThreadId();
3404 this->writer_head = &this->active;
3405 this->active.next = NULL;
3406 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
3407 spin_wait_for_next_rwl(&q);
3408 this->active.next = q.next;
3410 return TRUE;
3413 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, &q) == &q)
3414 return FALSE;
3415 spin_wait_for_next_rwl(&q);
3416 this->writer_head = q.next;
3417 if (!InterlockedOr(&this->count, WRITER_WAITING)) {
3418 this->thread_id = GetCurrentThreadId();
3419 this->writer_head = &this->active;
3420 this->active.next = q.next;
3421 return TRUE;
3423 return FALSE;
3426 /* ?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ */
3427 /* ?try_lock_read@reader_writer_lock@Concurrency@@QEAA_NXZ */
3428 DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock_read, 4)
3429 bool __thiscall reader_writer_lock_try_lock_read(reader_writer_lock *this)
3431 LONG count;
3433 TRACE("(%p)\n", this);
3435 while (!((count = this->count) & WRITER_WAITING))
3436 if (InterlockedCompareExchange(&this->count, count+1, count) == count) return TRUE;
3437 return FALSE;
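/* Reader accounting sketch for the lock above: aside from the
 * WRITER_WAITING flag, "count" holds the number of active readers, and
 * the flag closes the door to new readers so a queued writer is not
 * starved. try_lock_read() is therefore just a CAS that increments the
 * reader count as long as WRITER_WAITING is clear. */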
3440 /* ?unlock@reader_writer_lock@Concurrency@@QAEXXZ */
3441 /* ?unlock@reader_writer_lock@Concurrency@@QEAAXXZ */
3442 DEFINE_THISCALL_WRAPPER(reader_writer_lock_unlock, 4)
3443 void __thiscall reader_writer_lock_unlock(reader_writer_lock *this)
3445 LONG count;
3446 rwl_queue *head, *next;
3448 TRACE("(%p)\n", this);
3450 if ((count = this->count) & ~WRITER_WAITING) {
3451 count = InterlockedDecrement(&this->count);
3452 if (count != WRITER_WAITING)
3453 return;
3454 call_Context_Unblock(this->writer_head->ctx);
3455 return;
3458 this->thread_id = 0;
3459 next = this->writer_head->next;
3460 if (next) {
3461 call_Context_Unblock(next->ctx);
3462 return;
3464 InterlockedAnd(&this->count, ~WRITER_WAITING);
3465 head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
3466 while (head) {
3467 next = head->next;
3468 InterlockedIncrement(&this->count);
3469 call_Context_Unblock(head->ctx);
3470 head = next;
3473 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, this->writer_head) == this->writer_head)
3474 return;
3475 InterlockedOr(&this->count, WRITER_WAITING);
3478 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
3479 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
3480 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_ctor, 8)
3481 reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_ctor(
3482 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
3484 TRACE("(%p %p)\n", this, lock);
3486 this->lock = lock;
3487 reader_writer_lock_lock(lock);
3488 return this;
3491 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ */
3492 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QEAA@XZ */
3493 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_dtor, 4)
3494 void __thiscall reader_writer_lock_scoped_lock_dtor(reader_writer_lock_scoped_lock *this)
3496 TRACE("(%p)\n", this);
3497 reader_writer_lock_unlock(this->lock);
3500 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
3501 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
3502 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_ctor, 8)
3503 reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_read_ctor(
3504 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
3506 TRACE("(%p %p)\n", this, lock);
3508 this->lock = lock;
3509 reader_writer_lock_lock_read(lock);
3510 return this;
3513 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ */
3514 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@XZ */
3515 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_dtor, 4)
3516 void __thiscall reader_writer_lock_scoped_lock_read_dtor(reader_writer_lock_scoped_lock *this)
3518 TRACE("(%p)\n", this);
3519 reader_writer_lock_unlock(this->lock);
3522 /* ??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
3523 /* ??0_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
3524 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_ctor, 4)
3525 _ReentrantBlockingLock* __thiscall _ReentrantBlockingLock_ctor(_ReentrantBlockingLock *this)
3527 TRACE("(%p)\n", this);
3529 InitializeCriticalSectionEx(&this->cs, 0, RTL_CRITICAL_SECTION_FLAG_FORCE_DEBUG_INFO);
3530 this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": _ReentrantBlockingLock");
3531 return this;
/* ??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
/* ??1_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_dtor, 4)
void __thiscall _ReentrantBlockingLock_dtor(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);
}

/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Acquire, 4)
void __thiscall _ReentrantBlockingLock__Acquire(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    EnterCriticalSection(&this->cs);
}

/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Release, 4)
void __thiscall _ReentrantBlockingLock__Release(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    LeaveCriticalSection(&this->cs);
}

/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ */
/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__TryAcquire, 4)
bool __thiscall _ReentrantBlockingLock__TryAcquire(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    return TryEnterCriticalSection(&this->cs);
}

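/* Illustrative (not verbatim) use of the three entry points above:
 *     _ReentrantBlockingLock lock;  /- after _ReentrantBlockingLock_ctor() -/
 *     if (_ReentrantBlockingLock__TryAcquire(&lock)) {
 *         ...
 *         _ReentrantBlockingLock__Release(&lock);
 *     }
 * Re-entrancy on the owning thread comes directly from the underlying
 * critical section. */
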
/* ??0_ReaderWriterLock@details@Concurrency@@QAA@XZ */
/* ??0_ReaderWriterLock@details@Concurrency@@QAE@XZ */
/* ??0_ReaderWriterLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReaderWriterLock_ctor, 4)
_ReaderWriterLock* __thiscall _ReaderWriterLock_ctor(_ReaderWriterLock *this)
{
    TRACE("(%p)\n", this);

    this->state = 0;
    this->count = 0;
    return this;
}

/* ?wait@Concurrency@@YAXI@Z */
void __cdecl Concurrency_wait(unsigned int time)
{
    TRACE("(%d)\n", time);
    block_context_for(get_current_context(), time);
}

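/* Concurrency::wait() blocks the calling context for roughly the requested
 * number of milliseconds via block_context_for() rather than spinning. */
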
#if _MSVCR_VER>=110
/* ?_Trace_agents@Concurrency@@YAXW4Agents_EventType@1@_JZZ */
void WINAPIV _Trace_agents(/*enum Concurrency::Agents_EventType*/int type, __int64 id, ...)
{
    FIXME("(%d %#I64x)\n", type, id);
}
#endif

/* ?_Trace_ppl_function@Concurrency@@YAXABU_GUID@@EW4ConcRT_EventType@1@@Z */
/* ?_Trace_ppl_function@Concurrency@@YAXAEBU_GUID@@EW4ConcRT_EventType@1@@Z */
void __cdecl _Trace_ppl_function(const GUID *guid, unsigned char level, enum ConcRT_EventType type)
{
    FIXME("(%s %u %i) stub\n", debugstr_guid(guid), level, type);
}

/* ??0_Timer@details@Concurrency@@IAE@I_N@Z */
/* ??0_Timer@details@Concurrency@@IEAA@I_N@Z */
DEFINE_THISCALL_WRAPPER(_Timer_ctor, 12)
_Timer* __thiscall _Timer_ctor(_Timer *this, unsigned int elapse, bool repeat)
{
    TRACE("(%p %u %x)\n", this, elapse, repeat);

    this->vtable = &_Timer_vtable;
    this->timer = NULL;
    this->elapse = elapse;
    this->repeat = repeat;
    return this;
}

static void WINAPI timer_callback(TP_CALLBACK_INSTANCE *instance, void *ctx, TP_TIMER *timer)
{
    _Timer *this = ctx;
    TRACE("calling _Timer(%p) callback\n", this);
    call__Timer_callback(this);
}

/* ?_Start@_Timer@details@Concurrency@@IAEXXZ */
/* ?_Start@_Timer@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Timer__Start, 4)
void __thiscall _Timer__Start(_Timer *this)
{
    LONGLONG ll;
    FILETIME ft;

    TRACE("(%p)\n", this);

    this->timer = CreateThreadpoolTimer(timer_callback, this, NULL);
    if (!this->timer)
    {
        FIXME("throw exception?\n");
        return;
    }

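    /* SetThreadpoolTimer interprets a negative FILETIME as a due time
     * relative to now; TICKSPERMSEC converts the millisecond timeout to the
     * 100-nanosecond units FILETIME uses, and a non-zero period (again in
     * milliseconds) makes the timer fire repeatedly. */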
    ll = -(LONGLONG)this->elapse * TICKSPERMSEC;
    ft.dwLowDateTime = ll & 0xffffffff;
    ft.dwHighDateTime = ll >> 32;
    SetThreadpoolTimer(this->timer, &ft, this->repeat ? this->elapse : 0, 0);
}

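/* Teardown order below matters: clearing the due time stops further
 * expirations, WaitForThreadpoolTimerCallbacks() with TRUE cancels pending
 * callbacks and waits for a running one to return, and only then is the
 * timer object closed. */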
/* ?_Stop@_Timer@details@Concurrency@@IAEXXZ */
/* ?_Stop@_Timer@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Timer__Stop, 4)
void __thiscall _Timer__Stop(_Timer *this)
{
    TRACE("(%p)\n", this);

    SetThreadpoolTimer(this->timer, NULL, 0, 0);
    WaitForThreadpoolTimerCallbacks(this->timer, TRUE);
    CloseThreadpoolTimer(this->timer);
    this->timer = NULL;
}

/* ??1_Timer@details@Concurrency@@MAE@XZ */
/* ??1_Timer@details@Concurrency@@MEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Timer_dtor, 4)
void __thiscall _Timer_dtor(_Timer *this)
{
    TRACE("(%p)\n", this);

    if (this->timer)
        _Timer__Stop(this);
}

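/* MSVC-style vector deleting destructor: flags bit 0 requests that the
 * storage be freed, bit 1 signals an array whose element count is stored in
 * the INT_PTR just before the first element. */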
DEFINE_THISCALL_WRAPPER(_Timer_vector_dtor, 8)
_Timer* __thiscall _Timer_vector_dtor(_Timer *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if (flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for (i=*ptr-1; i>=0; i--)
            _Timer_dtor(this+i);
        operator_delete(ptr);
    } else {
        _Timer_dtor(this);
        if (flags & 1)
            operator_delete(this);
    }

    return this;
}

#ifdef __ASM_USE_THISCALL_WRAPPER

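/* Thunks used to call the thiscall methods stored in the vtables below from
 * Wine's C code on i386: each one pops `this` off the stack into %ecx, as
 * the thiscall ABI expects, and tail-jumps through the vtable slot at the
 * given byte offset. */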
#define DEFINE_VTBL_WRAPPER(off)            \
    __ASM_GLOBAL_FUNC(vtbl_wrapper_ ## off, \
        "popl %eax\n\t"                     \
        "popl %ecx\n\t"                     \
        "pushl %eax\n\t"                    \
        "movl 0(%ecx), %eax\n\t"            \
        "jmp *" #off "(%eax)\n\t")

DEFINE_VTBL_WRAPPER(0);
DEFINE_VTBL_WRAPPER(4);
DEFINE_VTBL_WRAPPER(8);
DEFINE_VTBL_WRAPPER(12);
DEFINE_VTBL_WRAPPER(16);
DEFINE_VTBL_WRAPPER(20);
DEFINE_VTBL_WRAPPER(24);
DEFINE_VTBL_WRAPPER(28);
DEFINE_VTBL_WRAPPER(32);
DEFINE_VTBL_WRAPPER(36);
DEFINE_VTBL_WRAPPER(40);
DEFINE_VTBL_WRAPPER(44);
DEFINE_VTBL_WRAPPER(48);

#endif

DEFINE_RTTI_DATA0(Context, 0, ".?AVContext@Concurrency@@")
DEFINE_RTTI_DATA1(ContextBase, 0, &Context_rtti_base_descriptor, ".?AVContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ExternalContextBase, 0, &ContextBase_rtti_base_descriptor,
        &Context_rtti_base_descriptor, ".?AVExternalContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA0(Scheduler, 0, ".?AVScheduler@Concurrency@@")
DEFINE_RTTI_DATA1(SchedulerBase, 0, &Scheduler_rtti_base_descriptor, ".?AVSchedulerBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ThreadScheduler, 0, &SchedulerBase_rtti_base_descriptor,
        &Scheduler_rtti_base_descriptor, ".?AVThreadScheduler@details@Concurrency@@")
DEFINE_RTTI_DATA0(_Timer, 0, ".?AV_Timer@details@Concurrency@@");

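/* The VTABLE_ADD_FUNC order below defines the slot offsets and must stay in
 * sync with the call_Context_* CALL_VTBL_FUNC macros earlier in the file. */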
__ASM_BLOCK_BEGIN(concurrency_vtables)
    __ASM_VTABLE(ExternalContextBase,
            VTABLE_ADD_FUNC(ExternalContextBase_GetId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetVirtualProcessorId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetScheduleGroupId)
            VTABLE_ADD_FUNC(ExternalContextBase_Unblock)
            VTABLE_ADD_FUNC(ExternalContextBase_IsSynchronouslyBlocked)
            VTABLE_ADD_FUNC(ExternalContextBase_vector_dtor)
            VTABLE_ADD_FUNC(ExternalContextBase_Block)
            VTABLE_ADD_FUNC(ExternalContextBase_Yield)
            VTABLE_ADD_FUNC(ExternalContextBase_SpinYield)
            VTABLE_ADD_FUNC(ExternalContextBase_Oversubscribe)
            VTABLE_ADD_FUNC(ExternalContextBase_Alloc)
            VTABLE_ADD_FUNC(ExternalContextBase_Free)
            VTABLE_ADD_FUNC(ExternalContextBase_EnterCriticalRegionHelper)
            VTABLE_ADD_FUNC(ExternalContextBase_EnterHyperCriticalRegionHelper)
            VTABLE_ADD_FUNC(ExternalContextBase_ExitCriticalRegionHelper)
            VTABLE_ADD_FUNC(ExternalContextBase_ExitHyperCriticalRegionHelper)
            VTABLE_ADD_FUNC(ExternalContextBase_GetCriticalRegionType)
            VTABLE_ADD_FUNC(ExternalContextBase_GetContextKind));
    __ASM_VTABLE(ThreadScheduler,
            VTABLE_ADD_FUNC(ThreadScheduler_vector_dtor)
            VTABLE_ADD_FUNC(ThreadScheduler_Id)
            VTABLE_ADD_FUNC(ThreadScheduler_GetNumberOfVirtualProcessors)
            VTABLE_ADD_FUNC(ThreadScheduler_GetPolicy)
            VTABLE_ADD_FUNC(ThreadScheduler_Reference)
            VTABLE_ADD_FUNC(ThreadScheduler_Release)
            VTABLE_ADD_FUNC(ThreadScheduler_RegisterShutdownEvent)
            VTABLE_ADD_FUNC(ThreadScheduler_Attach)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_IsAvailableLocation)
#endif
            );
    __ASM_VTABLE(_Timer,
            VTABLE_ADD_FUNC(_Timer_vector_dtor));
__ASM_BLOCK_END

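/* Called from the CRT's initialization code with the module base address;
 * when RTTI data is stored as RVAs this fixes up the RTTI and C++ exception
 * type info for the concurrency classes defined above. */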
void msvcrt_init_concurrency(void *base)
{
#ifdef RTTI_USE_RVA
    init_cexception_rtti(base);
    init_improper_lock_rtti(base);
    init_improper_scheduler_attach_rtti(base);
    init_improper_scheduler_detach_rtti(base);
    init_invalid_multiple_scheduling_rtti(base);
    init_invalid_scheduler_policy_key_rtti(base);
    init_invalid_scheduler_policy_thread_specification_rtti(base);
    init_invalid_scheduler_policy_value_rtti(base);
    init_missing_wait_rtti(base);
    init_scheduler_resource_allocation_error_rtti(base);
    init_Context_rtti(base);
    init_ContextBase_rtti(base);
    init_ExternalContextBase_rtti(base);
    init_Scheduler_rtti(base);
    init_SchedulerBase_rtti(base);
    init_ThreadScheduler_rtti(base);
    init__Timer_rtti(base);

    init_cexception_cxx_type_info(base);
    init_improper_lock_cxx(base);
    init_improper_scheduler_attach_cxx(base);
    init_improper_scheduler_detach_cxx(base);
    init_invalid_multiple_scheduling_cxx(base);
    init_invalid_scheduler_policy_key_cxx(base);
    init_invalid_scheduler_policy_thread_specification_cxx(base);
    init_invalid_scheduler_policy_value_cxx(base);
#if _MSVCR_VER >= 120
    init_missing_wait_cxx(base);
#endif
    init_scheduler_resource_allocation_error_cxx(base);
#endif
}

void msvcrt_free_concurrency(void)
{
    if (context_tls_index != TLS_OUT_OF_INDEXES)
        TlsFree(context_tls_index);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    if(default_scheduler) {
        ThreadScheduler_dtor(default_scheduler);
        operator_delete(default_scheduler);
    }
}

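/* Per-thread cleanup: destroys the Context attached to the exiting thread,
 * if one was ever created for it. */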
void msvcrt_free_scheduler_thread(void)
{
    Context *context = try_get_current_context();
    if (!context) return;
    call_Context_dtor(context, 1);
}

#endif /* _MSVCR_VER >= 100 */