/*
 * Concurrency namespace implementation
 *
 * Copyright 2017 Piotr Caban
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include <stdarg.h>
#include <stdbool.h>

#include "windef.h"
#include "winternl.h"
#include "wine/debug.h"
#include "wine/exception.h"
#include "wine/list.h"
#include "msvcrt.h"
#include "cxx.h"

#if _MSVCR_VER >= 100

WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);

typedef exception cexception;
CREATE_EXCEPTION_OBJECT(cexception)

static LONG context_id = -1;
static LONG scheduler_id = -1;

typedef enum {
    SchedulerKind,
    MaxConcurrency,
    MinConcurrency,
    TargetOversubscriptionFactor,
    LocalContextCacheSize,
    ContextStackSize,
    ContextPriority,
    SchedulingProtocol,
    DynamicProgressFeedback,
    WinRTInitialization,
    last_policy_id
} PolicyElementKey;

typedef struct {
    struct _policy_container {
        unsigned int policies[last_policy_id];
    } *policy_container;
} SchedulerPolicy;

typedef struct {
    const vtable_ptr *vtable;
} Context;
#define call_Context_GetId(this) CALL_VTBL_FUNC(this, 0, \
        unsigned int, (const Context*), (this))
#define call_Context_GetVirtualProcessorId(this) CALL_VTBL_FUNC(this, 4, \
        unsigned int, (const Context*), (this))
#define call_Context_GetScheduleGroupId(this) CALL_VTBL_FUNC(this, 8, \
        unsigned int, (const Context*), (this))
#define call_Context_Unblock(this) CALL_VTBL_FUNC(this, 12, \
        void, (Context*), (this))
#define call_Context_IsSynchronouslyBlocked(this) CALL_VTBL_FUNC(this, 16, \
        bool, (const Context*), (this))
#define call_Context_dtor(this, flags) CALL_VTBL_FUNC(this, 20, \
        Context*, (Context*, unsigned int), (this, flags))
#define call_Context_Block(this) CALL_VTBL_FUNC(this, 24, \
        void, (Context*), (this))

typedef struct {
    Context *context;
} _Context;

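/* Per-context allocation cache used by Concurrency_Alloc()/Concurrency_Free():
 * each ExternalContextBase keeps eight singly linked free lists, one per
 * power-of-two bucket from 1 << 4 (16) up to 1 << 11 (2048) bytes.  A cached
 * block stores its free-list depth while free and its bucket index while
 * allocated; user data starts at alloc.mem. */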
union allocator_cache_entry {
    struct _free {
        int depth;
        union allocator_cache_entry *next;
    } free;
    struct _alloc {
        int bucket;
        char mem[1];
    } alloc;
};

struct scheduler_list {
    struct Scheduler *scheduler;
    struct scheduler_list *next;
};

struct beacon {
    bool cancelling;
    struct list entry;
    struct _StructuredTaskCollection *task_collection;
};

typedef struct {
    Context context;
    struct scheduler_list scheduler;
    unsigned int id;
    union allocator_cache_entry *allocator_cache[8];
    LONG blocked;
    struct _StructuredTaskCollection *task_collection;
    CRITICAL_SECTION beacons_cs;
    struct list beacons;
} ExternalContextBase;
extern const vtable_ptr ExternalContextBase_vtable;
static void ExternalContextBase_ctor(ExternalContextBase*);

typedef struct Scheduler {
    const vtable_ptr *vtable;
} Scheduler;
#define call_Scheduler_Id(this) CALL_VTBL_FUNC(this, 4, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetNumberOfVirtualProcessors(this) CALL_VTBL_FUNC(this, 8, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetPolicy(this,policy) CALL_VTBL_FUNC(this, 12, \
        SchedulerPolicy*, (Scheduler*,SchedulerPolicy*), (this,policy))
#define call_Scheduler_Reference(this) CALL_VTBL_FUNC(this, 16, unsigned int, (Scheduler*), (this))
#define call_Scheduler_Release(this) CALL_VTBL_FUNC(this, 20, unsigned int, (Scheduler*), (this))
#define call_Scheduler_RegisterShutdownEvent(this,event) CALL_VTBL_FUNC(this, 24, void, (Scheduler*,HANDLE), (this,event))
#define call_Scheduler_Attach(this) CALL_VTBL_FUNC(this, 28, void, (Scheduler*), (this))
#if _MSVCR_VER > 100
#define call_Scheduler_CreateScheduleGroup_loc(this,placement) CALL_VTBL_FUNC(this, 32, \
        /*ScheduleGroup*/void*, (Scheduler*,/*location*/void*), (this,placement))
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 36, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask_loc(this,proc,data,placement) CALL_VTBL_FUNC(this, 40, \
        void, (Scheduler*,void (__cdecl*)(void*),void*,/*location*/void*), (this,proc,data,placement))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 44, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#define call_Scheduler_IsAvailableLocation(this,placement) CALL_VTBL_FUNC(this, 48, \
        bool, (Scheduler*,const /*location*/void*), (this,placement))
#else
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 32, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 36, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#endif

typedef struct {
    Scheduler scheduler;
    LONG ref;
    unsigned int id;
    unsigned int virt_proc_no;
    SchedulerPolicy policy;
    int shutdown_count;
    int shutdown_size;
    HANDLE *shutdown_events;
    CRITICAL_SECTION cs;
    struct list scheduled_chores;
} ThreadScheduler;
extern const vtable_ptr ThreadScheduler_vtable;

typedef struct {
    Scheduler *scheduler;
} _Scheduler;

typedef struct {
    char empty;
} _CurrentScheduler;

typedef enum
{
    SPINWAIT_INIT,
    SPINWAIT_SPIN,
    SPINWAIT_YIELD,
    SPINWAIT_DONE
} SpinWait_state;

typedef void (__cdecl *yield_func)(void);

typedef struct
{
    ULONG spin;
    ULONG unknown;
    SpinWait_state state;
    yield_func yield_func;
} SpinWait;

#define FINISHED_INITIAL 0x80000000
typedef struct _StructuredTaskCollection
{
    void *unk1;
    unsigned int unk2;
    void *unk3;
    Context *context;
    volatile LONG count;
    volatile LONG finished;
    void *exception;
    Context *event;
} _StructuredTaskCollection;

bool __thiscall _StructuredTaskCollection__IsCanceling(_StructuredTaskCollection*);

typedef enum
{
    TASK_COLLECTION_SUCCESS = 1,
    TASK_COLLECTION_CANCELLED
} _TaskCollectionStatus;

typedef enum
{
    STRUCTURED_TASK_COLLECTION_CANCELLED = 0x2,
    STRUCTURED_TASK_COLLECTION_STATUS_MASK = 0x7
} _StructuredTaskCollectionStatusBits;

typedef struct _UnrealizedChore
{
    const vtable_ptr *vtable;
    void (__cdecl *chore_proc)(struct _UnrealizedChore*);
    _StructuredTaskCollection *task_collection;
    void (__cdecl *chore_wrapper)(struct _UnrealizedChore*);
    void *unk[6];
} _UnrealizedChore;

struct scheduled_chore {
    struct list entry;
    _UnrealizedChore *chore;
};

/* keep in sync with msvcp90/msvcp90.h */
typedef struct cs_queue
{
    Context *ctx;
    struct cs_queue *next;
#if _MSVCR_VER >= 110
    LONG free;
    int unknown;
#endif
} cs_queue;

typedef struct
{
    cs_queue unk_active;
#if _MSVCR_VER >= 110
    void *unknown[2];
#else
    void *unknown[1];
#endif
    cs_queue *head;
    void *tail;
} critical_section;

typedef struct
{
    critical_section *cs;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } lock;
} critical_section_scoped_lock;

typedef struct
{
    critical_section cs;
} _NonReentrantPPLLock;

typedef struct
{
    _NonReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _NonReentrantPPLLock__Scoped_lock;

typedef struct
{
    critical_section cs;
    LONG count;
    LONG owner;
} _ReentrantPPLLock;

typedef struct
{
    _ReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _ReentrantPPLLock__Scoped_lock;

#define EVT_RUNNING (void*)1
#define EVT_WAITING NULL

struct thread_wait;
typedef struct thread_wait_entry
{
    struct thread_wait *wait;
    struct thread_wait_entry *next;
    struct thread_wait_entry *prev;
} thread_wait_entry;

typedef struct thread_wait
{
    Context *ctx;
    void *signaled;
    LONG pending_waits;
    thread_wait_entry entries[1];
} thread_wait;

typedef struct
{
    thread_wait_entry *waiters;
    INT_PTR signaled;
    critical_section cs;
} event;

#if _MSVCR_VER >= 110
#define CV_WAKE (void*)1
typedef struct cv_queue {
    Context *ctx;
    struct cv_queue *next;
    LONG expired;
} cv_queue;

typedef struct {
    struct beacon *beacon;
} _Cancellation_beacon;

typedef struct {
    /* cv_queue structure is not binary compatible */
    cv_queue *queue;
    critical_section lock;
} _Condition_variable;
#endif

typedef struct rwl_queue
{
    struct rwl_queue *next;
    Context *ctx;
} rwl_queue;

#define WRITER_WAITING 0x80000000
/* FIXME: reader_writer_lock structure is not binary compatible
 * it can't exceed 28/56 bytes */
typedef struct
{
    LONG count;
    LONG thread_id;
    rwl_queue active;
    rwl_queue *writer_head;
    rwl_queue *writer_tail;
    rwl_queue *reader_head;
} reader_writer_lock;

typedef struct {
    reader_writer_lock *lock;
} reader_writer_lock_scoped_lock;

typedef struct {
    CRITICAL_SECTION cs;
} _ReentrantBlockingLock;

#define TICKSPERMSEC 10000
typedef struct {
    const vtable_ptr *vtable;
    TP_TIMER *timer;
    unsigned int elapse;
    bool repeat;
} _Timer;
extern const vtable_ptr _Timer_vtable;
#define call__Timer_callback(this) CALL_VTBL_FUNC(this, 4, void, (_Timer*), (this))

typedef exception improper_lock;
extern const vtable_ptr improper_lock_vtable;

typedef exception improper_scheduler_attach;
extern const vtable_ptr improper_scheduler_attach_vtable;

typedef exception improper_scheduler_detach;
extern const vtable_ptr improper_scheduler_detach_vtable;

typedef exception invalid_multiple_scheduling;
extern const vtable_ptr invalid_multiple_scheduling_vtable;

typedef exception invalid_scheduler_policy_key;
extern const vtable_ptr invalid_scheduler_policy_key_vtable;

typedef exception invalid_scheduler_policy_thread_specification;
extern const vtable_ptr invalid_scheduler_policy_thread_specification_vtable;

typedef exception invalid_scheduler_policy_value;
extern const vtable_ptr invalid_scheduler_policy_value_vtable;

typedef exception missing_wait;
extern const vtable_ptr missing_wait_vtable;

typedef struct {
    exception e;
    HRESULT hr;
} scheduler_resource_allocation_error;
extern const vtable_ptr scheduler_resource_allocation_error_vtable;

enum ConcRT_EventType
{
    CONCRT_EVENT_GENERIC,
    CONCRT_EVENT_START,
    CONCRT_EVENT_END,
    CONCRT_EVENT_BLOCK,
    CONCRT_EVENT_UNBLOCK,
    CONCRT_EVENT_YIELD,
    CONCRT_EVENT_ATTACH,
    CONCRT_EVENT_DETACH
};

static DWORD context_tls_index = TLS_OUT_OF_INDEXES;

static CRITICAL_SECTION default_scheduler_cs;
static CRITICAL_SECTION_DEBUG default_scheduler_cs_debug =
{
    0, 0, &default_scheduler_cs,
    { &default_scheduler_cs_debug.ProcessLocksList, &default_scheduler_cs_debug.ProcessLocksList },
      0, 0, { (DWORD_PTR)(__FILE__ ": default_scheduler_cs") }
};
static CRITICAL_SECTION default_scheduler_cs = { &default_scheduler_cs_debug, -1, 0, 0, 0, 0 };
static SchedulerPolicy default_scheduler_policy;
static ThreadScheduler *default_scheduler;

static void create_default_scheduler(void);

/* ??0improper_lock@Concurrency@@QAE@PBD@Z */
/* ??0improper_lock@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_lock_ctor_str, 8)
improper_lock* __thiscall improper_lock_ctor_str(improper_lock *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &improper_lock_vtable);
}

/* ??0improper_lock@Concurrency@@QAE@XZ */
/* ??0improper_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_lock_ctor, 4)
improper_lock* __thiscall improper_lock_ctor(improper_lock *this)
{
    return improper_lock_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_lock_copy_ctor,8)
improper_lock * __thiscall improper_lock_copy_ctor(improper_lock *this, const improper_lock *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    return __exception_copy_ctor(this, rhs, &improper_lock_vtable);
}

/* ??0improper_scheduler_attach@Concurrency@@QAE@PBD@Z */
/* ??0improper_scheduler_attach@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor_str, 8)
improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor_str(
        improper_scheduler_attach *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &improper_scheduler_attach_vtable);
}

/* ??0improper_scheduler_attach@Concurrency@@QAE@XZ */
/* ??0improper_scheduler_attach@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor, 4)
improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor(
        improper_scheduler_attach *this)
{
    return improper_scheduler_attach_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_copy_ctor,8)
improper_scheduler_attach * __thiscall improper_scheduler_attach_copy_ctor(
        improper_scheduler_attach * _this, const improper_scheduler_attach * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &improper_scheduler_attach_vtable);
}

/* ??0improper_scheduler_detach@Concurrency@@QAE@PBD@Z */
/* ??0improper_scheduler_detach@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor_str, 8)
improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor_str(
        improper_scheduler_detach *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &improper_scheduler_detach_vtable);
}

/* ??0improper_scheduler_detach@Concurrency@@QAE@XZ */
/* ??0improper_scheduler_detach@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor, 4)
improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor(
        improper_scheduler_detach *this)
{
    return improper_scheduler_detach_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_copy_ctor,8)
improper_scheduler_detach * __thiscall improper_scheduler_detach_copy_ctor(
        improper_scheduler_detach * _this, const improper_scheduler_detach * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &improper_scheduler_detach_vtable);
}

/* ??0invalid_multiple_scheduling@Concurrency@@QAA@PBD@Z */
/* ??0invalid_multiple_scheduling@Concurrency@@QAE@PBD@Z */
/* ??0invalid_multiple_scheduling@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_ctor_str, 8)
invalid_multiple_scheduling* __thiscall invalid_multiple_scheduling_ctor_str(
        invalid_multiple_scheduling *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_multiple_scheduling_vtable);
}

/* ??0invalid_multiple_scheduling@Concurrency@@QAA@XZ */
/* ??0invalid_multiple_scheduling@Concurrency@@QAE@XZ */
/* ??0invalid_multiple_scheduling@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_ctor, 4)
invalid_multiple_scheduling* __thiscall invalid_multiple_scheduling_ctor(
        invalid_multiple_scheduling *this)
{
    return invalid_multiple_scheduling_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_copy_ctor,8)
invalid_multiple_scheduling * __thiscall invalid_multiple_scheduling_copy_ctor(
        invalid_multiple_scheduling * _this, const invalid_multiple_scheduling * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_multiple_scheduling_vtable);
}

/* ??0invalid_scheduler_policy_key@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor_str, 8)
invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor_str(
        invalid_scheduler_policy_key *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_key_vtable);
}

/* ??0invalid_scheduler_policy_key@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor, 4)
invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor(
        invalid_scheduler_policy_key *this)
{
    return invalid_scheduler_policy_key_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_copy_ctor,8)
invalid_scheduler_policy_key * __thiscall invalid_scheduler_policy_key_copy_ctor(
        invalid_scheduler_policy_key * _this, const invalid_scheduler_policy_key * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_key_vtable);
}

/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor_str, 8)
invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor_str(
        invalid_scheduler_policy_thread_specification *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_thread_specification_vtable);
}

/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor, 4)
invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor(
        invalid_scheduler_policy_thread_specification *this)
{
    return invalid_scheduler_policy_thread_specification_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_copy_ctor,8)
invalid_scheduler_policy_thread_specification * __thiscall invalid_scheduler_policy_thread_specification_copy_ctor(
        invalid_scheduler_policy_thread_specification * _this, const invalid_scheduler_policy_thread_specification * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_thread_specification_vtable);
}

/* ??0invalid_scheduler_policy_value@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor_str, 8)
invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor_str(
        invalid_scheduler_policy_value *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_value_vtable);
}

/* ??0invalid_scheduler_policy_value@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor, 4)
invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor(
        invalid_scheduler_policy_value *this)
{
    return invalid_scheduler_policy_value_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_copy_ctor,8)
invalid_scheduler_policy_value * __thiscall invalid_scheduler_policy_value_copy_ctor(
        invalid_scheduler_policy_value * _this, const invalid_scheduler_policy_value * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_value_vtable);
}

/* ??0missing_wait@Concurrency@@QAA@PBD@Z */
/* ??0missing_wait@Concurrency@@QAE@PBD@Z */
/* ??0missing_wait@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(missing_wait_ctor_str, 8)
missing_wait* __thiscall missing_wait_ctor_str(
        missing_wait *this, const char *str)
{
    TRACE("(%p %p)\n", this, str);
    return __exception_ctor(this, str, &missing_wait_vtable);
}

/* ??0missing_wait@Concurrency@@QAA@XZ */
/* ??0missing_wait@Concurrency@@QAE@XZ */
/* ??0missing_wait@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(missing_wait_ctor, 4)
missing_wait* __thiscall missing_wait_ctor(missing_wait *this)
{
    return missing_wait_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(missing_wait_copy_ctor,8)
missing_wait * __thiscall missing_wait_copy_ctor(
        missing_wait * _this, const missing_wait * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &missing_wait_vtable);
}

/* ??0scheduler_resource_allocation_error@Concurrency@@QAE@PBDJ@Z */
/* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@PEBDJ@Z */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor_name, 12)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor_name(
        scheduler_resource_allocation_error *this, const char *name, HRESULT hr)
{
    TRACE("(%p %s %lx)\n", this, wine_dbgstr_a(name), hr);
    __exception_ctor(&this->e, name, &scheduler_resource_allocation_error_vtable);
    this->hr = hr;
    return this;
}

/* ??0scheduler_resource_allocation_error@Concurrency@@QAE@J@Z */
/* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@J@Z */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor, 8)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor(
        scheduler_resource_allocation_error *this, HRESULT hr)
{
    return scheduler_resource_allocation_error_ctor_name(this, NULL, hr);
}

DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_copy_ctor,8)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_copy_ctor(
        scheduler_resource_allocation_error *this,
        const scheduler_resource_allocation_error *rhs)
{
    TRACE("(%p,%p)\n", this, rhs);

    if (!rhs->e.do_free)
        memcpy(this, rhs, sizeof(*this));
    else
        scheduler_resource_allocation_error_ctor_name(this, rhs->e.name, rhs->hr);
    return this;
}

/* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QBEJXZ */
/* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QEBAJXZ */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_get_error_code, 4)
HRESULT __thiscall scheduler_resource_allocation_error_get_error_code(
        const scheduler_resource_allocation_error *this)
{
    TRACE("(%p)\n", this);
    return this->hr;
}

DEFINE_RTTI_DATA1(improper_lock, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_lock@Concurrency@@")
DEFINE_RTTI_DATA1(improper_scheduler_attach, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_scheduler_attach@Concurrency@@")
DEFINE_RTTI_DATA1(improper_scheduler_detach, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_scheduler_detach@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_multiple_scheduling, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_multiple_scheduling@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_key, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_key@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_thread_specification, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_thread_specification@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_value, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_value@Concurrency@@")
DEFINE_RTTI_DATA1(missing_wait, 0, &cexception_rtti_base_descriptor,
        ".?AVmissing_wait@Concurrency@@")
DEFINE_RTTI_DATA1(scheduler_resource_allocation_error, 0, &cexception_rtti_base_descriptor,
        ".?AVscheduler_resource_allocation_error@Concurrency@@")

DEFINE_CXX_DATA1(improper_lock, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(improper_scheduler_attach, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(improper_scheduler_detach, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_multiple_scheduling, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_key, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_thread_specification, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_value, &cexception_cxx_type_info, cexception_dtor)
#if _MSVCR_VER >= 120
DEFINE_CXX_DATA1(missing_wait, &cexception_cxx_type_info, cexception_dtor)
#endif
DEFINE_CXX_DATA1(scheduler_resource_allocation_error, &cexception_cxx_type_info, cexception_dtor)

__ASM_BLOCK_BEGIN(concurrency_exception_vtables)
    __ASM_VTABLE(improper_lock,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(improper_scheduler_attach,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(improper_scheduler_detach,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_multiple_scheduling,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_key,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_thread_specification,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_value,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(missing_wait,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(scheduler_resource_allocation_error,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
__ASM_BLOCK_END

static Context* try_get_current_context(void)
{
    if (context_tls_index == TLS_OUT_OF_INDEXES)
        return NULL;
    return TlsGetValue(context_tls_index);
}

static BOOL WINAPI init_context_tls_index(INIT_ONCE *once, void *param, void **context)
{
    context_tls_index = TlsAlloc();
    return context_tls_index != TLS_OUT_OF_INDEXES;
}

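/* Returns the Context for the current thread, lazily allocating an
 * ExternalContextBase (kept in TLS) the first time a thread enters ConcRT.
 * Throws scheduler_resource_allocation_error if the TLS index cannot be
 * allocated. */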
static Context* get_current_context(void)
{
    static INIT_ONCE init_once = INIT_ONCE_STATIC_INIT;
    Context *ret;

    if(!InitOnceExecuteOnce(&init_once, init_context_tls_index, NULL, NULL))
    {
        scheduler_resource_allocation_error e;
        scheduler_resource_allocation_error_ctor_name(&e, NULL,
                HRESULT_FROM_WIN32(GetLastError()));
        _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
    }

    ret = TlsGetValue(context_tls_index);
    if (!ret) {
        ExternalContextBase *context = operator_new(sizeof(ExternalContextBase));
        ExternalContextBase_ctor(context);
        TlsSetValue(context_tls_index, context);
        ret = &context->context;
    }
    return ret;
}

static Scheduler* get_scheduler_from_context(Context *ctx)
{
    ExternalContextBase *context = (ExternalContextBase*)ctx;

    if (context->context.vtable != &ExternalContextBase_vtable)
        return NULL;
    return context->scheduler.scheduler;
}

static Scheduler* try_get_current_scheduler(void)
{
    Context *context = try_get_current_context();
    Scheduler *ret;

    if (!context)
        return NULL;

    ret = get_scheduler_from_context(context);
    if (!ret)
        ERR("unknown context set\n");
    return ret;
}

static Scheduler* get_current_scheduler(void)
{
    Context *context = get_current_context();
    Scheduler *ret;

    ret = get_scheduler_from_context(context);
    if (!ret)
        ERR("unknown context set\n");
    return ret;
}

/* ?CurrentContext@Context@Concurrency@@SAPAV12@XZ */
/* ?CurrentContext@Context@Concurrency@@SAPEAV12@XZ */
Context* __cdecl Context_CurrentContext(void)
{
    TRACE("()\n");
    return get_current_context();
}

/* ?Id@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_Id(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetId(ctx) : -1;
}

/* ?Block@Context@Concurrency@@SAXXZ */
void __cdecl Context_Block(void)
{
    Context *ctx = get_current_context();
    TRACE("()\n");
    call_Context_Block(ctx);
}

/* ?Yield@Context@Concurrency@@SAXXZ */
/* ?_Yield@_Context@details@Concurrency@@SAXXZ */
void __cdecl Context_Yield(void)
{
    FIXME("()\n");
}

/* ?_SpinYield@Context@Concurrency@@SAXXZ */
void __cdecl Context__SpinYield(void)
{
    FIXME("()\n");
}

/* ?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ */
bool __cdecl Context_IsCurrentTaskCollectionCanceling(void)
{
    ExternalContextBase *ctx = (ExternalContextBase*)try_get_current_context();

    TRACE("()\n");

    if (ctx && ctx->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return FALSE;
    }

    if (ctx && ctx->task_collection)
        return _StructuredTaskCollection__IsCanceling(ctx->task_collection);
    return FALSE;
}

/* ?Oversubscribe@Context@Concurrency@@SAX_N@Z */
void __cdecl Context_Oversubscribe(bool begin)
{
    FIXME("(%x)\n", begin);
}

/* ?ScheduleGroupId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_ScheduleGroupId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetScheduleGroupId(ctx) : -1;
}

/* ?VirtualProcessorId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_VirtualProcessorId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetVirtualProcessorId(ctx) : -1;
}

#if _MSVCR_VER > 100
/* ?_CurrentContext@_Context@details@Concurrency@@SA?AV123@XZ */
_Context *__cdecl _Context__CurrentContext(_Context *ret)
{
    TRACE("(%p)\n", ret);
    ret->context = Context_CurrentContext();
    return ret;
}

DEFINE_THISCALL_WRAPPER(_Context_IsSynchronouslyBlocked, 4)
BOOL __thiscall _Context_IsSynchronouslyBlocked(const _Context *this)
{
    TRACE("(%p)\n", this);
    return call_Context_IsSynchronouslyBlocked(this->context);
}
#endif

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetId, 4)
unsigned int __thiscall ExternalContextBase_GetId(const ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetVirtualProcessorId, 4)
unsigned int __thiscall ExternalContextBase_GetVirtualProcessorId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetScheduleGroupId, 4)
unsigned int __thiscall ExternalContextBase_GetScheduleGroupId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

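/* Block()/Unblock() pair up through the "blocked" counter: Block() increments
 * it and sleeps on its address while the value stays >= 1, Unblock()
 * decrements it and wakes one waiter.  An Unblock() that arrives first leaves
 * the counter negative, so the matching Block() returns without sleeping. */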
DEFINE_THISCALL_WRAPPER(ExternalContextBase_Unblock, 4)
void __thiscall ExternalContextBase_Unblock(ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);

    /* TODO: throw context_unblock_unbalanced if this->blocked goes below -1 */
    if (!InterlockedDecrement(&this->blocked))
        RtlWakeAddressSingle(&this->blocked);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_IsSynchronouslyBlocked, 4)
bool __thiscall ExternalContextBase_IsSynchronouslyBlocked(const ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);
    return this->blocked >= 1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Block, 4)
void __thiscall ExternalContextBase_Block(ExternalContextBase *this)
{
    LONG blocked;

    TRACE("(%p)->()\n", this);

    blocked = InterlockedIncrement(&this->blocked);
    while (blocked >= 1)
    {
        RtlWaitOnAddress(&this->blocked, &blocked, sizeof(LONG), NULL);
        blocked = this->blocked;
    }
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Yield, 4)
void __thiscall ExternalContextBase_Yield(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_SpinYield, 4)
void __thiscall ExternalContextBase_SpinYield(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Oversubscribe, 8)
void __thiscall ExternalContextBase_Oversubscribe(
        ExternalContextBase *this, bool oversubscribe)
{
    FIXME("(%p)->(%x) stub\n", this, oversubscribe);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Alloc, 8)
void* __thiscall ExternalContextBase_Alloc(ExternalContextBase *this, size_t size)
{
    FIXME("(%p)->(%Iu) stub\n", this, size);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Free, 8)
void __thiscall ExternalContextBase_Free(ExternalContextBase *this, void *addr)
{
    FIXME("(%p)->(%p) stub\n", this, addr);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_EnterCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_EnterCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_EnterHyperCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_EnterHyperCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_ExitCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_ExitCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_ExitHyperCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_ExitHyperCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetCriticalRegionType, 4)
int __thiscall ExternalContextBase_GetCriticalRegionType(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetContextKind, 4)
int __thiscall ExternalContextBase_GetContextKind(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

static void remove_scheduled_chores(Scheduler *scheduler, const ExternalContextBase *context)
{
    ThreadScheduler *tscheduler = (ThreadScheduler*)scheduler;
    struct scheduled_chore *sc, *next;

    if (tscheduler->scheduler.vtable != &ThreadScheduler_vtable)
        return;

    EnterCriticalSection(&tscheduler->cs);
    LIST_FOR_EACH_ENTRY_SAFE(sc, next, &tscheduler->scheduled_chores,
            struct scheduled_chore, entry) {
        if (sc->chore->task_collection->context == &context->context) {
            list_remove(&sc->entry);
            operator_delete(sc);
        }
    }
    LeaveCriticalSection(&tscheduler->cs);
}

static void ExternalContextBase_dtor(ExternalContextBase *this)
{
    struct scheduler_list *scheduler_cur, *scheduler_next;
    union allocator_cache_entry *next, *cur;
    int i;

    /* TODO: move the allocator cache to scheduler so it can be reused */
    for(i=0; i<ARRAY_SIZE(this->allocator_cache); i++) {
        for(cur = this->allocator_cache[i]; cur; cur=next) {
            next = cur->free.next;
            operator_delete(cur);
        }
    }

    if (this->scheduler.scheduler) {
        remove_scheduled_chores(this->scheduler.scheduler, this);
        call_Scheduler_Release(this->scheduler.scheduler);

        for(scheduler_cur=this->scheduler.next; scheduler_cur; scheduler_cur=scheduler_next) {
            scheduler_next = scheduler_cur->next;
            remove_scheduled_chores(scheduler_cur->scheduler, this);
            call_Scheduler_Release(scheduler_cur->scheduler);
            operator_delete(scheduler_cur);
        }
    }

    DeleteCriticalSection(&this->beacons_cs);
    if (!list_empty(&this->beacons))
        ERR("beacons list is not empty - expect crash\n");
}

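/* MSVC-style vector deleting destructor: bit 1 of "flags" means an array is
 * being destroyed (the element count is stored just before the first object),
 * bit 0 means the memory should be freed as well. */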
DEFINE_THISCALL_WRAPPER(ExternalContextBase_vector_dtor, 8)
Context* __thiscall ExternalContextBase_vector_dtor(ExternalContextBase *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ExternalContextBase_dtor(this+i);
        operator_delete(ptr);
    } else {
        ExternalContextBase_dtor(this);
        if(flags & 1)
            operator_delete(this);
    }

    return &this->context;
}

static void ExternalContextBase_ctor(ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);

    memset(this, 0, sizeof(*this));
    this->context.vtable = &ExternalContextBase_vtable;
    this->id = InterlockedIncrement(&context_id);
    InitializeCriticalSection(&this->beacons_cs);
    list_init(&this->beacons);

    create_default_scheduler();
    this->scheduler.scheduler = &default_scheduler->scheduler;
    call_Scheduler_Reference(&default_scheduler->scheduler);
}

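/* Rounds the request up to a power-of-two bucket of 1 << (bucket + 4) bytes
 * and reuses a cached block from the current context when possible.  Requests
 * above the largest bucket (2048 bytes), or made on an unrecognized context,
 * fall back to plain operator new and are tagged with bucket -1. */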
/* ?Alloc@Concurrency@@YAPAXI@Z */
/* ?Alloc@Concurrency@@YAPEAX_K@Z */
void * CDECL Concurrency_Alloc(size_t size)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    union allocator_cache_entry *p;

    size += FIELD_OFFSET(union allocator_cache_entry, alloc.mem);
    if (size < sizeof(*p))
        size = sizeof(*p);

    if (context->context.vtable != &ExternalContextBase_vtable) {
        p = operator_new(size);
        p->alloc.bucket = -1;
    }else {
        int i;

        C_ASSERT(sizeof(union allocator_cache_entry) <= 1 << 4);
        for(i=0; i<ARRAY_SIZE(context->allocator_cache); i++)
            if (1 << (i+4) >= size) break;

        if(i==ARRAY_SIZE(context->allocator_cache)) {
            p = operator_new(size);
            p->alloc.bucket = -1;
        }else if (context->allocator_cache[i]) {
            p = context->allocator_cache[i];
            context->allocator_cache[i] = p->free.next;
            p->alloc.bucket = i;
        }else {
            p = operator_new(1 << (i+4));
            p->alloc.bucket = i;
        }
    }

    TRACE("(%Iu) returning %p\n", size, p->alloc.mem);
    return p->alloc.mem;
}

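/* Returns a block to the free list of its bucket unless the cached list has
 * already grown too deep (head depth reaches 20; the depth is tracked in the
 * cached entries themselves); oversized blocks (bucket -1) are simply
 * freed. */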
/* ?Free@Concurrency@@YAXPAX@Z */
/* ?Free@Concurrency@@YAXPEAX@Z */
void CDECL Concurrency_Free(void* mem)
{
    union allocator_cache_entry *p = (union allocator_cache_entry*)((char*)mem-FIELD_OFFSET(union allocator_cache_entry, alloc.mem));
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    int bucket = p->alloc.bucket;

    TRACE("(%p)\n", mem);

    if (context->context.vtable != &ExternalContextBase_vtable) {
        operator_delete(p);
    }else {
        if(bucket >= 0 && bucket < ARRAY_SIZE(context->allocator_cache) &&
            (!context->allocator_cache[bucket] || context->allocator_cache[bucket]->free.depth < 20)) {
            p->free.next = context->allocator_cache[bucket];
            p->free.depth = p->free.next ? p->free.next->free.depth+1 : 0;
            context->allocator_cache[bucket] = p;
        }else {
            operator_delete(p);
        }
    }
}

/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z */
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QEAAIW4PolicyElementKey@2@I@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetPolicyValue, 12)
unsigned int __thiscall SchedulerPolicy_SetPolicyValue(SchedulerPolicy *this,
        PolicyElementKey policy, unsigned int val)
{
    unsigned int ret;

    TRACE("(%p %d %d)\n", this, policy, val);

    if (policy == MinConcurrency) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "MinConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    if (policy == MaxConcurrency) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "MaxConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    if (policy >= last_policy_id) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }

    switch(policy) {
    case SchedulerKind:
        if (val) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "SchedulerKind");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case TargetOversubscriptionFactor:
        if (!val) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "TargetOversubscriptionFactor");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case ContextPriority:
        if (((int)val < -7 /* THREAD_PRIORITY_REALTIME_LOWEST */
            || val > 6 /* THREAD_PRIORITY_REALTIME_HIGHEST */)
            && val != THREAD_PRIORITY_IDLE && val != THREAD_PRIORITY_TIME_CRITICAL
            && val != INHERIT_THREAD_PRIORITY) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "ContextPriority");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case SchedulingProtocol:
    case DynamicProgressFeedback:
    case WinRTInitialization:
        if (val != 0 && val != 1) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "SchedulingProtocol");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    default:
        break;
    }

    ret = this->policy_container->policies[policy];
    this->policy_container->policies[policy] = val;
    return ret;
}

/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z */
/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QEAAXII@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetConcurrencyLimits, 12)
void __thiscall SchedulerPolicy_SetConcurrencyLimits(SchedulerPolicy *this,
        unsigned int min_concurrency, unsigned int max_concurrency)
{
    TRACE("(%p %d %d)\n", this, min_concurrency, max_concurrency);

    if (min_concurrency > max_concurrency) {
        invalid_scheduler_policy_thread_specification e;
        invalid_scheduler_policy_thread_specification_ctor_str(&e, NULL);
        _CxxThrowException(&e, &invalid_scheduler_policy_thread_specification_exception_type);
    }
    if (!max_concurrency) {
        invalid_scheduler_policy_value e;
        invalid_scheduler_policy_value_ctor_str(&e, "MaxConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
    }

    this->policy_container->policies[MinConcurrency] = min_concurrency;
    this->policy_container->policies[MaxConcurrency] = max_concurrency;
}

/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z */
/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QEBAIW4PolicyElementKey@2@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_GetPolicyValue, 8)
unsigned int __thiscall SchedulerPolicy_GetPolicyValue(
        const SchedulerPolicy *this, PolicyElementKey policy)
{
    TRACE("(%p %d)\n", this, policy);

    if (policy >= last_policy_id) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    return this->policy_container->policies[policy];
}

/* ??0SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_ctor, 4)
SchedulerPolicy* __thiscall SchedulerPolicy_ctor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);

    this->policy_container = operator_new(sizeof(*this->policy_container));
    /* TODO: default values can probably be affected by CurrentScheduler */
    this->policy_container->policies[SchedulerKind] = 0;
    this->policy_container->policies[MaxConcurrency] = -1;
    this->policy_container->policies[MinConcurrency] = 1;
    this->policy_container->policies[TargetOversubscriptionFactor] = 1;
    this->policy_container->policies[LocalContextCacheSize] = 8;
    this->policy_container->policies[ContextStackSize] = 0;
    this->policy_container->policies[ContextPriority] = THREAD_PRIORITY_NORMAL;
    this->policy_container->policies[SchedulingProtocol] = 0;
    this->policy_container->policies[DynamicProgressFeedback] = 1;
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAA@IZZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@_KZZ */
/* TODO: don't leak policy_container on exception */
SchedulerPolicy* WINAPIV SchedulerPolicy_ctor_policies(
        SchedulerPolicy *this, size_t n, ...)
{
    unsigned int min_concurrency, max_concurrency;
    va_list valist;
    size_t i;

    TRACE("(%p %Iu)\n", this, n);

    SchedulerPolicy_ctor(this);
    min_concurrency = this->policy_container->policies[MinConcurrency];
    max_concurrency = this->policy_container->policies[MaxConcurrency];

    va_start(valist, n);
    for(i=0; i<n; i++) {
        PolicyElementKey policy = va_arg(valist, PolicyElementKey);
        unsigned int val = va_arg(valist, unsigned int);

        if(policy == MinConcurrency)
            min_concurrency = val;
        else if(policy == MaxConcurrency)
            max_concurrency = val;
        else
            SchedulerPolicy_SetPolicyValue(this, policy, val);
    }
    va_end(valist);

    SchedulerPolicy_SetConcurrencyLimits(this, min_concurrency, max_concurrency);
    return this;
}

/* ??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z */
/* ??4SchedulerPolicy@Concurrency@@QEAAAEAV01@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_op_assign, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_op_assign(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    memcpy(this->policy_container->policies, rhs->policy_container->policies,
            sizeof(this->policy_container->policies));
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z */
/* ??0SchedulerPolicy@Concurrency@@QEAA@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_copy_ctor, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_copy_ctor(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    SchedulerPolicy_ctor(this);
    return SchedulerPolicy_op_assign(this, rhs);
}

/* ??1SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??1SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_dtor, 4)
void __thiscall SchedulerPolicy_dtor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);
    operator_delete(this->policy_container);
}

static void ThreadScheduler_dtor(ThreadScheduler *this)
{
    int i;
    struct scheduled_chore *sc, *next;

    if(this->ref != 0) WARN("ref = %ld\n", this->ref);
    SchedulerPolicy_dtor(&this->policy);

    for(i=0; i<this->shutdown_count; i++)
        SetEvent(this->shutdown_events[i]);
    operator_delete(this->shutdown_events);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);

    if (!list_empty(&this->scheduled_chores))
        ERR("scheduled chore list is not empty\n");
    LIST_FOR_EACH_ENTRY_SAFE(sc, next, &this->scheduled_chores,
            struct scheduled_chore, entry)
        operator_delete(sc);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Id, 4)
unsigned int __thiscall ThreadScheduler_Id(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetNumberOfVirtualProcessors, 4)
unsigned int __thiscall ThreadScheduler_GetNumberOfVirtualProcessors(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->virt_proc_no;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetPolicy, 8)
SchedulerPolicy* __thiscall ThreadScheduler_GetPolicy(
        const ThreadScheduler *this, SchedulerPolicy *ret)
{
    TRACE("(%p %p)\n", this, ret);
    return SchedulerPolicy_copy_ctor(ret, &this->policy);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Reference, 4)
unsigned int __thiscall ThreadScheduler_Reference(ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return InterlockedIncrement(&this->ref);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Release, 4)
unsigned int __thiscall ThreadScheduler_Release(ThreadScheduler *this)
{
    unsigned int ret = InterlockedDecrement(&this->ref);

    TRACE("(%p)\n", this);

    if(!ret) {
        ThreadScheduler_dtor(this);
        operator_delete(this);
    }
    return ret;
}

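/* Stores an event handle that ThreadScheduler_dtor() will signal when the
 * scheduler is destroyed; the handle array is reallocated under the scheduler
 * lock, doubling in size. */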
DEFINE_THISCALL_WRAPPER(ThreadScheduler_RegisterShutdownEvent, 8)
void __thiscall ThreadScheduler_RegisterShutdownEvent(ThreadScheduler *this, HANDLE event)
{
    HANDLE *shutdown_events;
    int size;

    TRACE("(%p %p)\n", this, event);

    EnterCriticalSection(&this->cs);

    size = this->shutdown_size ? this->shutdown_size * 2 : 1;
    shutdown_events = operator_new(size * sizeof(*shutdown_events));
    memcpy(shutdown_events, this->shutdown_events,
            this->shutdown_count * sizeof(*shutdown_events));
    operator_delete(this->shutdown_events);
    this->shutdown_size = size;
    this->shutdown_events = shutdown_events;
    this->shutdown_events[this->shutdown_count++] = event;

    LeaveCriticalSection(&this->cs);
}

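/* Makes this scheduler current for the calling context, pushing any
 * previously attached scheduler onto the context's scheduler list so that
 * CurrentScheduler_Detach() can restore it; attaching the scheduler that is
 * already current throws improper_scheduler_attach. */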
DEFINE_THISCALL_WRAPPER(ThreadScheduler_Attach, 4)
void __thiscall ThreadScheduler_Attach(ThreadScheduler *this)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    TRACE("(%p)\n", this);

    if(context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(context->scheduler.scheduler == &this->scheduler) {
        improper_scheduler_attach e;
        improper_scheduler_attach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_attach_exception_type);
    }

    if(context->scheduler.scheduler) {
        struct scheduler_list *l = operator_new(sizeof(*l));
        *l = context->scheduler;
        context->scheduler.next = l;
    }
    context->scheduler.scheduler = &this->scheduler;
    ThreadScheduler_Reference(this);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup_loc, 8)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup_loc(
        ThreadScheduler *this, /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup, 4)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup(ThreadScheduler *this)
{
    FIXME("(%p) stub\n", this);
    return NULL;
}

typedef struct
{
    void (__cdecl *proc)(void*);
    void *data;
    ThreadScheduler *scheduler;
} schedule_task_arg;

void __cdecl CurrentScheduler_Detach(void);

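/* Threadpool callback for scheduled tasks: if the target scheduler is not
 * current on this worker thread, it is attached for the duration of the task
 * and detached again once the task returns. */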
static void WINAPI schedule_task_proc(PTP_CALLBACK_INSTANCE instance, void *context, PTP_WORK work)
{
    schedule_task_arg arg;
    BOOL detach = FALSE;

    arg = *(schedule_task_arg*)context;
    operator_delete(context);

    if(&arg.scheduler->scheduler != get_current_scheduler()) {
        ThreadScheduler_Attach(arg.scheduler);
        detach = TRUE;
    }
    ThreadScheduler_Release(arg.scheduler);

    arg.proc(arg.data);

    if(detach)
        CurrentScheduler_Detach();
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask_loc, 16)
void __thiscall ThreadScheduler_ScheduleTask_loc(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data, /*location*/void *placement)
{
    schedule_task_arg *arg;
    TP_WORK *work;

    FIXME("(%p %p %p %p) stub\n", this, proc, data, placement);

    arg = operator_new(sizeof(*arg));
    arg->proc = proc;
    arg->data = data;
    arg->scheduler = this;
    ThreadScheduler_Reference(this);

    work = CreateThreadpoolWork(schedule_task_proc, arg, NULL);
    if(!work) {
        scheduler_resource_allocation_error e;

        ThreadScheduler_Release(this);
        operator_delete(arg);
        scheduler_resource_allocation_error_ctor_name(&e, NULL,
                HRESULT_FROM_WIN32(GetLastError()));
        _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
    }
    SubmitThreadpoolWork(work);
    CloseThreadpoolWork(work);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask, 12)
void __thiscall ThreadScheduler_ScheduleTask(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data)
{
    FIXME("(%p %p %p) stub\n", this, proc, data);
    ThreadScheduler_ScheduleTask_loc(this, proc, data, NULL);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_IsAvailableLocation, 8)
bool __thiscall ThreadScheduler_IsAvailableLocation(
        const ThreadScheduler *this, const /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return FALSE;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_vector_dtor, 8)
Scheduler* __thiscall ThreadScheduler_vector_dtor(ThreadScheduler *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ThreadScheduler_dtor(this+i);
        operator_delete(ptr);
    } else {
        ThreadScheduler_dtor(this);
        if(flags & 1)
            operator_delete(this);
    }

    return &this->scheduler;
}

static ThreadScheduler* ThreadScheduler_ctor(ThreadScheduler *this,
        const SchedulerPolicy *policy)
{
    SYSTEM_INFO si;

    TRACE("(%p)->()\n", this);

    this->scheduler.vtable = &ThreadScheduler_vtable;
    this->ref = 1;
    this->id = InterlockedIncrement(&scheduler_id);
    SchedulerPolicy_copy_ctor(&this->policy, policy);

    GetSystemInfo(&si);
    this->virt_proc_no = SchedulerPolicy_GetPolicyValue(&this->policy, MaxConcurrency);
    if(this->virt_proc_no > si.dwNumberOfProcessors)
        this->virt_proc_no = si.dwNumberOfProcessors;

    this->shutdown_count = this->shutdown_size = 0;
    this->shutdown_events = NULL;

    InitializeCriticalSection(&this->cs);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": ThreadScheduler");

    list_init(&this->scheduled_chores);
    return this;
}

/* ?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z */
/* ?Create@Scheduler@Concurrency@@SAPEAV12@AEBVSchedulerPolicy@2@@Z */
Scheduler* __cdecl Scheduler_Create(const SchedulerPolicy *policy)
{
    ThreadScheduler *ret;

    TRACE("(%p)\n", policy);

    ret = operator_new(sizeof(*ret));
    return &ThreadScheduler_ctor(ret, policy)->scheduler;
}

/* ?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ */
void __cdecl Scheduler_ResetDefaultSchedulerPolicy(void)
{
    TRACE("()\n");

    EnterCriticalSection(&default_scheduler_cs);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    SchedulerPolicy_ctor(&default_scheduler_policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl Scheduler_SetDefaultSchedulerPolicy(const SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler_policy.policy_container)
        SchedulerPolicy_copy_ctor(&default_scheduler_policy, policy);
    else
        SchedulerPolicy_op_assign(&default_scheduler_policy, policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Create@CurrentScheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?Create@CurrentScheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl CurrentScheduler_Create(const SchedulerPolicy *policy)
{
    Scheduler *scheduler;

    TRACE("(%p)\n", policy);

    scheduler = Scheduler_Create(policy);
    call_Scheduler_Attach(scheduler);
}

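/* Pops the most recently attached scheduler from the current context and
 * restores the previous one; detaching when no scheduler was explicitly
 * attached throws improper_scheduler_detach. */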
/* ?Detach@CurrentScheduler@Concurrency@@SAXXZ */
void __cdecl CurrentScheduler_Detach(void)
{
    ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();

    TRACE("()\n");

    if(!context) {
        improper_scheduler_detach e;
        improper_scheduler_detach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
    }

    if(context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(!context->scheduler.next) {
        improper_scheduler_detach e;
        improper_scheduler_detach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
    }

    call_Scheduler_Release(context->scheduler.scheduler);
    if(!context->scheduler.next) {
        context->scheduler.scheduler = NULL;
    }else {
        struct scheduler_list *entry = context->scheduler.next;
        context->scheduler.scheduler = entry->scheduler;
        context->scheduler.next = entry->next;
        operator_delete(entry);
    }
}

static void create_default_scheduler(void)
{
    if(default_scheduler)
        return;

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler) {
        ThreadScheduler *scheduler;

        if(!default_scheduler_policy.policy_container)
            SchedulerPolicy_ctor(&default_scheduler_policy);

        scheduler = operator_new(sizeof(*scheduler));
        ThreadScheduler_ctor(scheduler, &default_scheduler_policy);
        default_scheduler = scheduler;
    }
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Get@CurrentScheduler@Concurrency@@SAPAVScheduler@2@XZ */
/* ?Get@CurrentScheduler@Concurrency@@SAPEAVScheduler@2@XZ */
Scheduler* __cdecl CurrentScheduler_Get(void)
{
    TRACE("()\n");
    return get_current_scheduler();
}

#if _MSVCR_VER > 100
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@AAVlocation@2@@Z */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@AEAVlocation@2@@Z */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup_loc(/*location*/void *placement)
{
    TRACE("(%p)\n", placement);
    return call_Scheduler_CreateScheduleGroup_loc(get_current_scheduler(), placement);
}
#endif

/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@XZ */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@XZ */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup(void)
{
    TRACE("()\n");
    return call_Scheduler_CreateScheduleGroup(get_current_scheduler());
}

/* ?GetNumberOfVirtualProcessors@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_GetNumberOfVirtualProcessors(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_GetNumberOfVirtualProcessors(scheduler);
}

/* ?GetPolicy@CurrentScheduler@Concurrency@@SA?AVSchedulerPolicy@2@XZ */
SchedulerPolicy* __cdecl CurrentScheduler_GetPolicy(SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);
    return call_Scheduler_GetPolicy(get_current_scheduler(), policy);
}

/* ?Id@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_Id(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_Id(scheduler);
}

#if _MSVCR_VER > 100
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NABVlocation@2@@Z */
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NAEBVlocation@2@@Z */
bool __cdecl CurrentScheduler_IsAvailableLocation(const /*location*/void *placement)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("(%p)\n", placement);

    if(!scheduler)
        return FALSE;
    return call_Scheduler_IsAvailableLocation(scheduler, placement);
}
#endif

/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPAX@Z */
/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPEAX@Z */
void __cdecl CurrentScheduler_RegisterShutdownEvent(HANDLE event)
{
    TRACE("(%p)\n", event);
    call_Scheduler_RegisterShutdownEvent(get_current_scheduler(), event);
}

#if _MSVCR_VER > 100
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0AAVlocation@2@@Z */
1827 /* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0AEAVlocation@2@@Z */
1828 void __cdecl CurrentScheduler_ScheduleTask_loc(void (__cdecl *proc)(void*),
1829 void *data, /*location*/void *placement)
1831 TRACE("(%p %p %p)\n", proc, data, placement);
1832 call_Scheduler_ScheduleTask_loc(get_current_scheduler(), proc, data, placement);
1834 #endif
1836 /* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0@Z */
1837 /* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0@Z */
1838 void __cdecl CurrentScheduler_ScheduleTask(void (__cdecl *proc)(void*), void *data)
1840 TRACE("(%p %p)\n", proc, data);
1841 call_Scheduler_ScheduleTask(get_current_scheduler(), proc, data);
1844 /* ??0_Scheduler@details@Concurrency@@QAE@PAVScheduler@2@@Z */
1845 /* ??0_Scheduler@details@Concurrency@@QEAA@PEAVScheduler@2@@Z */
1846 DEFINE_THISCALL_WRAPPER(_Scheduler_ctor_sched, 8)
1847 _Scheduler* __thiscall _Scheduler_ctor_sched(_Scheduler *this, Scheduler *scheduler)
1849 TRACE("(%p %p)\n", this, scheduler);
1851 this->scheduler = scheduler;
1852 return this;
1855 /* ??_F_Scheduler@details@Concurrency@@QAEXXZ */
1856 /* ??_F_Scheduler@details@Concurrency@@QEAAXXZ */
1857 DEFINE_THISCALL_WRAPPER(_Scheduler_ctor, 4)
1858 _Scheduler* __thiscall _Scheduler_ctor(_Scheduler *this)
1860 return _Scheduler_ctor_sched(this, NULL);
1863 /* ?_GetScheduler@_Scheduler@details@Concurrency@@QAEPAVScheduler@3@XZ */
1864 /* ?_GetScheduler@_Scheduler@details@Concurrency@@QEAAPEAVScheduler@3@XZ */
1865 DEFINE_THISCALL_WRAPPER(_Scheduler__GetScheduler, 4)
1866 Scheduler* __thiscall _Scheduler__GetScheduler(_Scheduler *this)
1868 TRACE("(%p)\n", this);
1869 return this->scheduler;
1872 /* ?_Reference@_Scheduler@details@Concurrency@@QAEIXZ */
1873 /* ?_Reference@_Scheduler@details@Concurrency@@QEAAIXZ */
1874 DEFINE_THISCALL_WRAPPER(_Scheduler__Reference, 4)
1875 unsigned int __thiscall _Scheduler__Reference(_Scheduler *this)
1877 TRACE("(%p)\n", this);
1878 return call_Scheduler_Reference(this->scheduler);
1881 /* ?_Release@_Scheduler@details@Concurrency@@QAEIXZ */
1882 /* ?_Release@_Scheduler@details@Concurrency@@QEAAIXZ */
1883 DEFINE_THISCALL_WRAPPER(_Scheduler__Release, 4)
1884 unsigned int __thiscall _Scheduler__Release(_Scheduler *this)
1886 TRACE("(%p)\n", this);
1887 return call_Scheduler_Release(this->scheduler);
1890 /* ?_Get@_CurrentScheduler@details@Concurrency@@SA?AV_Scheduler@23@XZ */
1891 _Scheduler* __cdecl _CurrentScheduler__Get(_Scheduler *ret)
1893 TRACE("()\n");
1894 return _Scheduler_ctor_sched(ret, get_current_scheduler());
1897 /* ?_GetNumberOfVirtualProcessors@_CurrentScheduler@details@Concurrency@@SAIXZ */
1898 unsigned int __cdecl _CurrentScheduler__GetNumberOfVirtualProcessors(void)
1900 TRACE("()\n");
1901 get_current_scheduler();
1902 return CurrentScheduler_GetNumberOfVirtualProcessors();
1905 /* ?_Id@_CurrentScheduler@details@Concurrency@@SAIXZ */
1906 unsigned int __cdecl _CurrentScheduler__Id(void)
1908 TRACE("()\n");
1909 get_current_scheduler();
1910 return CurrentScheduler_Id();
1913 /* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPAX@Z0@Z */
1914 /* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPEAX@Z0@Z */
1915 void __cdecl _CurrentScheduler__ScheduleTask(void (__cdecl *proc)(void*), void *data)
1917 TRACE("(%p %p)\n", proc, data);
1918 CurrentScheduler_ScheduleTask(proc, data);
1921 /* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
1922 unsigned int __cdecl SpinCount__Value(void)
1924 static unsigned int val = -1;
1926 TRACE("()\n");
1928 if(val == -1) {
1929 SYSTEM_INFO si;
1931 GetSystemInfo(&si);
1932 val = si.dwNumberOfProcessors>1 ? 4000 : 0;
1935 return val;
1938 /* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
1939 /* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
1940 DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
1941 SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
1943 TRACE("(%p %p)\n", this, yf);
1945 this->state = SPINWAIT_INIT;
1946 this->unknown = 1;
1947 this->yield_func = yf;
1948 return this;
1951 /* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
1952 /* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
1953 DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
1954 SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
1956 TRACE("(%p %p)\n", this, yf);
1958 this->state = SPINWAIT_INIT;
1959 this->unknown = 0;
1960 this->yield_func = yf;
1961 return this;
1964 /* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
1965 /* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
1966 /* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
1967 /* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
1968 DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
1969 void __thiscall SpinWait_dtor(SpinWait *this)
1971 TRACE("(%p)\n", this);
1974 /* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
1975 /* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
1976 /* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
1977 /* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
1978 DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
1979 void __thiscall SpinWait__DoYield(SpinWait *this)
1981 TRACE("(%p)\n", this);
1983 if(this->unknown)
1984 this->yield_func();
1987 /* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
1988 /* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
1989 /* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
1990 /* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
1991 DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
1992 ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
1994 TRACE("(%p)\n", this);
1995 return 1;
1998 /* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
1999 /* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
2000 /* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
2001 /* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
2002 DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
2003 void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
2005 TRACE("(%p %d)\n", this, spin);
2007 this->spin = spin;
2008 this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
2011 /* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
2012 /* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
2013 /* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
2014 /* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
2015 DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
2016 void __thiscall SpinWait__Reset(SpinWait *this)
2018 SpinWait__SetSpinCount(this, SpinCount__Value());
2021 /* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
2022 /* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
2023 /* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
2024 /* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
2025 DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
2026 bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
2028 TRACE("(%p)\n", this);
2030 this->spin--;
2031 return this->spin > 0;
2034 /* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
2035 /* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
2036 /* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
2037 /* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
2038 DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
2039 bool __thiscall SpinWait__SpinOnce(SpinWait *this)
2041 switch(this->state) {
2042 case SPINWAIT_INIT:
2043 SpinWait__Reset(this);
2044 /* fall through */
2045 case SPINWAIT_SPIN:
2046 InterlockedDecrement((LONG*)&this->spin);
2047 if(!this->spin)
2048 this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
2049 return TRUE;
2050 case SPINWAIT_YIELD:
2051 this->state = SPINWAIT_DONE;
2052 this->yield_func();
2053 return TRUE;
2054 default:
2055 SpinWait__Reset(this);
2056 return FALSE;
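/* Editorial usage sketch, not part of the original file: the canonical spin
 * loop built from the primitives above.  spin_wait_for_next_cs() and
 * spin_wait_for_next_rwl() later in this file follow exactly this pattern,
 * with a yield function that does Sleep(0). */
static inline void example_spin_until_set(volatile LONG *flag, yield_func yf)
{
    SpinWait sw;

    SpinWait_ctor(&sw, yf);
    SpinWait__Reset(&sw);           /* load the per-machine spin count */
    while(!*flag)
        SpinWait__SpinOnce(&sw);    /* spin, then yield, then start over */
    SpinWait_dtor(&sw);
}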
2060 #if _MSVCR_VER >= 110
2062 /* ??0_StructuredTaskCollection@details@Concurrency@@QAE@PAV_CancellationTokenState@12@@Z */
2063 /* ??0_StructuredTaskCollection@details@Concurrency@@QEAA@PEAV_CancellationTokenState@12@@Z */
2064 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection_ctor, 8)
2065 _StructuredTaskCollection* __thiscall _StructuredTaskCollection_ctor(
2066 _StructuredTaskCollection *this, /*_CancellationTokenState*/void *token)
2068 TRACE("(%p)\n", this);
2070 if (token)
2071 FIXME("_StructuredTaskCollection with cancellation token not implemented!\n");
2073 memset(this, 0, sizeof(*this));
2074 this->finished = FINISHED_INITIAL;
2075 return this;
2078 #endif /* _MSVCR_VER >= 110 */
2080 #if _MSVCR_VER >= 120
2082 /* ??1_StructuredTaskCollection@details@Concurrency@@QAA@XZ */
2083 /* ??1_StructuredTaskCollection@details@Concurrency@@QAE@XZ */
2084 /* ??1_StructuredTaskCollection@details@Concurrency@@QEAA@XZ */
2085 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection_dtor, 4)
2086 void __thiscall _StructuredTaskCollection_dtor(_StructuredTaskCollection *this)
2088 FIXME("(%p): stub!\n", this);
2089 if (this->count && !__uncaught_exception()) {
2090 missing_wait e;
2091 missing_wait_ctor_str(&e, "Missing call to _RunAndWait");
2092 _CxxThrowException(&e, &missing_wait_exception_type);
2096 #endif /* _MSVCR_VER >= 120 */
2098 static ThreadScheduler *get_thread_scheduler_from_context(Context *context)
2100 Scheduler *scheduler = get_scheduler_from_context(context);
2101 if (scheduler && scheduler->vtable == &ThreadScheduler_vtable)
2102 return (ThreadScheduler*)scheduler;
2103 return NULL;
2106 struct execute_chore_data {
2107 _UnrealizedChore *chore;
2108 _StructuredTaskCollection *task_collection;
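/* Editorial note: the task collection's 'exception' field is used below as a
 * tagged pointer.  It holds an exception_ptr* (heap-allocated, so its low
 * bits are clear) combined with status flags such as
 * STRUCTURED_TASK_COLLECTION_CANCELLED in the bits covered by
 * STRUCTURED_TASK_COLLECTION_STATUS_MASK. */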
2111 /* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QAAXXZ */
2112 /* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QAEXXZ */
2113 /* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QEAAXXZ */
2114 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Cancel, 4)
2115 void __thiscall _StructuredTaskCollection__Cancel(
2116 _StructuredTaskCollection *this)
2118 ThreadScheduler *scheduler;
2119 void *prev_exception, *new_exception;
2120 struct scheduled_chore *sc, *next;
2121 LONG removed = 0, finished = 1;
2122 struct beacon *beacon;
2124 TRACE("(%p)\n", this);
2126 if (!this->context)
2127 this->context = get_current_context();
2128 scheduler = get_thread_scheduler_from_context(this->context);
2129 if (!scheduler)
2130 return;
2132 new_exception = this->exception;
2133 do {
2134 prev_exception = new_exception;
2135 if ((ULONG_PTR)prev_exception & STRUCTURED_TASK_COLLECTION_CANCELLED)
2136 return;
2137 new_exception = (void*)((ULONG_PTR)prev_exception |
2138 STRUCTURED_TASK_COLLECTION_CANCELLED);
2139 } while ((new_exception = InterlockedCompareExchangePointer(
2140 &this->exception, new_exception, prev_exception))
2141 != prev_exception);
2143 EnterCriticalSection(&((ExternalContextBase*)this->context)->beacons_cs);
2144 LIST_FOR_EACH_ENTRY(beacon, &((ExternalContextBase*)this->context)->beacons, struct beacon, entry) {
2145 if (beacon->task_collection == this)
2146 beacon->cancelling = TRUE;
2148 LeaveCriticalSection(&((ExternalContextBase*)this->context)->beacons_cs);
2150 EnterCriticalSection(&scheduler->cs);
2151 LIST_FOR_EACH_ENTRY_SAFE(sc, next, &scheduler->scheduled_chores,
2152 struct scheduled_chore, entry) {
2153 if (sc->chore->task_collection != this)
2154 continue;
2155 sc->chore->task_collection = NULL;
2156 list_remove(&sc->entry);
2157 removed++;
2158 operator_delete(sc);
2160 LeaveCriticalSection(&scheduler->cs);
2161 if (!removed)
2162 return;
2164 if (InterlockedCompareExchange(&this->finished, removed, FINISHED_INITIAL) != FINISHED_INITIAL)
2165 finished = InterlockedAdd(&this->finished, removed);
2166 if (!finished)
2167 call_Context_Unblock(this->event);
2170 static LONG CALLBACK execute_chore_except(EXCEPTION_POINTERS *pexc, void *_data)
2172 struct execute_chore_data *data = _data;
2173 void *prev_exception, *new_exception;
2174 exception_ptr *ptr;
2176 if (pexc->ExceptionRecord->ExceptionCode != CXX_EXCEPTION)
2177 return EXCEPTION_CONTINUE_SEARCH;
2179 _StructuredTaskCollection__Cancel(data->task_collection);
2181 ptr = operator_new(sizeof(*ptr));
2182 __ExceptionPtrCreate(ptr);
2183 exception_ptr_from_record(ptr, pexc->ExceptionRecord);
2185 new_exception = data->task_collection->exception;
2186 do {
2187 if ((ULONG_PTR)new_exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK) {
2188 __ExceptionPtrDestroy(ptr);
2189 operator_delete(ptr);
2190 break;
2192 prev_exception = new_exception;
2193 new_exception = (void*)((ULONG_PTR)new_exception | (ULONG_PTR)ptr);
2194 } while ((new_exception = InterlockedCompareExchangePointer(
2195 &data->task_collection->exception, new_exception,
2196 prev_exception)) != prev_exception);
2197 data->task_collection->event = 0;
2198 return EXCEPTION_EXECUTE_HANDLER;
2201 static void CALLBACK execute_chore_finally(BOOL normal, void *data)
2203 ExternalContextBase *ctx = (ExternalContextBase*)try_get_current_context();
2204 _StructuredTaskCollection *old_collection = data;
2206 if (ctx && ctx->context.vtable == &ExternalContextBase_vtable)
2207 ctx->task_collection = old_collection;
2210 static void execute_chore(_UnrealizedChore *chore,
2211 _StructuredTaskCollection *task_collection)
2213 ExternalContextBase *ctx = (ExternalContextBase*)try_get_current_context();
2214 struct execute_chore_data data = { chore, task_collection };
2215 _StructuredTaskCollection *old_collection;
2217 TRACE("(%p %p)\n", chore, task_collection);
2219 if (ctx && ctx->context.vtable == &ExternalContextBase_vtable)
2221 old_collection = ctx->task_collection;
2222 ctx->task_collection = task_collection;
2225 __TRY
2227 __TRY
2229 if (!((ULONG_PTR)task_collection->exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK) &&
2230 chore->chore_proc)
2231 chore->chore_proc(chore);
2233 __EXCEPT_CTX(execute_chore_except, &data)
2236 __ENDTRY
2238 __FINALLY_CTX(execute_chore_finally, old_collection)
2241 static void CALLBACK chore_wrapper_finally(BOOL normal, void *data)
2243 _UnrealizedChore *chore = data;
2244 struct _StructuredTaskCollection *task_collection = chore->task_collection;
2245 LONG finished = 1;
2247 TRACE("(%u %p)\n", normal, data);
2249 if (!task_collection)
2250 return;
2251 chore->task_collection = NULL;
2253 if (InterlockedCompareExchange(&task_collection->finished, 1, FINISHED_INITIAL) != FINISHED_INITIAL)
2254 finished = InterlockedIncrement(&task_collection->finished);
2255 if (!finished)
2256 call_Context_Unblock(task_collection->event);
2259 static void __cdecl chore_wrapper(_UnrealizedChore *chore)
2261 __TRY
2263 execute_chore(chore, chore->task_collection);
2265 __FINALLY_CTX(chore_wrapper_finally, chore)
2268 static BOOL pick_and_execute_chore(ThreadScheduler *scheduler)
2270 struct list *entry;
2271 struct scheduled_chore *sc;
2272 _UnrealizedChore *chore;
2274 TRACE("(%p)\n", scheduler);
2276 if (scheduler->scheduler.vtable != &ThreadScheduler_vtable)
2278 ERR("unknown scheduler set\n");
2279 return FALSE;
2282 EnterCriticalSection(&scheduler->cs);
2283 entry = list_head(&scheduler->scheduled_chores);
2284 if (entry)
2285 list_remove(entry);
2286 LeaveCriticalSection(&scheduler->cs);
2287 if (!entry)
2288 return FALSE;
2290 sc = LIST_ENTRY(entry, struct scheduled_chore, entry);
2291 chore = sc->chore;
2292 operator_delete(sc);
2294 chore->chore_wrapper(chore);
2295 return TRUE;
2298 static void __cdecl _StructuredTaskCollection_scheduler_cb(void *data)
2300 pick_and_execute_chore((ThreadScheduler*)get_current_scheduler());
2303 static bool schedule_chore(_StructuredTaskCollection *this,
2304 _UnrealizedChore *chore, Scheduler **pscheduler)
2306 struct scheduled_chore *sc;
2307 ThreadScheduler *scheduler;
2309 if (chore->task_collection) {
2310 invalid_multiple_scheduling e;
2311 invalid_multiple_scheduling_ctor_str(&e, "Chore scheduled multiple times");
2312 _CxxThrowException(&e, &invalid_multiple_scheduling_exception_type);
2313 return FALSE;
2316 if (!this->context)
2317 this->context = get_current_context();
2318 scheduler = get_thread_scheduler_from_context(this->context);
2319 if (!scheduler) {
2320 ERR("unknown context or scheduler set\n");
2321 return FALSE;
2324 sc = operator_new(sizeof(*sc));
2325 sc->chore = chore;
2327 chore->task_collection = this;
2328 chore->chore_wrapper = chore_wrapper;
2329 InterlockedIncrement(&this->count);
2331 EnterCriticalSection(&scheduler->cs);
2332 list_add_head(&scheduler->scheduled_chores, &sc->entry);
2333 LeaveCriticalSection(&scheduler->cs);
2334 *pscheduler = &scheduler->scheduler;
2335 return TRUE;
2338 #if _MSVCR_VER >= 110
2340 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAAXPAV_UnrealizedChore@23@PAVlocation@3@@Z */
2341 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@PAVlocation@3@@Z */
2342 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QEAAXPEAV_UnrealizedChore@23@PEAVlocation@3@@Z */
2343 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Schedule_loc, 12)
2344 void __thiscall _StructuredTaskCollection__Schedule_loc(
2345 _StructuredTaskCollection *this, _UnrealizedChore *chore,
2346 /*location*/void *placement)
2348 Scheduler *scheduler;
2350 TRACE("(%p %p %p)\n", this, chore, placement);
2352 if (schedule_chore(this, chore, &scheduler))
2354 call_Scheduler_ScheduleTask_loc(scheduler,
2355 _StructuredTaskCollection_scheduler_cb, NULL, placement);
2359 #endif /* _MSVCR_VER >= 110 */
2361 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAAXPAV_UnrealizedChore@23@@Z */
2362 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@@Z */
2363 /* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QEAAXPEAV_UnrealizedChore@23@@Z */
2364 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Schedule, 8)
2365 void __thiscall _StructuredTaskCollection__Schedule(
2366 _StructuredTaskCollection *this, _UnrealizedChore *chore)
2368 Scheduler *scheduler;
2370 TRACE("(%p %p)\n", this, chore);
2372 if (schedule_chore(this, chore, &scheduler))
2374 call_Scheduler_ScheduleTask(scheduler,
2375 _StructuredTaskCollection_scheduler_cb, NULL);
2379 static void CALLBACK exception_ptr_rethrow_finally(BOOL normal, void *data)
2381 exception_ptr *ep = data;
2383 TRACE("(%u %p)\n", normal, data);
2385 __ExceptionPtrDestroy(ep);
2386 operator_delete(ep);
2389 /* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QAA?AW4_TaskCollectionStatus@23@PAV_UnrealizedChore@23@@Z */
2390 /* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QAG?AW4_TaskCollectionStatus@23@PAV_UnrealizedChore@23@@Z */
2391 /* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QEAA?AW4_TaskCollectionStatus@23@PEAV_UnrealizedChore@23@@Z */
2392 _TaskCollectionStatus __stdcall _StructuredTaskCollection__RunAndWait(
2393 _StructuredTaskCollection *this, _UnrealizedChore *chore)
2395 ULONG_PTR exception;
2396 exception_ptr *ep;
2397 LONG count;
2399 TRACE("(%p %p)\n", this, chore);
2401 if (chore) {
2402 if (chore->task_collection) {
2403 invalid_multiple_scheduling e;
2404 invalid_multiple_scheduling_ctor_str(&e, "Chore scheduled multiple times");
2405 _CxxThrowException(&e, &invalid_multiple_scheduling_exception_type);
2407 execute_chore(chore, this);
2410 if (this->context) {
2411 ThreadScheduler *scheduler = get_thread_scheduler_from_context(this->context);
2412 if (scheduler) {
2413 while (pick_and_execute_chore(scheduler)) ;
2417 this->event = get_current_context();
2418 InterlockedCompareExchange(&this->finished, 0, FINISHED_INITIAL);
2420 while (this->count != 0) {
2421 count = this->count;
2422 InterlockedAdd(&this->count, -count);
2423 count = InterlockedAdd(&this->finished, -count);
2425 if (count < 0)
2426 call_Context_Block(this->event);
2429 exception = (ULONG_PTR)this->exception;
2430 ep = (exception_ptr*)(exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK);
2431 if (ep) {
2432 this->exception = 0;
2433 __TRY
2435 __ExceptionPtrRethrow(ep);
2437 __FINALLY_CTX(exception_ptr_rethrow_finally, ep)
2439 if (exception & STRUCTURED_TASK_COLLECTION_CANCELLED)
2440 return TASK_COLLECTION_CANCELLED;
2441 return TASK_COLLECTION_SUCCESS;
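/* Editorial usage sketch, not part of the original file: how a structured
 * task collection is typically driven.  Assumes _MSVCR_VER >= 110 for
 * _StructuredTaskCollection_ctor(), and that zero-initializing an
 * _UnrealizedChore before setting chore_proc is sufficient, as the code
 * above suggests. */
static void __cdecl example_chore_proc(_UnrealizedChore *chore)
{
    /* one task's work goes here */
}

static inline void example_run_chores(void)
{
    _StructuredTaskCollection stc;
    _UnrealizedChore c1 = {0}, c2 = {0};

    c1.chore_proc = example_chore_proc;
    c2.chore_proc = example_chore_proc;

    _StructuredTaskCollection_ctor(&stc, NULL);
    _StructuredTaskCollection__Schedule(&stc, &c1);   /* queued on the scheduler */
    _StructuredTaskCollection__RunAndWait(&stc, &c2); /* runs c2 inline, waits for c1 */
}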
2444 /* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QAA_NXZ */
2445 /* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QAE_NXZ */
2446 /* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QEAA_NXZ */
2447 DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__IsCanceling, 4)
2448 bool __thiscall _StructuredTaskCollection__IsCanceling(
2449 _StructuredTaskCollection *this)
2451 TRACE("(%p)\n", this);
2452 return !!((ULONG_PTR)this->exception & STRUCTURED_TASK_COLLECTION_CANCELLED);
2455 /* ?_CheckTaskCollection@_UnrealizedChore@details@Concurrency@@IAEXXZ */
2456 /* ?_CheckTaskCollection@_UnrealizedChore@details@Concurrency@@IEAAXXZ */
2457 DEFINE_THISCALL_WRAPPER(_UnrealizedChore__CheckTaskCollection, 4)
2458 void __thiscall _UnrealizedChore__CheckTaskCollection(_UnrealizedChore *this)
2460 FIXME("() stub\n");
2463 /* ??0critical_section@Concurrency@@QAE@XZ */
2464 /* ??0critical_section@Concurrency@@QEAA@XZ */
2465 DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
2466 critical_section* __thiscall critical_section_ctor(critical_section *this)
2468 TRACE("(%p)\n", this);
2470 this->unk_active.ctx = NULL;
2471 this->head = this->tail = NULL;
2472 return this;
2475 /* ??1critical_section@Concurrency@@QAE@XZ */
2476 /* ??1critical_section@Concurrency@@QEAA@XZ */
2477 DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
2478 void __thiscall critical_section_dtor(critical_section *this)
2480 TRACE("(%p)\n", this);
2483 static void __cdecl spin_wait_yield(void)
2485 Sleep(0);
2488 static inline void spin_wait_for_next_cs(cs_queue *q)
2490 SpinWait sw;
2492 if(q->next) return;
2494 SpinWait_ctor(&sw, &spin_wait_yield);
2495 SpinWait__Reset(&sw);
2496 while(!q->next)
2497 SpinWait__SpinOnce(&sw);
2498 SpinWait_dtor(&sw);
2501 static inline void cs_set_head(critical_section *cs, cs_queue *q)
2503 cs->unk_active.ctx = get_current_context();
2504 cs->unk_active.next = q->next;
2505 cs->head = &cs->unk_active;
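/* Editorial note: cs_lock() below is a queue lock in the MCS style.  Each
 * waiter atomically swaps its cs_queue node into 'tail'; a non-NULL previous
 * tail means the lock is held, so the waiter links itself behind it and
 * blocks until unlock hands ownership over. */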
2508 static inline void cs_lock(critical_section *cs, cs_queue *q)
2510 cs_queue *last;
2512 if(cs->unk_active.ctx == get_current_context()) {
2513 improper_lock e;
2514 improper_lock_ctor_str(&e, "Already locked");
2515 _CxxThrowException(&e, &improper_lock_exception_type);
2518 memset(q, 0, sizeof(*q));
2519 q->ctx = get_current_context();
2520 last = InterlockedExchangePointer(&cs->tail, q);
2521 if(last) {
2522 last->next = q;
2523 call_Context_Block(q->ctx);
2526 cs_set_head(cs, q);
2527 if(InterlockedCompareExchangePointer(&cs->tail, &cs->unk_active, q) != q) {
2528 spin_wait_for_next_cs(q);
2529 cs->unk_active.next = q->next;
2533 /* ?lock@critical_section@Concurrency@@QAEXXZ */
2534 /* ?lock@critical_section@Concurrency@@QEAAXXZ */
2535 DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
2536 void __thiscall critical_section_lock(critical_section *this)
2538 cs_queue q;
2540 TRACE("(%p)\n", this);
2541 cs_lock(this, &q);
2544 /* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
2545 /* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
2546 DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
2547 bool __thiscall critical_section_try_lock(critical_section *this)
2549 cs_queue q;
2551 TRACE("(%p)\n", this);
2553 if(this->unk_active.ctx == get_current_context())
2554 return FALSE;
2556 memset(&q, 0, sizeof(q));
2557 if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
2558 cs_set_head(this, &q);
2559 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
2560 spin_wait_for_next_cs(&q);
2561 this->unk_active.next = q.next;
2563 return TRUE;
2565 return FALSE;
2568 /* ?unlock@critical_section@Concurrency@@QAEXXZ */
2569 /* ?unlock@critical_section@Concurrency@@QEAAXXZ */
2570 DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
2571 void __thiscall critical_section_unlock(critical_section *this)
2573 TRACE("(%p)\n", this);
2575 this->unk_active.ctx = NULL;
2576 this->head = NULL;
2577 if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
2578 == &this->unk_active) return;
2579 spin_wait_for_next_cs(&this->unk_active);
2581 #if _MSVCR_VER >= 110
2582 while(1) {
2583 cs_queue *next;
2585 if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
2586 break;
2588 next = this->unk_active.next;
2589 if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
2590 HeapFree(GetProcessHeap(), 0, next);
2591 return;
2593 spin_wait_for_next_cs(next);
2595 this->unk_active.next = next->next;
2596 HeapFree(GetProcessHeap(), 0, next);
2598 #endif
2600 call_Context_Unblock(this->unk_active.next->ctx);
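/* Editorial usage sketch, not part of the original file: the basic
 * lock/unlock cycle on the cooperative critical_section implemented above. */
static inline void example_critical_section(void)
{
    critical_section cs;

    critical_section_ctor(&cs);
    critical_section_lock(&cs);     /* enqueues and blocks if contended */
    /* ... exclusive region ... */
    critical_section_unlock(&cs);   /* hands the lock to the next waiter */
    critical_section_dtor(&cs);
}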
2603 /* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
2604 /* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
2605 DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
2606 critical_section* __thiscall critical_section_native_handle(critical_section *this)
2608 TRACE("(%p)\n", this);
2609 return this;
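/* Editorial note: set_timeout() converts a relative timeout in milliseconds
 * into an absolute FILETIME deadline (TICKSPERMSEC 100ns ticks per ms). */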
2612 static void set_timeout(FILETIME *ft, unsigned int timeout)
2614 LARGE_INTEGER to;
2616 GetSystemTimeAsFileTime(ft);
2617 to.QuadPart = ((LONGLONG)ft->dwHighDateTime << 32) +
2618 ft->dwLowDateTime + (LONGLONG)timeout * TICKSPERMSEC;
2619 ft->dwHighDateTime = to.QuadPart >> 32;
2620 ft->dwLowDateTime = to.QuadPart;
2623 struct timeout_unlock
2625 Context *ctx;
2626 BOOL timed_out;
2629 static void WINAPI timeout_unlock(TP_CALLBACK_INSTANCE *instance, void *ctx, TP_TIMER *timer)
2631 struct timeout_unlock *tu = ctx;
2632 tu->timed_out = TRUE;
2633 call_Context_Unblock(tu->ctx);
2636 /* returns TRUE if the wait timed out */
2637 static BOOL block_context_for(Context *ctx, unsigned int timeout)
2639 struct timeout_unlock tu = { ctx };
2640 TP_TIMER *tp_timer;
2641 FILETIME ft;
2643 if(timeout == COOPERATIVE_TIMEOUT_INFINITE) {
2644 call_Context_Block(ctx);
2645 return FALSE;
2648 tp_timer = CreateThreadpoolTimer(timeout_unlock, &tu, NULL);
2649 if(!tp_timer) {
2650 FIXME("throw exception?\n");
2651 return TRUE;
2653 set_timeout(&ft, timeout);
2654 SetThreadpoolTimer(tp_timer, &ft, 0, 0);
2656 call_Context_Block(ctx);
2658 SetThreadpoolTimer(tp_timer, NULL, 0, 0);
2659 WaitForThreadpoolTimerCallbacks(tp_timer, TRUE);
2660 CloseThreadpoolTimer(tp_timer);
2661 return tu.timed_out;
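/* Editorial note: when the timer fires, a racing regular unblock can leave
 * the context with one extra pending unblock.  Callers such as
 * critical_section_try_lock_for() below handle this by consuming it with an
 * additional call_Context_Block(). */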
2664 #if _MSVCR_VER >= 110
2665 /* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
2666 /* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
2667 DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
2668 bool __thiscall critical_section_try_lock_for(
2669 critical_section *this, unsigned int timeout)
2671 Context *ctx = get_current_context();
2672 cs_queue *q, *last;
2674 TRACE("(%p %d)\n", this, timeout);
2676 if(this->unk_active.ctx == ctx) {
2677 improper_lock e;
2678 improper_lock_ctor_str(&e, "Already locked");
2679 _CxxThrowException(&e, &improper_lock_exception_type);
2682 if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
2683 return critical_section_try_lock(this);
2684 q->ctx = ctx;
2686 last = InterlockedExchangePointer(&this->tail, q);
2687 if(last) {
2688 last->next = q;
2690 if(block_context_for(q->ctx, timeout))
2692 if(!InterlockedExchange(&q->free, TRUE))
2693 return FALSE;
2694 /* the context was woken by the timeout, but a concurrent unlock also picked this node as the new owner; consume the extra unblock before taking the lock */
2695 call_Context_Block(ctx);
2699 cs_set_head(this, q);
2700 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
2701 spin_wait_for_next_cs(q);
2702 this->unk_active.next = q->next;
2705 HeapFree(GetProcessHeap(), 0, q);
2706 return TRUE;
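/* Editorial usage sketch, not part of the original file: bounded waiting
 * with a 100ms timeout; returns FALSE without the lock on timeout. */
static inline bool example_try_lock_for(critical_section *cs)
{
    if(!critical_section_try_lock_for(cs, 100))
        return FALSE;               /* timed out, lock not acquired */
    /* ... exclusive region ... */
    critical_section_unlock(cs);
    return TRUE;
}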
2708 #endif
2710 /* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
2711 /* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
2712 DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
2713 critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
2714 critical_section_scoped_lock *this, critical_section *cs)
2716 TRACE("(%p %p)\n", this, cs);
2717 this->cs = cs;
2718 cs_lock(this->cs, &this->lock.q);
2719 return this;
2722 /* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
2723 /* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
2724 DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
2725 void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
2727 TRACE("(%p)\n", this);
2728 critical_section_unlock(this->cs);
2731 /* ??0_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
2732 /* ??0_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2733 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock_ctor, 4)
2734 _NonReentrantPPLLock* __thiscall _NonReentrantPPLLock_ctor(_NonReentrantPPLLock *this)
2736 TRACE("(%p)\n", this);
2738 critical_section_ctor(&this->cs);
2739 return this;
2742 /* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
2743 /* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
2744 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Acquire, 8)
2745 void __thiscall _NonReentrantPPLLock__Acquire(_NonReentrantPPLLock *this, cs_queue *q)
2747 TRACE("(%p %p)\n", this, q);
2748 cs_lock(&this->cs, q);
2751 /* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QAEXXZ */
2752 /* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QEAAXXZ */
2753 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Release, 4)
2754 void __thiscall _NonReentrantPPLLock__Release(_NonReentrantPPLLock *this)
2756 TRACE("(%p)\n", this);
2757 critical_section_unlock(&this->cs);
2760 /* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
2761 /* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
2762 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_ctor, 8)
2763 _NonReentrantPPLLock__Scoped_lock* __thiscall _NonReentrantPPLLock__Scoped_lock_ctor(
2764 _NonReentrantPPLLock__Scoped_lock *this, _NonReentrantPPLLock *lock)
2766 TRACE("(%p %p)\n", this, lock);
2768 this->lock = lock;
2769 _NonReentrantPPLLock__Acquire(this->lock, &this->wait.q);
2770 return this;
2773 /* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
2774 /* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2775 DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_dtor, 4)
2776 void __thiscall _NonReentrantPPLLock__Scoped_lock_dtor(_NonReentrantPPLLock__Scoped_lock *this)
2778 TRACE("(%p)\n", this);
2780 _NonReentrantPPLLock__Release(this->lock);
2783 /* ??0_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
2784 /* ??0_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2785 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock_ctor, 4)
2786 _ReentrantPPLLock* __thiscall _ReentrantPPLLock_ctor(_ReentrantPPLLock *this)
2788 TRACE("(%p)\n", this);
2790 critical_section_ctor(&this->cs);
2791 this->count = 0;
2792 this->owner = -1;
2793 return this;
2796 /* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
2797 /* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
2798 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Acquire, 8)
2799 void __thiscall _ReentrantPPLLock__Acquire(_ReentrantPPLLock *this, cs_queue *q)
2801 TRACE("(%p %p)\n", this, q);
2803 if(this->owner == GetCurrentThreadId()) {
2804 this->count++;
2805 return;
2808 cs_lock(&this->cs, q);
2809 this->count++;
2810 this->owner = GetCurrentThreadId();
2813 /* ?_Release@_ReentrantPPLLock@details@Concurrency@@QAEXXZ */
2814 /* ?_Release@_ReentrantPPLLock@details@Concurrency@@QEAAXXZ */
2815 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Release, 4)
2816 void __thiscall _ReentrantPPLLock__Release(_ReentrantPPLLock *this)
2818 TRACE("(%p)\n", this);
2820 this->count--;
2821 if(this->count)
2822 return;
2824 this->owner = -1;
2825 critical_section_unlock(&this->cs);
2828 /* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
2829 /* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
2830 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_ctor, 8)
2831 _ReentrantPPLLock__Scoped_lock* __thiscall _ReentrantPPLLock__Scoped_lock_ctor(
2832 _ReentrantPPLLock__Scoped_lock *this, _ReentrantPPLLock *lock)
2834 TRACE("(%p %p)\n", this, lock);
2836 this->lock = lock;
2837 _ReentrantPPLLock__Acquire(this->lock, &this->wait.q);
2838 return this;
2841 /* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
2842 /* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2843 DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_dtor, 4)
2844 void __thiscall _ReentrantPPLLock__Scoped_lock_dtor(_ReentrantPPLLock__Scoped_lock *this)
2846 TRACE("(%p)\n", this);
2848 _ReentrantPPLLock__Release(this->lock);
2851 /* ?_GetConcurrency@details@Concurrency@@YAIXZ */
2852 unsigned int __cdecl _GetConcurrency(void)
2854 static unsigned int val = -1;
2856 TRACE("()\n");
2858 if(val == -1) {
2859 SYSTEM_INFO si;
2861 GetSystemInfo(&si);
2862 val = si.dwNumberOfProcessors;
2865 return val;
2868 static void evt_add_queue(thread_wait_entry **head, thread_wait_entry *entry)
2870 entry->next = *head;
2871 entry->prev = NULL;
2872 if(*head) (*head)->prev = entry;
2873 *head = entry;
2876 static void evt_remove_queue(thread_wait_entry **head, thread_wait_entry *entry)
2878 if(entry == *head)
2879 *head = entry->next;
2880 else if(entry->prev)
2881 entry->prev->next = entry->next;
2882 if(entry->next) entry->next->prev = entry->prev;
2885 static size_t evt_end_wait(thread_wait *wait, event **events, int count)
2887 size_t i, ret = COOPERATIVE_WAIT_TIMEOUT;
2889 for(i = 0; i < count; i++) {
2890 critical_section_lock(&events[i]->cs);
2891 if(events[i] == wait->signaled) ret = i;
2892 evt_remove_queue(&events[i]->waiters, &wait->entries[i]);
2893 critical_section_unlock(&events[i]->cs);
2896 return ret;
2899 static inline int evt_transition(void **state, void *from, void *to)
2901 return InterlockedCompareExchangePointer(state, to, from) == from;
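/* Editorial note: evt_wait() below registers the wait on every event;
 * pending_waits is the number of signals still needed (count for wait_all,
 * 1 otherwise).  The event that drives it to zero stores itself in
 * wait->signaled, which evt_end_wait() maps back to the returned index. */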
2904 static size_t evt_wait(thread_wait *wait, event **events, int count, bool wait_all, unsigned int timeout)
2906 int i;
2908 wait->signaled = EVT_RUNNING;
2909 wait->pending_waits = wait_all ? count : 1;
2910 for(i = 0; i < count; i++) {
2911 wait->entries[i].wait = wait;
2913 critical_section_lock(&events[i]->cs);
2914 evt_add_queue(&events[i]->waiters, &wait->entries[i]);
2915 if(events[i]->signaled) {
2916 if(!InterlockedDecrement(&wait->pending_waits)) {
2917 wait->signaled = events[i];
2918 critical_section_unlock(&events[i]->cs);
2920 return evt_end_wait(wait, events, i+1);
2923 critical_section_unlock(&events[i]->cs);
2926 if(!timeout)
2927 return evt_end_wait(wait, events, count);
2929 if(!evt_transition(&wait->signaled, EVT_RUNNING, EVT_WAITING))
2930 return evt_end_wait(wait, events, count);
2932 if(block_context_for(wait->ctx, timeout) &&
2933 !evt_transition(&wait->signaled, EVT_WAITING, EVT_RUNNING))
2934 call_Context_Block(wait->ctx);
2936 return evt_end_wait(wait, events, count);
2939 /* ??0event@Concurrency@@QAE@XZ */
2940 /* ??0event@Concurrency@@QEAA@XZ */
2941 DEFINE_THISCALL_WRAPPER(event_ctor, 4)
2942 event* __thiscall event_ctor(event *this)
2944 TRACE("(%p)\n", this);
2946 this->waiters = NULL;
2947 this->signaled = FALSE;
2948 critical_section_ctor(&this->cs);
2950 return this;
2953 /* ??1event@Concurrency@@QAE@XZ */
2954 /* ??1event@Concurrency@@QEAA@XZ */
2955 DEFINE_THISCALL_WRAPPER(event_dtor, 4)
2956 void __thiscall event_dtor(event *this)
2958 TRACE("(%p)\n", this);
2959 critical_section_dtor(&this->cs);
2960 if(this->waiters)
2961 ERR("there's a wait on destroyed event\n");
2964 /* ?reset@event@Concurrency@@QAEXXZ */
2965 /* ?reset@event@Concurrency@@QEAAXXZ */
2966 DEFINE_THISCALL_WRAPPER(event_reset, 4)
2967 void __thiscall event_reset(event *this)
2969 thread_wait_entry *entry;
2971 TRACE("(%p)\n", this);
2973 critical_section_lock(&this->cs);
2974 if(this->signaled) {
2975 this->signaled = FALSE;
2976 for(entry=this->waiters; entry; entry = entry->next)
2977 InterlockedIncrement(&entry->wait->pending_waits);
2979 critical_section_unlock(&this->cs);
2982 /* ?set@event@Concurrency@@QAEXXZ */
2983 /* ?set@event@Concurrency@@QEAAXXZ */
2984 DEFINE_THISCALL_WRAPPER(event_set, 4)
2985 void __thiscall event_set(event *this)
2987 thread_wait_entry *wakeup = NULL;
2988 thread_wait_entry *entry, *next;
2990 TRACE("(%p)\n", this);
2992 critical_section_lock(&this->cs);
2993 if(!this->signaled) {
2994 this->signaled = TRUE;
2995 for(entry=this->waiters; entry; entry=next) {
2996 next = entry->next;
2997 if(!InterlockedDecrement(&entry->wait->pending_waits)) {
2998 if(InterlockedExchangePointer(&entry->wait->signaled, this) == EVT_WAITING) {
2999 evt_remove_queue(&this->waiters, entry);
3000 evt_add_queue(&wakeup, entry);
3005 critical_section_unlock(&this->cs);
3007 for(entry=wakeup; entry; entry=next) {
3008 next = entry->next;
3009 entry->next = entry->prev = NULL;
3010 call_Context_Unblock(entry->wait->ctx);
3014 /* ?wait@event@Concurrency@@QAEII@Z */
3015 /* ?wait@event@Concurrency@@QEAA_KI@Z */
3016 DEFINE_THISCALL_WRAPPER(event_wait, 8)
3017 size_t __thiscall event_wait(event *this, unsigned int timeout)
3019 thread_wait wait;
3020 size_t signaled;
3022 TRACE("(%p %u)\n", this, timeout);
3024 critical_section_lock(&this->cs);
3025 signaled = this->signaled;
3026 critical_section_unlock(&this->cs);
3028 if(!timeout) return signaled ? 0 : COOPERATIVE_WAIT_TIMEOUT;
3029 wait.ctx = get_current_context();
3030 return signaled ? 0 : evt_wait(&wait, &this, 1, FALSE, timeout);
3033 /* ?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z */
3034 /* ?wait_for_multiple@event@Concurrency@@SA_KPEAPEAV12@_K_NI@Z */
3035 size_t __cdecl event_wait_for_multiple(event **events, size_t count, bool wait_all, unsigned int timeout)
3037 thread_wait *wait;
3038 size_t ret;
3040 TRACE("(%p %Iu %d %u)\n", events, count, wait_all, timeout);
3042 if(count == 0)
3043 return 0;
3045 wait = operator_new(FIELD_OFFSET(thread_wait, entries[count]));
3046 wait->ctx = get_current_context();
3047 ret = evt_wait(wait, events, count, wait_all, timeout);
3048 operator_delete(wait);
3050 return ret;
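/* Editorial usage sketch, not part of the original file: waiting on a single
 * event with a 5s timeout; event_wait() returns 0 when the event is signaled
 * and COOPERATIVE_WAIT_TIMEOUT otherwise. */
static inline bool example_event_wait(event *e)
{
    return event_wait(e, 5000) != COOPERATIVE_WAIT_TIMEOUT;
}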
3053 #if _MSVCR_VER >= 110
3055 /* ??0_Cancellation_beacon@details@Concurrency@@QAE@XZ */
3056 /* ??0_Cancellation_beacon@details@Concurrency@@QEAA@XZ */
3057 DEFINE_THISCALL_WRAPPER(_Cancellation_beacon_ctor, 4)
3058 _Cancellation_beacon* __thiscall _Cancellation_beacon_ctor(_Cancellation_beacon *this)
3060 ExternalContextBase *ctx = (ExternalContextBase*)get_current_context();
3061 _StructuredTaskCollection *task_collection = NULL;
3062 struct beacon *beacon;
3064 TRACE("(%p)\n", this);
3066 if (ctx->context.vtable == &ExternalContextBase_vtable) {
3067 task_collection = ctx->task_collection;
3068 if (task_collection)
3069 ctx = (ExternalContextBase*)task_collection->context;
3072 if (ctx->context.vtable != &ExternalContextBase_vtable) {
3073 ERR("unknown context\n");
3074 return NULL;
3077 beacon = malloc(sizeof(*beacon));
3078 beacon->cancelling = Context_IsCurrentTaskCollectionCanceling();
3079 beacon->task_collection = task_collection;
3081 if (task_collection) {
3082 EnterCriticalSection(&ctx->beacons_cs);
3083 list_add_head(&ctx->beacons, &beacon->entry);
3084 LeaveCriticalSection(&ctx->beacons_cs);
3087 this->beacon = beacon;
3088 return this;
3091 /* ??1_Cancellation_beacon@details@Concurrency@@QAE@XZ */
3092 /* ??1_Cancellation_beacon@details@Concurrency@@QEAA@XZ */
3093 DEFINE_THISCALL_WRAPPER(_Cancellation_beacon_dtor, 4)
3094 void __thiscall _Cancellation_beacon_dtor(_Cancellation_beacon *this)
3096 TRACE("(%p)\n", this);
3098 if (this->beacon->task_collection) {
3099 ExternalContextBase *ctx = (ExternalContextBase*)this->beacon->task_collection->context;
3101 EnterCriticalSection(&ctx->beacons_cs);
3102 list_remove(&this->beacon->entry);
3103 LeaveCriticalSection(&ctx->beacons_cs);
3106 free(this->beacon);
3109 /* ??0_Condition_variable@details@Concurrency@@QAE@XZ */
3110 /* ??0_Condition_variable@details@Concurrency@@QEAA@XZ */
3111 DEFINE_THISCALL_WRAPPER(_Condition_variable_ctor, 4)
3112 _Condition_variable* __thiscall _Condition_variable_ctor(_Condition_variable *this)
3114 TRACE("(%p)\n", this);
3116 this->queue = NULL;
3117 critical_section_ctor(&this->lock);
3118 return this;
3121 /* ??1_Condition_variable@details@Concurrency@@QAE@XZ */
3122 /* ??1_Condition_variable@details@Concurrency@@QEAA@XZ */
3123 DEFINE_THISCALL_WRAPPER(_Condition_variable_dtor, 4)
3124 void __thiscall _Condition_variable_dtor(_Condition_variable *this)
3126 TRACE("(%p)\n", this);
3128 while(this->queue) {
3129 cv_queue *next = this->queue->next;
3130 if(!this->queue->expired)
3131 ERR("there's an active wait\n");
3132 operator_delete(this->queue);
3133 this->queue = next;
3135 critical_section_dtor(&this->lock);
3138 /* ?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z */
3139 /* ?wait@_Condition_variable@details@Concurrency@@QEAAXAEAVcritical_section@3@@Z */
3140 DEFINE_THISCALL_WRAPPER(_Condition_variable_wait, 8)
3141 void __thiscall _Condition_variable_wait(_Condition_variable *this, critical_section *cs)
3143 cv_queue q;
3145 TRACE("(%p, %p)\n", this, cs);
3147 q.ctx = get_current_context();
3148 q.expired = FALSE;
3149 critical_section_lock(&this->lock);
3150 q.next = this->queue;
3151 this->queue = &q;
3152 critical_section_unlock(&this->lock);
3154 critical_section_unlock(cs);
3155 call_Context_Block(q.ctx);
3156 critical_section_lock(cs);
3159 /* ?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z */
3160 /* ?wait_for@_Condition_variable@details@Concurrency@@QEAA_NAEAVcritical_section@3@I@Z */
3161 DEFINE_THISCALL_WRAPPER(_Condition_variable_wait_for, 12)
3162 bool __thiscall _Condition_variable_wait_for(_Condition_variable *this,
3163 critical_section *cs, unsigned int timeout)
3165 cv_queue *q;
3167 TRACE("(%p %p %d)\n", this, cs, timeout);
3169 q = operator_new(sizeof(cv_queue));
3170 q->ctx = get_current_context();
3171 q->expired = FALSE;
3172 critical_section_lock(&this->lock);
3173 q->next = this->queue;
3174 this->queue = q;
3175 critical_section_unlock(&this->lock);
3177 critical_section_unlock(cs);
3179 if(block_context_for(q->ctx, timeout)) {
3180 if(!InterlockedExchange(&q->expired, TRUE)) {
3181 critical_section_lock(cs);
3182 return FALSE;
3184 call_Context_Block(q->ctx);
3187 operator_delete(q);
3188 critical_section_lock(cs);
3189 return TRUE;
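/* Editorial usage sketch, not part of the original file: the standard
 * predicate loop around _Condition_variable_wait().  The critical_section is
 * released while the context is blocked and reacquired before returning, so
 * the predicate is always checked under the lock. */
static inline void example_cv_wait(_Condition_variable *cv, critical_section *cs,
        volatile LONG *ready)
{
    critical_section_lock(cs);
    while(!*ready)
        _Condition_variable_wait(cv, cs);
    critical_section_unlock(cs);
}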
3192 /* ?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ */
3193 /* ?notify_one@_Condition_variable@details@Concurrency@@QEAAXXZ */
3194 DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_one, 4)
3195 void __thiscall _Condition_variable_notify_one(_Condition_variable *this)
3197 cv_queue *node;
3199 TRACE("(%p)\n", this);
3201 if(!this->queue)
3202 return;
3204 while(1) {
3205 critical_section_lock(&this->lock);
3206 node = this->queue;
3207 if(!node) {
3208 critical_section_unlock(&this->lock);
3209 return;
3211 this->queue = node->next;
3212 critical_section_unlock(&this->lock);
3214 node->next = CV_WAKE;
3215 if(!InterlockedExchange(&node->expired, TRUE)) {
3216 call_Context_Unblock(node->ctx);
3217 return;
3218 } else {
3219 operator_delete(node);
3224 /* ?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ */
3225 /* ?notify_all@_Condition_variable@details@Concurrency@@QEAAXXZ */
3226 DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_all, 4)
3227 void __thiscall _Condition_variable_notify_all(_Condition_variable *this)
3229 cv_queue *ptr;
3231 TRACE("(%p)\n", this);
3233 if(!this->queue)
3234 return;
3236 critical_section_lock(&this->lock);
3237 ptr = this->queue;
3238 this->queue = NULL;
3239 critical_section_unlock(&this->lock);
3241 while(ptr) {
3242 cv_queue *next = ptr->next;
3244 ptr->next = CV_WAKE;
3245 if(!InterlockedExchange(&ptr->expired, TRUE))
3246 call_Context_Unblock(ptr->ctx);
3247 else
3248 operator_delete(ptr);
3249 ptr = next;
3252 #endif
3254 /* ??0reader_writer_lock@Concurrency@@QAE@XZ */
3255 /* ??0reader_writer_lock@Concurrency@@QEAA@XZ */
3256 DEFINE_THISCALL_WRAPPER(reader_writer_lock_ctor, 4)
3257 reader_writer_lock* __thiscall reader_writer_lock_ctor(reader_writer_lock *this)
3259 TRACE("(%p)\n", this);
3261 memset(this, 0, sizeof(*this));
3262 return this;
3265 /* ??1reader_writer_lock@Concurrency@@QAE@XZ */
3266 /* ??1reader_writer_lock@Concurrency@@QEAA@XZ */
3267 DEFINE_THISCALL_WRAPPER(reader_writer_lock_dtor, 4)
3268 void __thiscall reader_writer_lock_dtor(reader_writer_lock *this)
3270 TRACE("(%p)\n", this);
3272 if (this->thread_id != 0 || this->count)
3273 WARN("destroying locked reader_writer_lock\n");
3276 static inline void spin_wait_for_next_rwl(rwl_queue *q)
3278 SpinWait sw;
3280 if(q->next) return;
3282 SpinWait_ctor(&sw, &spin_wait_yield);
3283 SpinWait__Reset(&sw);
3284 while(!q->next)
3285 SpinWait__SpinOnce(&sw);
3286 SpinWait_dtor(&sw);
3289 /* ?lock@reader_writer_lock@Concurrency@@QAEXXZ */
3290 /* ?lock@reader_writer_lock@Concurrency@@QEAAXXZ */
3291 DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock, 4)
3292 void __thiscall reader_writer_lock_lock(reader_writer_lock *this)
3294 rwl_queue q = { NULL, get_current_context() }, *last;
3296 TRACE("(%p)\n", this);
3298 if (this->thread_id == GetCurrentThreadId()) {
3299 improper_lock e;
3300 improper_lock_ctor_str(&e, "Already locked");
3301 _CxxThrowException(&e, &improper_lock_exception_type);
3304 last = InterlockedExchangePointer((void**)&this->writer_tail, &q);
3305 if (last) {
3306 last->next = &q;
3307 call_Context_Block(q.ctx);
3308 } else {
3309 this->writer_head = &q;
3310 if (InterlockedOr(&this->count, WRITER_WAITING))
3311 call_Context_Block(q.ctx);
3314 this->thread_id = GetCurrentThreadId();
3315 this->writer_head = &this->active;
3316 this->active.next = NULL;
3317 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
3318 spin_wait_for_next_rwl(&q);
3319 this->active.next = q.next;
3323 /* ?lock_read@reader_writer_lock@Concurrency@@QAEXXZ */
3324 /* ?lock_read@reader_writer_lock@Concurrency@@QEAAXXZ */
3325 DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock_read, 4)
3326 void __thiscall reader_writer_lock_lock_read(reader_writer_lock *this)
3328 rwl_queue q = { NULL, get_current_context() };
3330 TRACE("(%p)\n", this);
3332 if (this->thread_id == GetCurrentThreadId()) {
3333 improper_lock e;
3334 improper_lock_ctor_str(&e, "Already locked as writer");
3335 _CxxThrowException(&e, &improper_lock_exception_type);
3338 do {
3339 q.next = this->reader_head;
3340 } while(InterlockedCompareExchangePointer((void**)&this->reader_head, &q, q.next) != q.next);
3342 if (!q.next) {
3343 rwl_queue *head;
3344 LONG count;
3346 while (!((count = this->count) & WRITER_WAITING))
3347 if (InterlockedCompareExchange(&this->count, count+1, count) == count) break;
3349 if (count & WRITER_WAITING)
3350 call_Context_Block(q.ctx);
3352 head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
3353 while(head && head != &q) {
3354 rwl_queue *next = head->next;
3355 InterlockedIncrement(&this->count);
3356 call_Context_Unblock(head->ctx);
3357 head = next;
3359 } else {
3360 call_Context_Block(q.ctx);
3364 /* ?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ */
3365 /* ?try_lock@reader_writer_lock@Concurrency@@QEAA_NXZ */
3366 DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock, 4)
3367 bool __thiscall reader_writer_lock_try_lock(reader_writer_lock *this)
3369 rwl_queue q = { NULL };
3371 TRACE("(%p)\n", this);
3373 if (this->thread_id == GetCurrentThreadId())
3374 return FALSE;
3376 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &q, NULL))
3377 return FALSE;
3378 this->writer_head = &q;
3379 if (!InterlockedCompareExchange(&this->count, WRITER_WAITING, 0)) {
3380 this->thread_id = GetCurrentThreadId();
3381 this->writer_head = &this->active;
3382 this->active.next = NULL;
3383 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
3384 spin_wait_for_next_rwl(&q);
3385 this->active.next = q.next;
3387 return TRUE;
3390 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, &q) == &q)
3391 return FALSE;
3392 spin_wait_for_next_rwl(&q);
3393 this->writer_head = q.next;
3394 if (!InterlockedOr(&this->count, WRITER_WAITING)) {
3395 this->thread_id = GetCurrentThreadId();
3396 this->writer_head = &this->active;
3397 this->active.next = q.next;
3398 return TRUE;
3400 return FALSE;
3403 /* ?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ */
3404 /* ?try_lock_read@reader_writer_lock@Concurrency@@QEAA_NXZ */
3405 DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock_read, 4)
3406 bool __thiscall reader_writer_lock_try_lock_read(reader_writer_lock *this)
3408 LONG count;
3410 TRACE("(%p)\n", this);
3412 while (!((count = this->count) & WRITER_WAITING))
3413 if (InterlockedCompareExchange(&this->count, count+1, count) == count) return TRUE;
3414 return FALSE;
3417 /* ?unlock@reader_writer_lock@Concurrency@@QAEXXZ */
3418 /* ?unlock@reader_writer_lock@Concurrency@@QEAAXXZ */
3419 DEFINE_THISCALL_WRAPPER(reader_writer_lock_unlock, 4)
3420 void __thiscall reader_writer_lock_unlock(reader_writer_lock *this)
3422 LONG count;
3423 rwl_queue *head, *next;
3425 TRACE("(%p)\n", this);
3427 if ((count = this->count) & ~WRITER_WAITING) {
3428 count = InterlockedDecrement(&this->count);
3429 if (count != WRITER_WAITING)
3430 return;
3431 call_Context_Unblock(this->writer_head->ctx);
3432 return;
3435 this->thread_id = 0;
3436 next = this->writer_head->next;
3437 if (next) {
3438 call_Context_Unblock(next->ctx);
3439 return;
3441 InterlockedAnd(&this->count, ~WRITER_WAITING);
3442 head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
3443 while (head) {
3444 next = head->next;
3445 InterlockedIncrement(&this->count);
3446 call_Context_Unblock(head->ctx);
3447 head = next;
3450 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, this->writer_head) == this->writer_head)
3451 return;
3452 InterlockedOr(&this->count, WRITER_WAITING);
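/* Editorial usage sketch, not part of the original file: shared versus
 * exclusive acquisition on the reader_writer_lock implemented above. */
static inline void example_reader_writer_lock(reader_writer_lock *rwl)
{
    reader_writer_lock_lock_read(rwl);  /* shared: readers run concurrently */
    /* ... read-only section ... */
    reader_writer_lock_unlock(rwl);

    reader_writer_lock_lock(rwl);       /* exclusive: single writer */
    /* ... write section ... */
    reader_writer_lock_unlock(rwl);
}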
3455 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
3456 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
3457 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_ctor, 8)
3458 reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_ctor(
3459 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
3461 TRACE("(%p %p)\n", this, lock);
3463 this->lock = lock;
3464 reader_writer_lock_lock(lock);
3465 return this;
3468 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ */
3469 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QEAA@XZ */
3470 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_dtor, 4)
3471 void __thiscall reader_writer_lock_scoped_lock_dtor(reader_writer_lock_scoped_lock *this)
3473 TRACE("(%p)\n", this);
3474 reader_writer_lock_unlock(this->lock);
3477 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
3478 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
3479 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_ctor, 8)
3480 reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_read_ctor(
3481 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
3483 TRACE("(%p %p)\n", this, lock);
3485 this->lock = lock;
3486 reader_writer_lock_lock_read(lock);
3487 return this;
3490 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ */
3491 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@XZ */
3492 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_dtor, 4)
3493 void __thiscall reader_writer_lock_scoped_lock_read_dtor(reader_writer_lock_scoped_lock *this)
3495 TRACE("(%p)\n", this);
3496 reader_writer_lock_unlock(this->lock);
/* ??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
/* ??0_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_ctor, 4)
_ReentrantBlockingLock* __thiscall _ReentrantBlockingLock_ctor(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);

    InitializeCriticalSection(&this->cs);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": _ReentrantBlockingLock");
    return this;
}

/* ??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
/* ??1_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_dtor, 4)
void __thiscall _ReentrantBlockingLock_dtor(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);
}

/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Acquire, 4)
void __thiscall _ReentrantBlockingLock__Acquire(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    EnterCriticalSection(&this->cs);
}

/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Release, 4)
void __thiscall _ReentrantBlockingLock__Release(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    LeaveCriticalSection(&this->cs);
}

/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ */
/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__TryAcquire, 4)
bool __thiscall _ReentrantBlockingLock__TryAcquire(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    return TryEnterCriticalSection(&this->cs);
}
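
/* _ReentrantBlockingLock simply wraps a Win32 CRITICAL_SECTION, which is
 * recursive: the owning thread may call _Acquire() again without blocking,
 * as long as each acquisition is paired with a _Release().  Illustrative
 * client-side use:
 *
 *   Concurrency::details::_ReentrantBlockingLock lock;
 *   lock._Acquire();
 *   lock._Acquire();    // same thread re-enters without deadlocking
 *   lock._Release();
 *   lock._Release();
 */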
/* ?wait@Concurrency@@YAXI@Z */
void __cdecl Concurrency_wait(unsigned int time)
{
    TRACE("(%d)\n", time);
    block_context_for(get_current_context(), time);
}
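
/* wait() cooperatively blocks the calling context for (at least) the given
 * number of milliseconds, through the same block_context_for() helper that
 * the timed waits elsewhere in this file rely on. */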
#if _MSVCR_VER>=110
/* ?_Trace_agents@Concurrency@@YAXW4Agents_EventType@1@_JZZ */
void WINAPIV _Trace_agents(/*enum Concurrency::Agents_EventType*/int type, __int64 id, ...)
{
    FIXME("(%d %#I64x) stub\n", type, id);
}
#endif

/* ?_Trace_ppl_function@Concurrency@@YAXABU_GUID@@EW4ConcRT_EventType@1@@Z */
/* ?_Trace_ppl_function@Concurrency@@YAXAEBU_GUID@@EW4ConcRT_EventType@1@@Z */
void __cdecl _Trace_ppl_function(const GUID *guid, unsigned char level, enum ConcRT_EventType type)
{
    FIXME("(%s %u %i) stub\n", debugstr_guid(guid), level, type);
}
/* ??0_Timer@details@Concurrency@@IAE@I_N@Z */
/* ??0_Timer@details@Concurrency@@IEAA@I_N@Z */
DEFINE_THISCALL_WRAPPER(_Timer_ctor, 12)
_Timer* __thiscall _Timer_ctor(_Timer *this, unsigned int elapse, bool repeat)
{
    TRACE("(%p %u %x)\n", this, elapse, repeat);

    this->vtable = &_Timer_vtable;
    this->timer = NULL;
    this->elapse = elapse;
    this->repeat = repeat;
    return this;
}

static void WINAPI timer_callback(TP_CALLBACK_INSTANCE *instance, void *ctx, TP_TIMER *timer)
{
    _Timer *this = ctx;
    TRACE("calling _Timer(%p) callback\n", this);
    call__Timer_callback(this);
}
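
/* The threadpool runs timer_callback() on a worker thread; it forwards to
 * the object's virtual callback through the _Timer vtable via the
 * call__Timer_callback() dispatch macro (defined, like the other vtable
 * call helpers, earlier in this file). */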
/* ?_Start@_Timer@details@Concurrency@@IAEXXZ */
/* ?_Start@_Timer@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Timer__Start, 4)
void __thiscall _Timer__Start(_Timer *this)
{
    LONGLONG ll;
    FILETIME ft;

    TRACE("(%p)\n", this);

    this->timer = CreateThreadpoolTimer(timer_callback, this, NULL);
    if (!this->timer)
    {
        FIXME("throw exception?\n");
        return;
    }

    ll = -(LONGLONG)this->elapse * TICKSPERMSEC;
    ft.dwLowDateTime = ll & 0xffffffff;
    ft.dwHighDateTime = ll >> 32;
    SetThreadpoolTimer(this->timer, &ft, this->repeat ? this->elapse : 0, 0);
}
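
/* SetThreadpoolTimer() treats a negative FILETIME as a due time relative to
 * now, in 100ns ticks; TICKSPERMSEC (10000) converts milliseconds to ticks.
 * For example, elapse = 500 gives ll = -5000000, i.e. "fire in 500ms".  The
 * repeat period, by contrast, is passed directly in milliseconds. */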
/* ?_Stop@_Timer@details@Concurrency@@IAEXXZ */
/* ?_Stop@_Timer@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Timer__Stop, 4)
void __thiscall _Timer__Stop(_Timer *this)
{
    TRACE("(%p)\n", this);

    SetThreadpoolTimer(this->timer, NULL, 0, 0);
    WaitForThreadpoolTimerCallbacks(this->timer, TRUE);
    CloseThreadpoolTimer(this->timer);
    this->timer = NULL;
}
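
/* The stop sequence first disables the timer (NULL due time), then
 * WaitForThreadpoolTimerCallbacks() with fCancelPendingCallbacks=TRUE drops
 * queued callbacks and waits for a running one to return, and only then is
 * the timer closed.  After _Stop() no callback can touch the object, so the
 * destructor below may run safely. */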
/* ??1_Timer@details@Concurrency@@MAE@XZ */
/* ??1_Timer@details@Concurrency@@MEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Timer_dtor, 4)
void __thiscall _Timer_dtor(_Timer *this)
{
    TRACE("(%p)\n", this);

    if (this->timer)
        _Timer__Stop(this);
}

DEFINE_THISCALL_WRAPPER(_Timer_vector_dtor, 8)
_Timer* __thiscall _Timer_vector_dtor(_Timer *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if (flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for (i=*ptr-1; i>=0; i--)
            _Timer_dtor(this+i);
        operator_delete(ptr);
    } else {
        _Timer_dtor(this);
        if (flags & 1)
            operator_delete(this);
    }

    return this;
}
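
/* The vector deleting destructor follows the MSVC ABI convention for the
 * flags argument: bit 1 selects array destruction, with the element count
 * stored in the INT_PTR just before the first object and the whole block
 * freed afterwards; otherwise a single object is destroyed, and bit 0
 * decides whether its storage is freed as well. */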
#ifdef __ASM_USE_THISCALL_WRAPPER

#define DEFINE_VTBL_WRAPPER(off)            \
    __ASM_GLOBAL_FUNC(vtbl_wrapper_ ## off, \
        "popl %eax\n\t"                     \
        "popl %ecx\n\t"                     \
        "pushl %eax\n\t"                    \
        "movl 0(%ecx), %eax\n\t"            \
        "jmp *" #off "(%eax)\n\t")
DEFINE_VTBL_WRAPPER(0);
DEFINE_VTBL_WRAPPER(4);
DEFINE_VTBL_WRAPPER(8);
DEFINE_VTBL_WRAPPER(12);
DEFINE_VTBL_WRAPPER(16);
DEFINE_VTBL_WRAPPER(20);
DEFINE_VTBL_WRAPPER(24);
DEFINE_VTBL_WRAPPER(28);
DEFINE_VTBL_WRAPPER(32);
DEFINE_VTBL_WRAPPER(36);
DEFINE_VTBL_WRAPPER(40);
DEFINE_VTBL_WRAPPER(44);
DEFINE_VTBL_WRAPPER(48);

#endif
DEFINE_RTTI_DATA0(Context, 0, ".?AVContext@Concurrency@@")
DEFINE_RTTI_DATA1(ContextBase, 0, &Context_rtti_base_descriptor, ".?AVContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ExternalContextBase, 0, &ContextBase_rtti_base_descriptor,
        &Context_rtti_base_descriptor, ".?AVExternalContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA0(Scheduler, 0, ".?AVScheduler@Concurrency@@")
DEFINE_RTTI_DATA1(SchedulerBase, 0, &Scheduler_rtti_base_descriptor, ".?AVSchedulerBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ThreadScheduler, 0, &SchedulerBase_rtti_base_descriptor,
        &Scheduler_rtti_base_descriptor, ".?AVThreadScheduler@details@Concurrency@@")
DEFINE_RTTI_DATA0(_Timer, 0, ".?AV_Timer@details@Concurrency@@")
__ASM_BLOCK_BEGIN(concurrency_vtables)
    __ASM_VTABLE(ExternalContextBase,
            VTABLE_ADD_FUNC(ExternalContextBase_GetId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetVirtualProcessorId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetScheduleGroupId)
            VTABLE_ADD_FUNC(ExternalContextBase_Unblock)
            VTABLE_ADD_FUNC(ExternalContextBase_IsSynchronouslyBlocked)
            VTABLE_ADD_FUNC(ExternalContextBase_vector_dtor)
            VTABLE_ADD_FUNC(ExternalContextBase_Block)
            VTABLE_ADD_FUNC(ExternalContextBase_Yield)
            VTABLE_ADD_FUNC(ExternalContextBase_SpinYield)
            VTABLE_ADD_FUNC(ExternalContextBase_Oversubscribe)
            VTABLE_ADD_FUNC(ExternalContextBase_Alloc)
            VTABLE_ADD_FUNC(ExternalContextBase_Free)
            VTABLE_ADD_FUNC(ExternalContextBase_EnterCriticalRegionHelper)
            VTABLE_ADD_FUNC(ExternalContextBase_EnterHyperCriticalRegionHelper)
            VTABLE_ADD_FUNC(ExternalContextBase_ExitCriticalRegionHelper)
            VTABLE_ADD_FUNC(ExternalContextBase_ExitHyperCriticalRegionHelper)
            VTABLE_ADD_FUNC(ExternalContextBase_GetCriticalRegionType)
            VTABLE_ADD_FUNC(ExternalContextBase_GetContextKind));
    __ASM_VTABLE(ThreadScheduler,
            VTABLE_ADD_FUNC(ThreadScheduler_vector_dtor)
            VTABLE_ADD_FUNC(ThreadScheduler_Id)
            VTABLE_ADD_FUNC(ThreadScheduler_GetNumberOfVirtualProcessors)
            VTABLE_ADD_FUNC(ThreadScheduler_GetPolicy)
            VTABLE_ADD_FUNC(ThreadScheduler_Reference)
            VTABLE_ADD_FUNC(ThreadScheduler_Release)
            VTABLE_ADD_FUNC(ThreadScheduler_RegisterShutdownEvent)
            VTABLE_ADD_FUNC(ThreadScheduler_Attach)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_IsAvailableLocation)
#endif
            );
    __ASM_VTABLE(_Timer,
            VTABLE_ADD_FUNC(_Timer_vector_dtor));
__ASM_BLOCK_END
void msvcrt_init_concurrency(void *base)
{
#ifdef __x86_64__
    init_cexception_rtti(base);
    init_improper_lock_rtti(base);
    init_improper_scheduler_attach_rtti(base);
    init_improper_scheduler_detach_rtti(base);
    init_invalid_multiple_scheduling_rtti(base);
    init_invalid_scheduler_policy_key_rtti(base);
    init_invalid_scheduler_policy_thread_specification_rtti(base);
    init_invalid_scheduler_policy_value_rtti(base);
    init_missing_wait_rtti(base);
    init_scheduler_resource_allocation_error_rtti(base);
    init_Context_rtti(base);
    init_ContextBase_rtti(base);
    init_ExternalContextBase_rtti(base);
    init_Scheduler_rtti(base);
    init_SchedulerBase_rtti(base);
    init_ThreadScheduler_rtti(base);
    init__Timer_rtti(base);

    init_cexception_cxx_type_info(base);
    init_improper_lock_cxx(base);
    init_improper_scheduler_attach_cxx(base);
    init_improper_scheduler_detach_cxx(base);
    init_invalid_multiple_scheduling_cxx(base);
    init_invalid_scheduler_policy_key_cxx(base);
    init_invalid_scheduler_policy_thread_specification_cxx(base);
    init_invalid_scheduler_policy_value_cxx(base);
#if _MSVCR_VER >= 120
    init_missing_wait_cxx(base);
#endif
    init_scheduler_resource_allocation_error_cxx(base);
#endif
}
void msvcrt_free_concurrency(void)
{
    if (context_tls_index != TLS_OUT_OF_INDEXES)
        TlsFree(context_tls_index);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    if(default_scheduler) {
        ThreadScheduler_dtor(default_scheduler);
        operator_delete(default_scheduler);
    }
}

void msvcrt_free_scheduler_thread(void)
{
    Context *context = try_get_current_context();
    if (!context) return;
    call_Context_dtor(context, 1);
}

#endif /* _MSVCR_VER >= 100 */