mfplay: Add support for same-thread event callback.
[wine.git] / dlls / msvcrt / scheduler.c
blobef40a3236cd5988367e61a075808ce36a63b06ae
1 /*
2 * msvcrt.dll C++ objects
4 * Copyright 2017 Piotr Caban
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
21 #include <stdarg.h>
22 #include <stdbool.h>
24 #include "windef.h"
25 #include "winternl.h"
26 #include "wine/debug.h"
27 #include "msvcrt.h"
28 #include "cppexcept.h"
29 #include "cxx.h"
31 #if _MSVCR_VER >= 100
33 WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);
/* Monotonically increasing ids handed out to contexts and schedulers;
 * InterlockedIncrement makes the first assigned id 0. */
static int context_id = -1;
static int scheduler_id = -1;
/* Keys of the individual settings stored in a SchedulerPolicy
 * (mirrors Concurrency::PolicyElementKey). */
typedef enum {
    SchedulerKind,
    MaxConcurrency,
    MinConcurrency,
    TargetOversubscriptionFactor,
    LocalContextCacheSize,
    ContextStackSize,
    ContextPriority,
    SchedulingProtocol,
    DynamicProgressFeedback,
    WinRTInitialization,
    last_policy_id /* number of keys; not a real policy */
} PolicyElementKey;
/* A scheduler policy: points at a heap-allocated table of values,
 * indexed by PolicyElementKey (owned; freed in SchedulerPolicy_dtor). */
typedef struct {
    struct _policy_container {
        unsigned int policies[last_policy_id];
    } *policy_container;
} SchedulerPolicy;
/* Abstract base of all contexts; concrete behavior comes from the vtable. */
typedef struct {
    const vtable_ptr *vtable;
} Context;

/* Virtual-call helpers; the numeric argument is the vtable slot offset. */
#define call_Context_GetId(this) CALL_VTBL_FUNC(this, 0, \
        unsigned int, (const Context*), (this))
#define call_Context_GetVirtualProcessorId(this) CALL_VTBL_FUNC(this, 4, \
        unsigned int, (const Context*), (this))
#define call_Context_GetScheduleGroupId(this) CALL_VTBL_FUNC(this, 8, \
        unsigned int, (const Context*), (this))
#define call_Context_dtor(this, flags) CALL_VTBL_FUNC(this, 20, \
        Context*, (Context*, unsigned int), (this, flags))

/* Thin wrapper around a Context pointer (Concurrency::details::_Context). */
typedef struct {
    Context *context;
} _Context;
74 union allocator_cache_entry {
75 struct _free {
76 int depth;
77 union allocator_cache_entry *next;
78 } free;
79 struct _alloc {
80 int bucket;
81 char mem[1];
82 } alloc;
85 struct scheduler_list {
86 struct Scheduler *scheduler;
87 struct scheduler_list *next;
90 typedef struct {
91 Context context;
92 struct scheduler_list scheduler;
93 unsigned int id;
94 union allocator_cache_entry *allocator_cache[8];
95 } ExternalContextBase;
96 extern const vtable_ptr ExternalContextBase_vtable;
97 static void ExternalContextBase_ctor(ExternalContextBase*);
/* Abstract scheduler interface; all calls are dispatched via the vtable. */
typedef struct Scheduler {
    const vtable_ptr *vtable;
} Scheduler;

/* Virtual-call helpers for Scheduler.  The slot layout differs between
 * msvcr100 and later runtimes, hence the _MSVCR_VER split below. */
#define call_Scheduler_Id(this) CALL_VTBL_FUNC(this, 4, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetNumberOfVirtualProcessors(this) CALL_VTBL_FUNC(this, 8, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetPolicy(this,policy) CALL_VTBL_FUNC(this, 12, \
        SchedulerPolicy*, (Scheduler*,SchedulerPolicy*), (this,policy))
#define call_Scheduler_Reference(this) CALL_VTBL_FUNC(this, 16, unsigned int, (Scheduler*), (this))
#define call_Scheduler_Release(this) CALL_VTBL_FUNC(this, 20, unsigned int, (Scheduler*), (this))
#define call_Scheduler_RegisterShutdownEvent(this,event) CALL_VTBL_FUNC(this, 24, void, (Scheduler*,HANDLE), (this,event))
#define call_Scheduler_Attach(this) CALL_VTBL_FUNC(this, 28, void, (Scheduler*), (this))
#if _MSVCR_VER > 100
#define call_Scheduler_CreateScheduleGroup_loc(this,placement) CALL_VTBL_FUNC(this, 32, \
        /*ScheduleGroup*/void*, (Scheduler*,/*location*/void*), (this,placement))
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 36, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask_loc(this,proc,data,placement) CALL_VTBL_FUNC(this, 40, \
        void, (Scheduler*,void (__cdecl*)(void*),void*,/*location*/void*), (this,proc,data,placement))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 44, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#define call_Scheduler_IsAvailableLocation(this,placement) CALL_VTBL_FUNC(this, 48, \
        bool, (Scheduler*,const /*location*/void*), (this,placement))
#else
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 32, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 36, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#endif
/* Scheduler implementation that runs tasks on regular Win32 threads. */
typedef struct {
    Scheduler scheduler;
    LONG ref;                  /* reference count (Reference/Release) */
    unsigned int id;
    unsigned int virt_proc_no; /* reported virtual processor count */
    SchedulerPolicy policy;
    int shutdown_count;        /* used entries in shutdown_events */
    int shutdown_size;         /* allocated entries in shutdown_events */
    HANDLE *shutdown_events;   /* signaled from ThreadScheduler_dtor */
    CRITICAL_SECTION cs;       /* guards the shutdown event array */
} ThreadScheduler;
extern const vtable_ptr ThreadScheduler_vtable;

/* Wrapper around a Scheduler pointer (Concurrency::details::_Scheduler). */
typedef struct {
    Scheduler *scheduler;
} _Scheduler;

/* Empty type backing the purely static CurrentScheduler interface. */
typedef struct {
    char empty;
} _CurrentScheduler;
147 static int context_tls_index = TLS_OUT_OF_INDEXES;
149 static CRITICAL_SECTION default_scheduler_cs;
150 static CRITICAL_SECTION_DEBUG default_scheduler_cs_debug =
152 0, 0, &default_scheduler_cs,
153 { &default_scheduler_cs_debug.ProcessLocksList, &default_scheduler_cs_debug.ProcessLocksList },
154 0, 0, { (DWORD_PTR)(__FILE__ ": default_scheduler_cs") }
156 static CRITICAL_SECTION default_scheduler_cs = { &default_scheduler_cs_debug, -1, 0, 0, 0, 0 };
157 static SchedulerPolicy default_scheduler_policy;
158 static ThreadScheduler *default_scheduler;
160 static void create_default_scheduler(void);
162 static Context* try_get_current_context(void)
164 if (context_tls_index == TLS_OUT_OF_INDEXES)
165 return NULL;
166 return TlsGetValue(context_tls_index);
169 static Context* get_current_context(void)
171 Context *ret;
173 if (context_tls_index == TLS_OUT_OF_INDEXES) {
174 int tls_index = TlsAlloc();
175 if (tls_index == TLS_OUT_OF_INDEXES) {
176 throw_exception(EXCEPTION_SCHEDULER_RESOURCE_ALLOCATION_ERROR,
177 HRESULT_FROM_WIN32(GetLastError()), NULL);
178 return NULL;
181 if(InterlockedCompareExchange(&context_tls_index, tls_index, TLS_OUT_OF_INDEXES) != TLS_OUT_OF_INDEXES)
182 TlsFree(tls_index);
185 ret = TlsGetValue(context_tls_index);
186 if (!ret) {
187 ExternalContextBase *context = operator_new(sizeof(ExternalContextBase));
188 ExternalContextBase_ctor(context);
189 TlsSetValue(context_tls_index, context);
190 ret = &context->context;
192 return ret;
195 static Scheduler* try_get_current_scheduler(void)
197 ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();
199 if (!context)
200 return NULL;
202 if (context->context.vtable != &ExternalContextBase_vtable) {
203 ERR("unknown context set\n");
204 return NULL;
206 return context->scheduler.scheduler;
209 static Scheduler* get_current_scheduler(void)
211 ExternalContextBase *context = (ExternalContextBase*)get_current_context();
213 if (context->context.vtable != &ExternalContextBase_vtable) {
214 ERR("unknown context set\n");
215 return NULL;
217 return context->scheduler.scheduler;
220 /* ?CurrentContext@Context@Concurrency@@SAPAV12@XZ */
221 /* ?CurrentContext@Context@Concurrency@@SAPEAV12@XZ */
222 Context* __cdecl Context_CurrentContext(void)
224 TRACE("()\n");
225 return get_current_context();
228 /* ?Id@Context@Concurrency@@SAIXZ */
229 unsigned int __cdecl Context_Id(void)
231 Context *ctx = try_get_current_context();
232 TRACE("()\n");
233 return ctx ? call_Context_GetId(ctx) : -1;
236 /* ?Block@Context@Concurrency@@SAXXZ */
237 void __cdecl Context_Block(void)
239 FIXME("()\n");
242 /* ?Yield@Context@Concurrency@@SAXXZ */
243 /* ?_Yield@_Context@details@Concurrency@@SAXXZ */
244 void __cdecl Context_Yield(void)
246 FIXME("()\n");
249 /* ?_SpinYield@Context@Concurrency@@SAXXZ */
250 void __cdecl Context__SpinYield(void)
252 FIXME("()\n");
255 /* ?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ */
256 bool __cdecl Context_IsCurrentTaskCollectionCanceling(void)
258 FIXME("()\n");
259 return FALSE;
262 /* ?Oversubscribe@Context@Concurrency@@SAX_N@Z */
263 void __cdecl Context_Oversubscribe(bool begin)
265 FIXME("(%x)\n", begin);
268 /* ?ScheduleGroupId@Context@Concurrency@@SAIXZ */
269 unsigned int __cdecl Context_ScheduleGroupId(void)
271 Context *ctx = try_get_current_context();
272 TRACE("()\n");
273 return ctx ? call_Context_GetScheduleGroupId(ctx) : -1;
276 /* ?VirtualProcessorId@Context@Concurrency@@SAIXZ */
277 unsigned int __cdecl Context_VirtualProcessorId(void)
279 Context *ctx = try_get_current_context();
280 TRACE("()\n");
281 return ctx ? call_Context_GetVirtualProcessorId(ctx) : -1;
#if _MSVCR_VER > 100
/* ?_CurrentContext@_Context@details@Concurrency@@SA?AV123@XZ */
/* Return-by-value variant: fills *ret with the current context pointer. */
_Context *__cdecl _Context__CurrentContext(_Context *ret)
{
    TRACE("(%p)\n", ret);
    ret->context = Context_CurrentContext();
    return ret;
}
#endif
294 DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetId, 4)
295 unsigned int __thiscall ExternalContextBase_GetId(const ExternalContextBase *this)
297 TRACE("(%p)->()\n", this);
298 return this->id;
301 DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetVirtualProcessorId, 4)
302 unsigned int __thiscall ExternalContextBase_GetVirtualProcessorId(const ExternalContextBase *this)
304 FIXME("(%p)->() stub\n", this);
305 return -1;
308 DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetScheduleGroupId, 4)
309 unsigned int __thiscall ExternalContextBase_GetScheduleGroupId(const ExternalContextBase *this)
311 FIXME("(%p)->() stub\n", this);
312 return -1;
315 DEFINE_THISCALL_WRAPPER(ExternalContextBase_Unblock, 4)
316 void __thiscall ExternalContextBase_Unblock(ExternalContextBase *this)
318 FIXME("(%p)->() stub\n", this);
321 DEFINE_THISCALL_WRAPPER(ExternalContextBase_IsSynchronouslyBlocked, 4)
322 bool __thiscall ExternalContextBase_IsSynchronouslyBlocked(const ExternalContextBase *this)
324 FIXME("(%p)->() stub\n", this);
325 return FALSE;
328 static void ExternalContextBase_dtor(ExternalContextBase *this)
330 struct scheduler_list *scheduler_cur, *scheduler_next;
331 union allocator_cache_entry *next, *cur;
332 int i;
334 /* TODO: move the allocator cache to scheduler so it can be reused */
335 for(i=0; i<ARRAY_SIZE(this->allocator_cache); i++) {
336 for(cur = this->allocator_cache[i]; cur; cur=next) {
337 next = cur->free.next;
338 operator_delete(cur);
342 if (this->scheduler.scheduler) {
343 call_Scheduler_Release(this->scheduler.scheduler);
345 for(scheduler_cur=this->scheduler.next; scheduler_cur; scheduler_cur=scheduler_next) {
346 scheduler_next = scheduler_cur->next;
347 call_Scheduler_Release(scheduler_cur->scheduler);
348 operator_delete(scheduler_cur);
353 DEFINE_THISCALL_WRAPPER(ExternalContextBase_vector_dtor, 8)
354 Context* __thiscall ExternalContextBase_vector_dtor(ExternalContextBase *this, unsigned int flags)
356 TRACE("(%p %x)\n", this, flags);
357 if(flags & 2) {
358 /* we have an array, with the number of elements stored before the first object */
359 INT_PTR i, *ptr = (INT_PTR *)this-1;
361 for(i=*ptr-1; i>=0; i--)
362 ExternalContextBase_dtor(this+i);
363 operator_delete(ptr);
364 } else {
365 ExternalContextBase_dtor(this);
366 if(flags & 1)
367 operator_delete(this);
370 return &this->context;
373 static void ExternalContextBase_ctor(ExternalContextBase *this)
375 TRACE("(%p)->()\n", this);
377 memset(this, 0, sizeof(*this));
378 this->context.vtable = &ExternalContextBase_vtable;
379 this->id = InterlockedIncrement(&context_id);
381 create_default_scheduler();
382 this->scheduler.scheduler = &default_scheduler->scheduler;
383 call_Scheduler_Reference(&default_scheduler->scheduler);
386 /* ?Alloc@Concurrency@@YAPAXI@Z */
387 /* ?Alloc@Concurrency@@YAPEAX_K@Z */
388 void * CDECL Concurrency_Alloc(size_t size)
390 ExternalContextBase *context = (ExternalContextBase*)get_current_context();
391 union allocator_cache_entry *p;
393 size += FIELD_OFFSET(union allocator_cache_entry, alloc.mem);
394 if (size < sizeof(*p))
395 size = sizeof(*p);
397 if (context->context.vtable != &ExternalContextBase_vtable) {
398 p = operator_new(size);
399 p->alloc.bucket = -1;
400 }else {
401 int i;
403 C_ASSERT(sizeof(union allocator_cache_entry) <= 1 << 4);
404 for(i=0; i<ARRAY_SIZE(context->allocator_cache); i++)
405 if (1 << (i+4) >= size) break;
407 if(i==ARRAY_SIZE(context->allocator_cache)) {
408 p = operator_new(size);
409 p->alloc.bucket = -1;
410 }else if (context->allocator_cache[i]) {
411 p = context->allocator_cache[i];
412 context->allocator_cache[i] = p->free.next;
413 p->alloc.bucket = i;
414 }else {
415 p = operator_new(1 << (i+4));
416 p->alloc.bucket = i;
420 TRACE("(%Iu) returning %p\n", size, p->alloc.mem);
421 return p->alloc.mem;
424 /* ?Free@Concurrency@@YAXPAX@Z */
425 /* ?Free@Concurrency@@YAXPEAX@Z */
426 void CDECL Concurrency_Free(void* mem)
428 union allocator_cache_entry *p = (union allocator_cache_entry*)((char*)mem-FIELD_OFFSET(union allocator_cache_entry, alloc.mem));
429 ExternalContextBase *context = (ExternalContextBase*)get_current_context();
430 int bucket = p->alloc.bucket;
432 TRACE("(%p)\n", mem);
434 if (context->context.vtable != &ExternalContextBase_vtable) {
435 operator_delete(p);
436 }else {
437 if(bucket >= 0 && bucket < ARRAY_SIZE(context->allocator_cache) &&
438 (!context->allocator_cache[bucket] || context->allocator_cache[bucket]->free.depth < 20)) {
439 p->free.next = context->allocator_cache[bucket];
440 p->free.depth = p->free.next ? p->free.next->free.depth+1 : 0;
441 context->allocator_cache[bucket] = p;
442 }else {
443 operator_delete(p);
448 /* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z */
449 /* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QEAAIW4PolicyElementKey@2@I@Z */
450 DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetPolicyValue, 12)
451 unsigned int __thiscall SchedulerPolicy_SetPolicyValue(SchedulerPolicy *this,
452 PolicyElementKey policy, unsigned int val)
454 unsigned int ret;
456 TRACE("(%p %d %d)\n", this, policy, val);
458 if (policy == MinConcurrency)
459 throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "MinConcurrency");
460 if (policy == MaxConcurrency)
461 throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "MaxConcurrency");
462 if (policy >= last_policy_id)
463 throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "Invalid policy");
465 switch(policy) {
466 case SchedulerKind:
467 if (val)
468 throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "SchedulerKind");
469 break;
470 case TargetOversubscriptionFactor:
471 if (!val)
472 throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE,
473 0, "TargetOversubscriptionFactor");
474 break;
475 case ContextPriority:
476 if (((int)val < -7 /* THREAD_PRIORITY_REALTIME_LOWEST */
477 || val > 6 /* THREAD_PRIORITY_REALTIME_HIGHEST */)
478 && val != THREAD_PRIORITY_IDLE && val != THREAD_PRIORITY_TIME_CRITICAL
479 && val != INHERIT_THREAD_PRIORITY)
480 throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "ContextPriority");
481 break;
482 case SchedulingProtocol:
483 case DynamicProgressFeedback:
484 case WinRTInitialization:
485 if (val != 0 && val != 1)
486 throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "SchedulingProtocol");
487 break;
488 default:
489 break;
492 ret = this->policy_container->policies[policy];
493 this->policy_container->policies[policy] = val;
494 return ret;
497 /* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z */
498 /* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QEAAXII@Z */
499 DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetConcurrencyLimits, 12)
500 void __thiscall SchedulerPolicy_SetConcurrencyLimits(SchedulerPolicy *this,
501 unsigned int min_concurrency, unsigned int max_concurrency)
503 TRACE("(%p %d %d)\n", this, min_concurrency, max_concurrency);
505 if (min_concurrency > max_concurrency)
506 throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_THREAD_SPECIFICATION, 0, NULL);
507 if (!max_concurrency)
508 throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "MaxConcurrency");
510 this->policy_container->policies[MinConcurrency] = min_concurrency;
511 this->policy_container->policies[MaxConcurrency] = max_concurrency;
514 /* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z */
515 /* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QEBAIW4PolicyElementKey@2@@Z */
516 DEFINE_THISCALL_WRAPPER(SchedulerPolicy_GetPolicyValue, 8)
517 unsigned int __thiscall SchedulerPolicy_GetPolicyValue(
518 const SchedulerPolicy *this, PolicyElementKey policy)
520 TRACE("(%p %d)\n", this, policy);
522 if (policy >= last_policy_id)
523 throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "Invalid policy");
524 return this->policy_container->policies[policy];
527 /* ??0SchedulerPolicy@Concurrency@@QAE@XZ */
528 /* ??0SchedulerPolicy@Concurrency@@QEAA@XZ */
529 DEFINE_THISCALL_WRAPPER(SchedulerPolicy_ctor, 4)
530 SchedulerPolicy* __thiscall SchedulerPolicy_ctor(SchedulerPolicy *this)
532 TRACE("(%p)\n", this);
534 this->policy_container = operator_new(sizeof(*this->policy_container));
535 /* TODO: default values can probably be affected by CurrentScheduler */
536 this->policy_container->policies[SchedulerKind] = 0;
537 this->policy_container->policies[MaxConcurrency] = -1;
538 this->policy_container->policies[MinConcurrency] = 1;
539 this->policy_container->policies[TargetOversubscriptionFactor] = 1;
540 this->policy_container->policies[LocalContextCacheSize] = 8;
541 this->policy_container->policies[ContextStackSize] = 0;
542 this->policy_container->policies[ContextPriority] = THREAD_PRIORITY_NORMAL;
543 this->policy_container->policies[SchedulingProtocol] = 0;
544 this->policy_container->policies[DynamicProgressFeedback] = 1;
545 return this;
548 /* ??0SchedulerPolicy@Concurrency@@QAA@IZZ */
549 /* ??0SchedulerPolicy@Concurrency@@QEAA@_KZZ */
550 /* TODO: don't leak policy_container on exception */
551 SchedulerPolicy* WINAPIV SchedulerPolicy_ctor_policies(
552 SchedulerPolicy *this, size_t n, ...)
554 unsigned int min_concurrency, max_concurrency;
555 __ms_va_list valist;
556 size_t i;
558 TRACE("(%p %Iu)\n", this, n);
560 SchedulerPolicy_ctor(this);
561 min_concurrency = this->policy_container->policies[MinConcurrency];
562 max_concurrency = this->policy_container->policies[MaxConcurrency];
564 __ms_va_start(valist, n);
565 for(i=0; i<n; i++) {
566 PolicyElementKey policy = va_arg(valist, PolicyElementKey);
567 unsigned int val = va_arg(valist, unsigned int);
569 if(policy == MinConcurrency)
570 min_concurrency = val;
571 else if(policy == MaxConcurrency)
572 max_concurrency = val;
573 else
574 SchedulerPolicy_SetPolicyValue(this, policy, val);
576 __ms_va_end(valist);
578 SchedulerPolicy_SetConcurrencyLimits(this, min_concurrency, max_concurrency);
579 return this;
582 /* ??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z */
583 /* ??4SchedulerPolicy@Concurrency@@QEAAAEAV01@AEBV01@@Z */
584 DEFINE_THISCALL_WRAPPER(SchedulerPolicy_op_assign, 8)
585 SchedulerPolicy* __thiscall SchedulerPolicy_op_assign(
586 SchedulerPolicy *this, const SchedulerPolicy *rhs)
588 TRACE("(%p %p)\n", this, rhs);
589 memcpy(this->policy_container->policies, rhs->policy_container->policies,
590 sizeof(this->policy_container->policies));
591 return this;
594 /* ??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z */
595 /* ??0SchedulerPolicy@Concurrency@@QEAA@AEBV01@@Z */
596 DEFINE_THISCALL_WRAPPER(SchedulerPolicy_copy_ctor, 8)
597 SchedulerPolicy* __thiscall SchedulerPolicy_copy_ctor(
598 SchedulerPolicy *this, const SchedulerPolicy *rhs)
600 TRACE("(%p %p)\n", this, rhs);
601 SchedulerPolicy_ctor(this);
602 return SchedulerPolicy_op_assign(this, rhs);
605 /* ??1SchedulerPolicy@Concurrency@@QAE@XZ */
606 /* ??1SchedulerPolicy@Concurrency@@QEAA@XZ */
607 DEFINE_THISCALL_WRAPPER(SchedulerPolicy_dtor, 4)
608 void __thiscall SchedulerPolicy_dtor(SchedulerPolicy *this)
610 TRACE("(%p)\n", this);
611 operator_delete(this->policy_container);
614 static void ThreadScheduler_dtor(ThreadScheduler *this)
616 int i;
618 if(this->ref != 0) WARN("ref = %d\n", this->ref);
619 SchedulerPolicy_dtor(&this->policy);
621 for(i=0; i<this->shutdown_count; i++)
622 SetEvent(this->shutdown_events[i]);
623 operator_delete(this->shutdown_events);
625 this->cs.DebugInfo->Spare[0] = 0;
626 DeleteCriticalSection(&this->cs);
629 DEFINE_THISCALL_WRAPPER(ThreadScheduler_Id, 4)
630 unsigned int __thiscall ThreadScheduler_Id(const ThreadScheduler *this)
632 TRACE("(%p)\n", this);
633 return this->id;
636 DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetNumberOfVirtualProcessors, 4)
637 unsigned int __thiscall ThreadScheduler_GetNumberOfVirtualProcessors(const ThreadScheduler *this)
639 TRACE("(%p)\n", this);
640 return this->virt_proc_no;
643 DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetPolicy, 8)
644 SchedulerPolicy* __thiscall ThreadScheduler_GetPolicy(
645 const ThreadScheduler *this, SchedulerPolicy *ret)
647 TRACE("(%p %p)\n", this, ret);
648 return SchedulerPolicy_copy_ctor(ret, &this->policy);
651 DEFINE_THISCALL_WRAPPER(ThreadScheduler_Reference, 4)
652 unsigned int __thiscall ThreadScheduler_Reference(ThreadScheduler *this)
654 TRACE("(%p)\n", this);
655 return InterlockedIncrement(&this->ref);
658 DEFINE_THISCALL_WRAPPER(ThreadScheduler_Release, 4)
659 unsigned int __thiscall ThreadScheduler_Release(ThreadScheduler *this)
661 unsigned int ret = InterlockedDecrement(&this->ref);
663 TRACE("(%p)\n", this);
665 if(!ret) {
666 ThreadScheduler_dtor(this);
667 operator_delete(this);
669 return ret;
672 DEFINE_THISCALL_WRAPPER(ThreadScheduler_RegisterShutdownEvent, 8)
673 void __thiscall ThreadScheduler_RegisterShutdownEvent(ThreadScheduler *this, HANDLE event)
675 HANDLE *shutdown_events;
676 int size;
678 TRACE("(%p %p)\n", this, event);
680 EnterCriticalSection(&this->cs);
682 size = this->shutdown_size ? this->shutdown_size * 2 : 1;
683 shutdown_events = operator_new(size * sizeof(*shutdown_events));
684 memcpy(shutdown_events, this->shutdown_events,
685 this->shutdown_count * sizeof(*shutdown_events));
686 operator_delete(this->shutdown_events);
687 this->shutdown_size = size;
688 this->shutdown_events = shutdown_events;
689 this->shutdown_events[this->shutdown_count++] = event;
691 LeaveCriticalSection(&this->cs);
694 DEFINE_THISCALL_WRAPPER(ThreadScheduler_Attach, 4)
695 void __thiscall ThreadScheduler_Attach(ThreadScheduler *this)
697 ExternalContextBase *context = (ExternalContextBase*)get_current_context();
699 TRACE("(%p)\n", this);
701 if(context->context.vtable != &ExternalContextBase_vtable) {
702 ERR("unknown context set\n");
703 return;
706 if(context->scheduler.scheduler == &this->scheduler)
707 throw_exception(EXCEPTION_IMPROPER_SCHEDULER_ATTACH, 0, NULL);
709 if(context->scheduler.scheduler) {
710 struct scheduler_list *l = operator_new(sizeof(*l));
711 *l = context->scheduler;
712 context->scheduler.next = l;
714 context->scheduler.scheduler = &this->scheduler;
715 ThreadScheduler_Reference(this);
718 DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup_loc, 8)
719 /*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup_loc(
720 ThreadScheduler *this, /*location*/void *placement)
722 FIXME("(%p %p) stub\n", this, placement);
723 return NULL;
726 DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup, 4)
727 /*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup(ThreadScheduler *this)
729 FIXME("(%p) stub\n", this);
730 return NULL;
733 DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask_loc, 16)
734 void __thiscall ThreadScheduler_ScheduleTask_loc(ThreadScheduler *this,
735 void (__cdecl *proc)(void*), void* data, /*location*/void *placement)
737 FIXME("(%p %p %p %p) stub\n", this, proc, data, placement);
740 DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask, 12)
741 void __thiscall ThreadScheduler_ScheduleTask(ThreadScheduler *this,
742 void (__cdecl *proc)(void*), void* data)
744 FIXME("(%p %p %p) stub\n", this, proc, data);
747 DEFINE_THISCALL_WRAPPER(ThreadScheduler_IsAvailableLocation, 8)
748 bool __thiscall ThreadScheduler_IsAvailableLocation(
749 const ThreadScheduler *this, const /*location*/void *placement)
751 FIXME("(%p %p) stub\n", this, placement);
752 return FALSE;
755 DEFINE_THISCALL_WRAPPER(ThreadScheduler_vector_dtor, 8)
756 Scheduler* __thiscall ThreadScheduler_vector_dtor(ThreadScheduler *this, unsigned int flags)
758 TRACE("(%p %x)\n", this, flags);
759 if(flags & 2) {
760 /* we have an array, with the number of elements stored before the first object */
761 INT_PTR i, *ptr = (INT_PTR *)this-1;
763 for(i=*ptr-1; i>=0; i--)
764 ThreadScheduler_dtor(this+i);
765 operator_delete(ptr);
766 } else {
767 ThreadScheduler_dtor(this);
768 if(flags & 1)
769 operator_delete(this);
772 return &this->scheduler;
775 static ThreadScheduler* ThreadScheduler_ctor(ThreadScheduler *this,
776 const SchedulerPolicy *policy)
778 SYSTEM_INFO si;
780 TRACE("(%p)->()\n", this);
782 this->scheduler.vtable = &ThreadScheduler_vtable;
783 this->ref = 1;
784 this->id = InterlockedIncrement(&scheduler_id);
785 SchedulerPolicy_copy_ctor(&this->policy, policy);
787 GetSystemInfo(&si);
788 this->virt_proc_no = SchedulerPolicy_GetPolicyValue(&this->policy, MaxConcurrency);
789 if(this->virt_proc_no > si.dwNumberOfProcessors)
790 this->virt_proc_no = si.dwNumberOfProcessors;
792 this->shutdown_count = this->shutdown_size = 0;
793 this->shutdown_events = NULL;
795 InitializeCriticalSection(&this->cs);
796 this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": ThreadScheduler");
797 return this;
800 /* ?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z */
801 /* ?Create@Scheduler@Concurrency@@SAPEAV12@AEBVSchedulerPolicy@2@@Z */
802 Scheduler* __cdecl Scheduler_Create(const SchedulerPolicy *policy)
804 ThreadScheduler *ret;
806 TRACE("(%p)\n", policy);
808 ret = operator_new(sizeof(*ret));
809 return &ThreadScheduler_ctor(ret, policy)->scheduler;
812 /* ?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ */
813 void __cdecl Scheduler_ResetDefaultSchedulerPolicy(void)
815 TRACE("()\n");
817 EnterCriticalSection(&default_scheduler_cs);
818 if(default_scheduler_policy.policy_container)
819 SchedulerPolicy_dtor(&default_scheduler_policy);
820 SchedulerPolicy_ctor(&default_scheduler_policy);
821 LeaveCriticalSection(&default_scheduler_cs);
824 /* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
825 /* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
826 void __cdecl Scheduler_SetDefaultSchedulerPolicy(const SchedulerPolicy *policy)
828 TRACE("(%p)\n", policy);
830 EnterCriticalSection(&default_scheduler_cs);
831 if(!default_scheduler_policy.policy_container)
832 SchedulerPolicy_copy_ctor(&default_scheduler_policy, policy);
833 else
834 SchedulerPolicy_op_assign(&default_scheduler_policy, policy);
835 LeaveCriticalSection(&default_scheduler_cs);
838 /* ?Create@CurrentScheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
839 /* ?Create@CurrentScheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
840 void __cdecl CurrentScheduler_Create(const SchedulerPolicy *policy)
842 Scheduler *scheduler;
844 TRACE("(%p)\n", policy);
846 scheduler = Scheduler_Create(policy);
847 call_Scheduler_Attach(scheduler);
850 /* ?Detach@CurrentScheduler@Concurrency@@SAXXZ */
851 void __cdecl CurrentScheduler_Detach(void)
853 ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();
855 TRACE("()\n");
857 if(!context)
858 throw_exception(EXCEPTION_IMPROPER_SCHEDULER_DETACH, 0, NULL);
860 if(context->context.vtable != &ExternalContextBase_vtable) {
861 ERR("unknown context set\n");
862 return;
865 if(!context->scheduler.next)
866 throw_exception(EXCEPTION_IMPROPER_SCHEDULER_DETACH, 0, NULL);
868 call_Scheduler_Release(context->scheduler.scheduler);
869 if(!context->scheduler.next) {
870 context->scheduler.scheduler = NULL;
871 }else {
872 struct scheduler_list *entry = context->scheduler.next;
873 context->scheduler.scheduler = entry->scheduler;
874 context->scheduler.next = entry->next;
875 operator_delete(entry);
879 static void create_default_scheduler(void)
881 if(default_scheduler)
882 return;
884 EnterCriticalSection(&default_scheduler_cs);
885 if(!default_scheduler) {
886 ThreadScheduler *scheduler;
888 if(!default_scheduler_policy.policy_container)
889 SchedulerPolicy_ctor(&default_scheduler_policy);
891 scheduler = operator_new(sizeof(*scheduler));
892 ThreadScheduler_ctor(scheduler, &default_scheduler_policy);
893 default_scheduler = scheduler;
895 LeaveCriticalSection(&default_scheduler_cs);
898 /* ?Get@CurrentScheduler@Concurrency@@SAPAVScheduler@2@XZ */
899 /* ?Get@CurrentScheduler@Concurrency@@SAPEAVScheduler@2@XZ */
900 Scheduler* __cdecl CurrentScheduler_Get(void)
902 TRACE("()\n");
903 return get_current_scheduler();
#if _MSVCR_VER > 100
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@AAVlocation@2@@Z */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@AEAVlocation@2@@Z */
/* Forwards to the current scheduler's placed CreateScheduleGroup. */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup_loc(/*location*/void *placement)
{
    TRACE("(%p)\n", placement);
    return call_Scheduler_CreateScheduleGroup_loc(get_current_scheduler(), placement);
}
#endif
916 /* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@XZ */
917 /* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@XZ */
918 /*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup(void)
920 TRACE("()\n");
921 return call_Scheduler_CreateScheduleGroup(get_current_scheduler());
924 /* ?GetNumberOfVirtualProcessors@CurrentScheduler@Concurrency@@SAIXZ */
925 unsigned int __cdecl CurrentScheduler_GetNumberOfVirtualProcessors(void)
927 Scheduler *scheduler = try_get_current_scheduler();
929 TRACE("()\n");
931 if(!scheduler)
932 return -1;
933 return call_Scheduler_GetNumberOfVirtualProcessors(scheduler);
936 /* ?GetPolicy@CurrentScheduler@Concurrency@@SA?AVSchedulerPolicy@2@XZ */
937 SchedulerPolicy* __cdecl CurrentScheduler_GetPolicy(SchedulerPolicy *policy)
939 TRACE("(%p)\n", policy);
940 return call_Scheduler_GetPolicy(get_current_scheduler(), policy);
943 /* ?Id@CurrentScheduler@Concurrency@@SAIXZ */
944 unsigned int __cdecl CurrentScheduler_Id(void)
946 Scheduler *scheduler = try_get_current_scheduler();
948 TRACE("()\n");
950 if(!scheduler)
951 return -1;
952 return call_Scheduler_Id(scheduler);
#if _MSVCR_VER > 100
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NABVlocation@2@@Z */
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NAEBVlocation@2@@Z */
/* Forwards to the current scheduler; FALSE when no scheduler exists. */
bool __cdecl CurrentScheduler_IsAvailableLocation(const /*location*/void *placement)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("(%p)\n", placement);

    if(!scheduler)
        return FALSE;
    return call_Scheduler_IsAvailableLocation(scheduler, placement);
}
#endif
970 /* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPAX@Z */
971 /* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPEAX@Z */
972 void __cdecl CurrentScheduler_RegisterShutdownEvent(HANDLE event)
974 TRACE("(%p)\n", event);
975 call_Scheduler_RegisterShutdownEvent(get_current_scheduler(), event);
#if _MSVCR_VER > 100
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0AAVlocation@2@@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0AEAVlocation@2@@Z */
/* Forwards a placed task to the current scheduler. */
void __cdecl CurrentScheduler_ScheduleTask_loc(void (__cdecl *proc)(void*),
        void *data, /*location*/void *placement)
{
    TRACE("(%p %p %p)\n", proc, data, placement);
    call_Scheduler_ScheduleTask_loc(get_current_scheduler(), proc, data, placement);
}
#endif
989 /* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0@Z */
990 /* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0@Z */
991 void __cdecl CurrentScheduler_ScheduleTask(void (__cdecl *proc)(void*), void *data)
993 TRACE("(%p %p)\n", proc, data);
994 call_Scheduler_ScheduleTask(get_current_scheduler(), proc, data);
/* ??0_Scheduler@details@Concurrency@@QAE@PAVScheduler@2@@Z */
/* ??0_Scheduler@details@Concurrency@@QEAA@PEAVScheduler@2@@Z */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor_sched, 8)
/* Construct a _Scheduler wrapper around an existing Scheduler pointer.
 * Does not take a reference on the wrapped scheduler. */
_Scheduler* __thiscall _Scheduler_ctor_sched(_Scheduler *this, Scheduler *scheduler)
{
    TRACE("(%p %p)\n", this, scheduler);

    this->scheduler = scheduler;
    return this;
}
/* ??_F_Scheduler@details@Concurrency@@QAEXXZ */
/* ??_F_Scheduler@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor, 4)
/* Default-construct a _Scheduler: wraps no scheduler (NULL). */
_Scheduler* __thiscall _Scheduler_ctor(_Scheduler *this)
{
    return _Scheduler_ctor_sched(this, NULL);
}
/* ?_GetScheduler@_Scheduler@details@Concurrency@@QAEPAVScheduler@3@XZ */
/* ?_GetScheduler@_Scheduler@details@Concurrency@@QEAAPEAVScheduler@3@XZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__GetScheduler, 4)
/* Return the wrapped Scheduler pointer (may be NULL for a
 * default-constructed _Scheduler). */
Scheduler* __thiscall _Scheduler__GetScheduler(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return this->scheduler;
}
/* ?_Reference@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Reference@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Reference, 4)
/* Forward a Reference() call to the wrapped scheduler's vtable;
 * returns the new reference count. */
unsigned int __thiscall _Scheduler__Reference(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Reference(this->scheduler);
}
/* ?_Release@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Release@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Release, 4)
/* Forward a Release() call to the wrapped scheduler's vtable;
 * returns the remaining reference count. */
unsigned int __thiscall _Scheduler__Release(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Release(this->scheduler);
}
/* ?_Get@_CurrentScheduler@details@Concurrency@@SA?AV_Scheduler@23@XZ */
/* Fill *ret with a _Scheduler wrapping the calling thread's scheduler,
 * creating/attaching a scheduler on demand. */
_Scheduler* __cdecl _CurrentScheduler__Get(_Scheduler *ret)
{
    TRACE("()\n");
    return _Scheduler_ctor_sched(ret, get_current_scheduler());
}
/* ?_GetNumberOfVirtualProcessors@_CurrentScheduler@details@Concurrency@@SAIXZ */
unsigned int __cdecl _CurrentScheduler__GetNumberOfVirtualProcessors(void)
{
    TRACE("()\n");
    /* Ensure a scheduler is attached first: the CurrentScheduler variant
     * only uses try_get_current_scheduler() and would return -1 otherwise. */
    get_current_scheduler();
    return CurrentScheduler_GetNumberOfVirtualProcessors();
}
/* ?_Id@_CurrentScheduler@details@Concurrency@@SAIXZ */
unsigned int __cdecl _CurrentScheduler__Id(void)
{
    TRACE("()\n");
    /* Ensure a scheduler is attached first: the CurrentScheduler variant
     * only uses try_get_current_scheduler() and would return -1 otherwise. */
    get_current_scheduler();
    return CurrentScheduler_Id();
}
/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPEAX@Z0@Z */
/* Thin wrapper: queue proc(data) on the current scheduler.
 * CurrentScheduler_ScheduleTask attaches a scheduler on demand. */
void __cdecl _CurrentScheduler__ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    CurrentScheduler_ScheduleTask(proc, data);
}
#ifdef __ASM_USE_THISCALL_WRAPPER

/* i386 thunks used to call __thiscall virtual methods from __cdecl code:
 * pop the return address into %eax, pop the object pointer into %ecx
 * (the thiscall "this" register), push the return address back, load the
 * vtable pointer from the object and tail-jump through slot "off". */
#define DEFINE_VTBL_WRAPPER(off)            \
    __ASM_GLOBAL_FUNC(vtbl_wrapper_ ## off, \
        "popl %eax\n\t"                     \
        "popl %ecx\n\t"                     \
        "pushl %eax\n\t"                    \
        "movl 0(%ecx), %eax\n\t"            \
        "jmp *" #off "(%eax)\n\t")

/* One wrapper per vtable slot offset used by the CALL_VTBL_FUNC macros. */
DEFINE_VTBL_WRAPPER(0);
DEFINE_VTBL_WRAPPER(4);
DEFINE_VTBL_WRAPPER(8);
DEFINE_VTBL_WRAPPER(12);
DEFINE_VTBL_WRAPPER(16);
DEFINE_VTBL_WRAPPER(20);
DEFINE_VTBL_WRAPPER(24);
DEFINE_VTBL_WRAPPER(28);
DEFINE_VTBL_WRAPPER(32);
DEFINE_VTBL_WRAPPER(36);
DEFINE_VTBL_WRAPPER(40);
DEFINE_VTBL_WRAPPER(44);
DEFINE_VTBL_WRAPPER(48);

#endif
extern const vtable_ptr type_info_vtable;
/* RTTI descriptors for the Concurrency scheduler/context class hierarchy.
 * The decorated names must match native msvcr* RTTI so C++ code running
 * against this DLL can dynamic_cast/catch these types. */
DEFINE_RTTI_DATA0(Context, 0, ".?AVContext@Concurrency@@")
DEFINE_RTTI_DATA1(ContextBase, 0, &Context_rtti_base_descriptor, ".?AVContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ExternalContextBase, 0, &ContextBase_rtti_base_descriptor,
        &Context_rtti_base_descriptor, ".?AVExternalContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA0(Scheduler, 0, ".?AVScheduler@Concurrency@@")
DEFINE_RTTI_DATA1(SchedulerBase, 0, &Scheduler_rtti_base_descriptor, ".?AVSchedulerBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ThreadScheduler, 0, &SchedulerBase_rtti_base_descriptor,
        &Scheduler_rtti_base_descriptor, ".?AVThreadScheduler@details@Concurrency@@")
/* Vtables emitted in asm so their layout matches the native msvcr* ABI.
 * NOTE(review): slot order is ABI — it must agree with the call_*
 * CALL_VTBL_FUNC offsets used elsewhere in this file; do not reorder. */
__ASM_BLOCK_BEGIN(scheduler_vtables)
    __ASM_VTABLE(ExternalContextBase,
            VTABLE_ADD_FUNC(ExternalContextBase_GetId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetVirtualProcessorId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetScheduleGroupId)
            VTABLE_ADD_FUNC(ExternalContextBase_Unblock)
            VTABLE_ADD_FUNC(ExternalContextBase_IsSynchronouslyBlocked)
            VTABLE_ADD_FUNC(ExternalContextBase_vector_dtor));
    __ASM_VTABLE(ThreadScheduler,
            VTABLE_ADD_FUNC(ThreadScheduler_vector_dtor)
            VTABLE_ADD_FUNC(ThreadScheduler_Id)
            VTABLE_ADD_FUNC(ThreadScheduler_GetNumberOfVirtualProcessors)
            VTABLE_ADD_FUNC(ThreadScheduler_GetPolicy)
            VTABLE_ADD_FUNC(ThreadScheduler_Reference)
            VTABLE_ADD_FUNC(ThreadScheduler_Release)
            VTABLE_ADD_FUNC(ThreadScheduler_RegisterShutdownEvent)
            VTABLE_ADD_FUNC(ThreadScheduler_Attach)
            /* The *_loc overloads only exist in msvcr110+ vtables. */
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_IsAvailableLocation)
#endif
            );
__ASM_BLOCK_END
/* DLL startup: fix up the scheduler RTTI data for the module's load
 * address. Only needed on x86-64, where RTTI holds image-relative
 * offsets computed from the module base. */
void msvcrt_init_scheduler(void *base)
{
#ifdef __x86_64__
    init_Context_rtti(base);
    init_ContextBase_rtti(base);
    init_ExternalContextBase_rtti(base);
    init_Scheduler_rtti(base);
    init_SchedulerBase_rtti(base);
    init_ThreadScheduler_rtti(base);
#endif
}
/* DLL shutdown: release process-wide scheduler resources — the context
 * TLS slot, the default scheduler policy, and the default scheduler
 * object itself (if they were ever created). */
void msvcrt_free_scheduler(void)
{
    if (context_tls_index != TLS_OUT_OF_INDEXES)
        TlsFree(context_tls_index);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    if(default_scheduler) {
        ThreadScheduler_dtor(default_scheduler);
        operator_delete(default_scheduler);
    }
}
1165 void msvcrt_free_scheduler_thread(void)
1167 Context *context = try_get_current_context();
1168 if (!context) return;
1169 call_Context_dtor(context, 1);
1172 #endif /* _MSVCR_VER >= 100 */