wine.git: dlls/msvcrt/scheduler.c

/*
 * msvcrt.dll C++ objects
 *
 * Copyright 2017 Piotr Caban
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"

#include <stdarg.h>

#include "windef.h"
#include "winternl.h"
#include "wine/debug.h"
#include "msvcrt.h"
#include "cppexcept.h"
#include "cxx.h"

#if _MSVCR_VER >= 100

WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);

static int context_id = -1;
static int scheduler_id = -1;

typedef enum {
    SchedulerKind,
    MaxConcurrency,
    MinConcurrency,
    TargetOversubscriptionFactor,
    LocalContextCacheSize,
    ContextStackSize,
    ContextPriority,
    SchedulingProtocol,
    DynamicProgressFeedback,
    WinRTInitialization,
    last_policy_id
} PolicyElementKey;

typedef struct {
    struct _policy_container {
        unsigned int policies[last_policy_id];
    } *policy_container;
} SchedulerPolicy;

typedef struct {
    const vtable_ptr *vtable;
} Context;
#define call_Context_GetId(this) CALL_VTBL_FUNC(this, 0, \
        unsigned int, (const Context*), (this))
#define call_Context_GetVirtualProcessorId(this) CALL_VTBL_FUNC(this, 4, \
        unsigned int, (const Context*), (this))
#define call_Context_GetScheduleGroupId(this) CALL_VTBL_FUNC(this, 8, \
        unsigned int, (const Context*), (this))
#define call_Context_dtor(this, flags) CALL_VTBL_FUNC(this, 20, \
        Context*, (Context*, unsigned int), (this, flags))

union allocator_cache_entry {
    struct _free {
        int depth;
        union allocator_cache_entry *next;
    } free;
    struct _alloc {
        int bucket;
        char mem[1];
    } alloc;
};

struct scheduler_list {
    struct Scheduler *scheduler;
    struct scheduler_list *next;
};

typedef struct {
    Context context;
    struct scheduler_list scheduler;
    unsigned int id;
    union allocator_cache_entry *allocator_cache[8];
} ExternalContextBase;
extern const vtable_ptr MSVCRT_ExternalContextBase_vtable;
static void ExternalContextBase_ctor(ExternalContextBase*);

typedef struct Scheduler {
    const vtable_ptr *vtable;
} Scheduler;
#define call_Scheduler_Id(this) CALL_VTBL_FUNC(this, 4, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetNumberOfVirtualProcessors(this) CALL_VTBL_FUNC(this, 8, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetPolicy(this,policy) CALL_VTBL_FUNC(this, 12, \
        SchedulerPolicy*, (Scheduler*,SchedulerPolicy*), (this,policy))
#define call_Scheduler_Reference(this) CALL_VTBL_FUNC(this, 16, unsigned int, (Scheduler*), (this))
#define call_Scheduler_Release(this) CALL_VTBL_FUNC(this, 20, unsigned int, (Scheduler*), (this))
#define call_Scheduler_RegisterShutdownEvent(this,event) CALL_VTBL_FUNC(this, 24, void, (Scheduler*,HANDLE), (this,event))
#define call_Scheduler_Attach(this) CALL_VTBL_FUNC(this, 28, void, (Scheduler*), (this))
#if _MSVCR_VER > 100
#define call_Scheduler_CreateScheduleGroup_loc(this,placement) CALL_VTBL_FUNC(this, 32, \
        /*ScheduleGroup*/void*, (Scheduler*,/*location*/void*), (this,placement))
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 36, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask_loc(this,proc,data,placement) CALL_VTBL_FUNC(this, 40, \
        void, (Scheduler*,void (__cdecl*)(void*),void*,/*location*/void*), (this,proc,data,placement))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 44, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#define call_Scheduler_IsAvailableLocation(this,placement) CALL_VTBL_FUNC(this, 48, \
        MSVCRT_bool, (Scheduler*,const /*location*/void*), (this,placement))
#else
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 32, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 36, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#endif

typedef struct {
    Scheduler scheduler;
    LONG ref;
    unsigned int id;
    unsigned int virt_proc_no;
    SchedulerPolicy policy;
    int shutdown_count;
    int shutdown_size;
    HANDLE *shutdown_events;
    CRITICAL_SECTION cs;
} ThreadScheduler;
extern const vtable_ptr MSVCRT_ThreadScheduler_vtable;

typedef struct {
    Scheduler *scheduler;
} _Scheduler;

typedef struct {
    char empty;
} _CurrentScheduler;

static int context_tls_index = TLS_OUT_OF_INDEXES;

static CRITICAL_SECTION default_scheduler_cs;
static CRITICAL_SECTION_DEBUG default_scheduler_cs_debug =
{
    0, 0, &default_scheduler_cs,
    { &default_scheduler_cs_debug.ProcessLocksList, &default_scheduler_cs_debug.ProcessLocksList },
      0, 0, { (DWORD_PTR)(__FILE__ ": default_scheduler_cs") }
};
static CRITICAL_SECTION default_scheduler_cs = { &default_scheduler_cs_debug, -1, 0, 0, 0, 0 };
static SchedulerPolicy default_scheduler_policy;
static ThreadScheduler *default_scheduler;

static void create_default_scheduler(void);

static Context* try_get_current_context(void)
{
    if (context_tls_index == TLS_OUT_OF_INDEXES)
        return NULL;
    return TlsGetValue(context_tls_index);
}
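
/* Returns the calling thread's context, creating it on first use: the TLS
 * slot is allocated lazily and published with InterlockedCompareExchange (a
 * losing thread frees its redundant slot), after which an ExternalContextBase
 * is allocated for the thread and stored in that slot. */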
static Context* get_current_context(void)
{
    Context *ret;

    if (context_tls_index == TLS_OUT_OF_INDEXES) {
        int tls_index = TlsAlloc();
        if (tls_index == TLS_OUT_OF_INDEXES) {
            throw_exception(EXCEPTION_SCHEDULER_RESOURCE_ALLOCATION_ERROR,
                    HRESULT_FROM_WIN32(GetLastError()), NULL);
            return NULL;
        }

        if(InterlockedCompareExchange(&context_tls_index, tls_index, TLS_OUT_OF_INDEXES) != TLS_OUT_OF_INDEXES)
            TlsFree(tls_index);
    }

    ret = TlsGetValue(context_tls_index);
    if (!ret) {
        ExternalContextBase *context = MSVCRT_operator_new(sizeof(ExternalContextBase));
        ExternalContextBase_ctor(context);
        TlsSetValue(context_tls_index, context);
        ret = &context->context;
    }
    return ret;
}

static Scheduler* try_get_current_scheduler(void)
{
    ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();

    if (!context)
        return NULL;

    if (context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return NULL;
    }
    return context->scheduler.scheduler;
}

static Scheduler* get_current_scheduler(void)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    if (context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return NULL;
    }
    return context->scheduler.scheduler;
}

/* ?CurrentContext@Context@Concurrency@@SAPAV12@XZ */
/* ?CurrentContext@Context@Concurrency@@SAPEAV12@XZ */
Context* __cdecl Context_CurrentContext(void)
{
    TRACE("()\n");
    return get_current_context();
}

/* ?Id@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_Id(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetId(ctx) : -1;
}

/* ?Block@Context@Concurrency@@SAXXZ */
void __cdecl Context_Block(void)
{
    FIXME("()\n");
}

/* ?Yield@Context@Concurrency@@SAXXZ */
/* ?_Yield@_Context@details@Concurrency@@SAXXZ */
void __cdecl Context_Yield(void)
{
    FIXME("()\n");
}

/* ?_SpinYield@Context@Concurrency@@SAXXZ */
void __cdecl Context__SpinYield(void)
{
    FIXME("()\n");
}

/* ?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ */
MSVCRT_bool __cdecl Context_IsCurrentTaskCollectionCanceling(void)
{
    FIXME("()\n");
    return FALSE;
}

/* ?Oversubscribe@Context@Concurrency@@SAX_N@Z */
void __cdecl Context_Oversubscribe(MSVCRT_bool begin)
{
    FIXME("(%x)\n", begin);
}

/* ?ScheduleGroupId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_ScheduleGroupId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetScheduleGroupId(ctx) : -1;
}

/* ?VirtualProcessorId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_VirtualProcessorId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetVirtualProcessorId(ctx) : -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetId, 4)
unsigned int __thiscall ExternalContextBase_GetId(const ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetVirtualProcessorId, 4)
unsigned int __thiscall ExternalContextBase_GetVirtualProcessorId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetScheduleGroupId, 4)
unsigned int __thiscall ExternalContextBase_GetScheduleGroupId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Unblock, 4)
void __thiscall ExternalContextBase_Unblock(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_IsSynchronouslyBlocked, 4)
MSVCRT_bool __thiscall ExternalContextBase_IsSynchronouslyBlocked(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return FALSE;
}

static void ExternalContextBase_dtor(ExternalContextBase *this)
{
    struct scheduler_list *scheduler_cur, *scheduler_next;
    union allocator_cache_entry *next, *cur;
    int i;

    /* TODO: move the allocator cache to scheduler so it can be reused */
    for(i=0; i<ARRAY_SIZE(this->allocator_cache); i++) {
        for(cur = this->allocator_cache[i]; cur; cur=next) {
            next = cur->free.next;
            MSVCRT_operator_delete(cur);
        }
    }

    if (this->scheduler.scheduler) {
        call_Scheduler_Release(this->scheduler.scheduler);

        for(scheduler_cur=this->scheduler.next; scheduler_cur; scheduler_cur=scheduler_next) {
            scheduler_next = scheduler_cur->next;
            call_Scheduler_Release(scheduler_cur->scheduler);
            MSVCRT_operator_delete(scheduler_cur);
        }
    }
}
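
/* Vector-deleting destructor: bit 1 of flags means the object's memory is
 * freed, bit 2 means an array is being destroyed, with the element count
 * stored in an INT_PTR just before the first object. */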
DEFINE_THISCALL_WRAPPER(ExternalContextBase_vector_dtor, 8)
Context* __thiscall ExternalContextBase_vector_dtor(ExternalContextBase *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ExternalContextBase_dtor(this+i);
        MSVCRT_operator_delete(ptr);
    } else {
        ExternalContextBase_dtor(this);
        if(flags & 1)
            MSVCRT_operator_delete(this);
    }

    return &this->context;
}

static void ExternalContextBase_ctor(ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);

    memset(this, 0, sizeof(*this));
    this->context.vtable = &MSVCRT_ExternalContextBase_vtable;
    this->id = InterlockedIncrement(&context_id);

    create_default_scheduler();
    this->scheduler.scheduler = &default_scheduler->scheduler;
    call_Scheduler_Reference(&default_scheduler->scheduler);
}
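
/* Per-context allocator: requests (including the cache-entry header) are
 * served from power-of-two buckets of 16 << i bytes (i = 0..7).  Larger
 * requests fall back to operator new and are marked with bucket -1 so that
 * Concurrency_Free releases them directly instead of caching them. */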
/* ?Alloc@Concurrency@@YAPAXI@Z */
/* ?Alloc@Concurrency@@YAPEAX_K@Z */
void * CDECL Concurrency_Alloc(MSVCRT_size_t size)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    union allocator_cache_entry *p;

    size += FIELD_OFFSET(union allocator_cache_entry, alloc.mem);
    if (size < sizeof(*p))
        size = sizeof(*p);

    if (context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        p = MSVCRT_operator_new(size);
        p->alloc.bucket = -1;
    }else {
        int i;

        C_ASSERT(sizeof(union allocator_cache_entry) <= 1 << 4);
        for(i=0; i<ARRAY_SIZE(context->allocator_cache); i++)
            if (1 << (i+4) >= size) break;

        if(i==ARRAY_SIZE(context->allocator_cache)) {
            p = MSVCRT_operator_new(size);
            p->alloc.bucket = -1;
        }else if (context->allocator_cache[i]) {
            p = context->allocator_cache[i];
            context->allocator_cache[i] = p->free.next;
            p->alloc.bucket = i;
        }else {
            p = MSVCRT_operator_new(1 << (i+4));
            p->alloc.bucket = i;
        }
    }

    TRACE("(%ld) returning %p\n", size, p->alloc.mem);
    return p->alloc.mem;
}

/* ?Free@Concurrency@@YAXPAX@Z */
/* ?Free@Concurrency@@YAXPEAX@Z */
void CDECL Concurrency_Free(void* mem)
{
    union allocator_cache_entry *p = (union allocator_cache_entry*)((char*)mem-FIELD_OFFSET(union allocator_cache_entry, alloc.mem));
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    int bucket = p->alloc.bucket;

    TRACE("(%p)\n", mem);

    if (context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        MSVCRT_operator_delete(p);
    }else {
        if(bucket >= 0 && bucket < ARRAY_SIZE(context->allocator_cache) &&
            (!context->allocator_cache[bucket] || context->allocator_cache[bucket]->free.depth < 20)) {
            p->free.next = context->allocator_cache[bucket];
            p->free.depth = p->free.next ? p->free.next->free.depth+1 : 0;
            context->allocator_cache[bucket] = p;
        }else {
            MSVCRT_operator_delete(p);
        }
    }
}

/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z */
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QEAAIW4PolicyElementKey@2@I@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetPolicyValue, 12)
unsigned int __thiscall SchedulerPolicy_SetPolicyValue(SchedulerPolicy *this,
        PolicyElementKey policy, unsigned int val)
{
    unsigned int ret;

    TRACE("(%p %d %d)\n", this, policy, val);

    if (policy == MinConcurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "MinConcurrency");
    if (policy == MaxConcurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "MaxConcurrency");
    if (policy >= last_policy_id)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "Invalid policy");

    switch(policy) {
    case SchedulerKind:
        if (val)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "SchedulerKind");
        break;
    case TargetOversubscriptionFactor:
        if (!val)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE,
                    0, "TargetOversubscriptionFactor");
        break;
    case ContextPriority:
        if (((int)val < -7 /* THREAD_PRIORITY_REALTIME_LOWEST */
            || val > 6 /* THREAD_PRIORITY_REALTIME_HIGHEST */)
            && val != THREAD_PRIORITY_IDLE && val != THREAD_PRIORITY_TIME_CRITICAL
            && val != INHERIT_THREAD_PRIORITY)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "ContextPriority");
        break;
    case SchedulingProtocol:
    case DynamicProgressFeedback:
    case WinRTInitialization:
        if (val != 0 && val != 1)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "SchedulingProtocol");
        break;
    default:
        break;
    }

    ret = this->policy_container->policies[policy];
    this->policy_container->policies[policy] = val;
    return ret;
}

/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z */
/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QEAAXII@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetConcurrencyLimits, 12)
void __thiscall SchedulerPolicy_SetConcurrencyLimits(SchedulerPolicy *this,
        unsigned int min_concurrency, unsigned int max_concurrency)
{
    TRACE("(%p %d %d)\n", this, min_concurrency, max_concurrency);

    if (min_concurrency > max_concurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_THREAD_SPECIFICATION, 0, NULL);
    if (!max_concurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "MaxConcurrency");

    this->policy_container->policies[MinConcurrency] = min_concurrency;
    this->policy_container->policies[MaxConcurrency] = max_concurrency;
}

/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z */
/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QEBAIW4PolicyElementKey@2@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_GetPolicyValue, 8)
unsigned int __thiscall SchedulerPolicy_GetPolicyValue(
        const SchedulerPolicy *this, PolicyElementKey policy)
{
    TRACE("(%p %d)\n", this, policy);

    if (policy >= last_policy_id)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "Invalid policy");
    return this->policy_container->policies[policy];
}

/* ??0SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_ctor, 4)
SchedulerPolicy* __thiscall SchedulerPolicy_ctor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);

    this->policy_container = MSVCRT_operator_new(sizeof(*this->policy_container));
    /* TODO: default values can probably be affected by CurrentScheduler */
    this->policy_container->policies[SchedulerKind] = 0;
    this->policy_container->policies[MaxConcurrency] = -1;
    this->policy_container->policies[MinConcurrency] = 1;
    this->policy_container->policies[TargetOversubscriptionFactor] = 1;
    this->policy_container->policies[LocalContextCacheSize] = 8;
    this->policy_container->policies[ContextStackSize] = 0;
    this->policy_container->policies[ContextPriority] = THREAD_PRIORITY_NORMAL;
    this->policy_container->policies[SchedulingProtocol] = 0;
    this->policy_container->policies[DynamicProgressFeedback] = 1;
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAA@IZZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@_KZZ */
/* TODO: don't leak policy_container on exception */
SchedulerPolicy* WINAPIV SchedulerPolicy_ctor_policies(
        SchedulerPolicy *this, MSVCRT_size_t n, ...)
{
    unsigned int min_concurrency, max_concurrency;
    __ms_va_list valist;
    MSVCRT_size_t i;

    TRACE("(%p %ld)\n", this, n);

    SchedulerPolicy_ctor(this);
    min_concurrency = this->policy_container->policies[MinConcurrency];
    max_concurrency = this->policy_container->policies[MaxConcurrency];

    __ms_va_start(valist, n);
    for(i=0; i<n; i++) {
        PolicyElementKey policy = va_arg(valist, PolicyElementKey);
        unsigned int val = va_arg(valist, unsigned int);

        if(policy == MinConcurrency)
            min_concurrency = val;
        else if(policy == MaxConcurrency)
            max_concurrency = val;
        else
            SchedulerPolicy_SetPolicyValue(this, policy, val);
    }
    __ms_va_end(valist);

    SchedulerPolicy_SetConcurrencyLimits(this, min_concurrency, max_concurrency);
    return this;
}

/* ??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z */
/* ??4SchedulerPolicy@Concurrency@@QEAAAEAV01@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_op_assign, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_op_assign(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    memcpy(this->policy_container->policies, rhs->policy_container->policies,
            sizeof(this->policy_container->policies));
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z */
/* ??0SchedulerPolicy@Concurrency@@QEAA@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_copy_ctor, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_copy_ctor(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    SchedulerPolicy_ctor(this);
    return SchedulerPolicy_op_assign(this, rhs);
}

/* ??1SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??1SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_dtor, 4)
void __thiscall SchedulerPolicy_dtor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);
    MSVCRT_operator_delete(this->policy_container);
}

static void ThreadScheduler_dtor(ThreadScheduler *this)
{
    int i;

    if(this->ref != 0) WARN("ref = %d\n", this->ref);
    SchedulerPolicy_dtor(&this->policy);

    for(i=0; i<this->shutdown_count; i++)
        SetEvent(this->shutdown_events[i]);
    MSVCRT_operator_delete(this->shutdown_events);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Id, 4)
unsigned int __thiscall ThreadScheduler_Id(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetNumberOfVirtualProcessors, 4)
unsigned int __thiscall ThreadScheduler_GetNumberOfVirtualProcessors(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->virt_proc_no;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetPolicy, 8)
SchedulerPolicy* __thiscall ThreadScheduler_GetPolicy(
        const ThreadScheduler *this, SchedulerPolicy *ret)
{
    TRACE("(%p %p)\n", this, ret);
    return SchedulerPolicy_copy_ctor(ret, &this->policy);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Reference, 4)
unsigned int __thiscall ThreadScheduler_Reference(ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return InterlockedIncrement(&this->ref);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Release, 4)
unsigned int __thiscall ThreadScheduler_Release(ThreadScheduler *this)
{
    unsigned int ret = InterlockedDecrement(&this->ref);

    TRACE("(%p)\n", this);

    if(!ret) {
        ThreadScheduler_dtor(this);
        MSVCRT_operator_delete(this);
    }
    return ret;
}
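
/* The shutdown event array grows by doubling (starting at one entry) under
 * the scheduler lock; the registered events are signalled from
 * ThreadScheduler_dtor. */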
DEFINE_THISCALL_WRAPPER(ThreadScheduler_RegisterShutdownEvent, 8)
void __thiscall ThreadScheduler_RegisterShutdownEvent(ThreadScheduler *this, HANDLE event)
{
    HANDLE *shutdown_events;
    int size;

    TRACE("(%p %p)\n", this, event);

    EnterCriticalSection(&this->cs);

    size = this->shutdown_size ? this->shutdown_size * 2 : 1;
    shutdown_events = MSVCRT_operator_new(size * sizeof(*shutdown_events));
    memcpy(shutdown_events, this->shutdown_events,
            this->shutdown_count * sizeof(*shutdown_events));
    MSVCRT_operator_delete(this->shutdown_events);
    this->shutdown_size = size;
    this->shutdown_events = shutdown_events;
    this->shutdown_events[this->shutdown_count++] = event;

    LeaveCriticalSection(&this->cs);
}
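
/* Attaching pushes the context's current scheduler entry onto its scheduler
 * list and makes this scheduler current (taking a reference); attaching the
 * scheduler that is already current throws improper_scheduler_attach. */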
DEFINE_THISCALL_WRAPPER(ThreadScheduler_Attach, 4)
void __thiscall ThreadScheduler_Attach(ThreadScheduler *this)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    TRACE("(%p)\n", this);

    if(context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(context->scheduler.scheduler == &this->scheduler)
        throw_exception(EXCEPTION_IMPROPER_SCHEDULER_ATTACH, 0, NULL);

    if(context->scheduler.scheduler) {
        struct scheduler_list *l = MSVCRT_operator_new(sizeof(*l));
        *l = context->scheduler;
        context->scheduler.next = l;
    }
    context->scheduler.scheduler = &this->scheduler;
    ThreadScheduler_Reference(this);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup_loc, 8)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup_loc(
        ThreadScheduler *this, /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup, 4)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup(ThreadScheduler *this)
{
    FIXME("(%p) stub\n", this);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask_loc, 16)
void __thiscall ThreadScheduler_ScheduleTask_loc(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data, /*location*/void *placement)
{
    FIXME("(%p %p %p %p) stub\n", this, proc, data, placement);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask, 12)
void __thiscall ThreadScheduler_ScheduleTask(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data)
{
    FIXME("(%p %p %p) stub\n", this, proc, data);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_IsAvailableLocation, 8)
MSVCRT_bool __thiscall ThreadScheduler_IsAvailableLocation(
        const ThreadScheduler *this, const /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return FALSE;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_vector_dtor, 8)
Scheduler* __thiscall ThreadScheduler_vector_dtor(ThreadScheduler *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ThreadScheduler_dtor(this+i);
        MSVCRT_operator_delete(ptr);
    } else {
        ThreadScheduler_dtor(this);
        if(flags & 1)
            MSVCRT_operator_delete(this);
    }

    return &this->scheduler;
}

static ThreadScheduler* ThreadScheduler_ctor(ThreadScheduler *this,
        const SchedulerPolicy *policy)
{
    SYSTEM_INFO si;

    TRACE("(%p)->()\n", this);

    this->scheduler.vtable = &MSVCRT_ThreadScheduler_vtable;
    this->ref = 1;
    this->id = InterlockedIncrement(&scheduler_id);
    SchedulerPolicy_copy_ctor(&this->policy, policy);

    GetSystemInfo(&si);
    this->virt_proc_no = SchedulerPolicy_GetPolicyValue(&this->policy, MaxConcurrency);
    if(this->virt_proc_no > si.dwNumberOfProcessors)
        this->virt_proc_no = si.dwNumberOfProcessors;

    this->shutdown_count = this->shutdown_size = 0;
    this->shutdown_events = NULL;

    InitializeCriticalSection(&this->cs);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": ThreadScheduler");
    return this;
}

/* ?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z */
/* ?Create@Scheduler@Concurrency@@SAPEAV12@AEBVSchedulerPolicy@2@@Z */
Scheduler* __cdecl Scheduler_Create(const SchedulerPolicy *policy)
{
    ThreadScheduler *ret;

    TRACE("(%p)\n", policy);

    ret = MSVCRT_operator_new(sizeof(*ret));
    return &ThreadScheduler_ctor(ret, policy)->scheduler;
}

/* ?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ */
void __cdecl Scheduler_ResetDefaultSchedulerPolicy(void)
{
    TRACE("()\n");

    EnterCriticalSection(&default_scheduler_cs);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    SchedulerPolicy_ctor(&default_scheduler_policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl Scheduler_SetDefaultSchedulerPolicy(const SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler_policy.policy_container)
        SchedulerPolicy_copy_ctor(&default_scheduler_policy, policy);
    else
        SchedulerPolicy_op_assign(&default_scheduler_policy, policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Create@CurrentScheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?Create@CurrentScheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl CurrentScheduler_Create(const SchedulerPolicy *policy)
{
    Scheduler *scheduler;

    TRACE("(%p)\n", policy);

    scheduler = Scheduler_Create(policy);
    call_Scheduler_Attach(scheduler);
}

/* ?Detach@CurrentScheduler@Concurrency@@SAXXZ */
void __cdecl CurrentScheduler_Detach(void)
{
    ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();

    TRACE("()\n");

    if(!context)
        throw_exception(EXCEPTION_IMPROPER_SCHEDULER_DETACH, 0, NULL);

    if(context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(!context->scheduler.next)
        throw_exception(EXCEPTION_IMPROPER_SCHEDULER_DETACH, 0, NULL);

    call_Scheduler_Release(context->scheduler.scheduler);
    if(!context->scheduler.next) {
        context->scheduler.scheduler = NULL;
    }else {
        struct scheduler_list *entry = context->scheduler.next;
        context->scheduler.scheduler = entry->scheduler;
        context->scheduler.next = entry->next;
        MSVCRT_operator_delete(entry);
    }
}
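
/* Creates the process-wide default scheduler on first use, double-checking
 * under default_scheduler_cs; the default policy is constructed here if none
 * was set via Scheduler_SetDefaultSchedulerPolicy. */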
static void create_default_scheduler(void)
{
    if(default_scheduler)
        return;

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler) {
        ThreadScheduler *scheduler;

        if(!default_scheduler_policy.policy_container)
            SchedulerPolicy_ctor(&default_scheduler_policy);

        scheduler = MSVCRT_operator_new(sizeof(*scheduler));
        ThreadScheduler_ctor(scheduler, &default_scheduler_policy);
        default_scheduler = scheduler;
    }
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Get@CurrentScheduler@Concurrency@@SAPAVScheduler@2@XZ */
/* ?Get@CurrentScheduler@Concurrency@@SAPEAVScheduler@2@XZ */
Scheduler* __cdecl CurrentScheduler_Get(void)
{
    TRACE("()\n");
    return get_current_scheduler();
}

#if _MSVCR_VER > 100
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@AAVlocation@2@@Z */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@AEAVlocation@2@@Z */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup_loc(/*location*/void *placement)
{
    TRACE("(%p)\n", placement);
    return call_Scheduler_CreateScheduleGroup_loc(get_current_scheduler(), placement);
}
#endif

/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@XZ */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@XZ */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup(void)
{
    TRACE("()\n");
    return call_Scheduler_CreateScheduleGroup(get_current_scheduler());
}

/* ?GetNumberOfVirtualProcessors@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_GetNumberOfVirtualProcessors(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_GetNumberOfVirtualProcessors(scheduler);
}

/* ?GetPolicy@CurrentScheduler@Concurrency@@SA?AVSchedulerPolicy@2@XZ */
SchedulerPolicy* __cdecl CurrentScheduler_GetPolicy(SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);
    return call_Scheduler_GetPolicy(get_current_scheduler(), policy);
}

/* ?Id@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_Id(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_Id(scheduler);
}

#if _MSVCR_VER > 100
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NABVlocation@2@@Z */
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NAEBVlocation@2@@Z */
MSVCRT_bool __cdecl CurrentScheduler_IsAvailableLocation(const /*location*/void *placement)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("(%p)\n", placement);

    if(!scheduler)
        return FALSE;
    return call_Scheduler_IsAvailableLocation(scheduler, placement);
}
#endif

/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPAX@Z */
/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPEAX@Z */
void __cdecl CurrentScheduler_RegisterShutdownEvent(HANDLE event)
{
    TRACE("(%p)\n", event);
    call_Scheduler_RegisterShutdownEvent(get_current_scheduler(), event);
}

#if _MSVCR_VER > 100
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0AAVlocation@2@@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0AEAVlocation@2@@Z */
void __cdecl CurrentScheduler_ScheduleTask_loc(void (__cdecl *proc)(void*),
        void *data, /*location*/void *placement)
{
    TRACE("(%p %p %p)\n", proc, data, placement);
    call_Scheduler_ScheduleTask_loc(get_current_scheduler(), proc, data, placement);
}
#endif

/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0@Z */
void __cdecl CurrentScheduler_ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    call_Scheduler_ScheduleTask(get_current_scheduler(), proc, data);
}

/* ??0_Scheduler@details@Concurrency@@QAE@PAVScheduler@2@@Z */
/* ??0_Scheduler@details@Concurrency@@QEAA@PEAVScheduler@2@@Z */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor_sched, 8)
_Scheduler* __thiscall _Scheduler_ctor_sched(_Scheduler *this, Scheduler *scheduler)
{
    TRACE("(%p %p)\n", this, scheduler);

    this->scheduler = scheduler;
    return this;
}

/* ??_F_Scheduler@details@Concurrency@@QAEXXZ */
/* ??_F_Scheduler@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor, 4)
_Scheduler* __thiscall _Scheduler_ctor(_Scheduler *this)
{
    return _Scheduler_ctor_sched(this, NULL);
}

/* ?_GetScheduler@_Scheduler@details@Concurrency@@QAEPAVScheduler@3@XZ */
/* ?_GetScheduler@_Scheduler@details@Concurrency@@QEAAPEAVScheduler@3@XZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__GetScheduler, 4)
Scheduler* __thiscall _Scheduler__GetScheduler(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return this->scheduler;
}

/* ?_Reference@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Reference@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Reference, 4)
unsigned int __thiscall _Scheduler__Reference(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Reference(this->scheduler);
}

/* ?_Release@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Release@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Release, 4)
unsigned int __thiscall _Scheduler__Release(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Release(this->scheduler);
}

/* ?_Get@_CurrentScheduler@details@Concurrency@@SA?AV_Scheduler@23@XZ */
_Scheduler* __cdecl _CurrentScheduler__Get(_Scheduler *ret)
{
    TRACE("()\n");
    return _Scheduler_ctor_sched(ret, get_current_scheduler());
}

/* ?_GetNumberOfVirtualProcessors@_CurrentScheduler@details@Concurrency@@SAIXZ */
unsigned int __cdecl _CurrentScheduler__GetNumberOfVirtualProcessors(void)
{
    TRACE("()\n");
    get_current_scheduler();
    return CurrentScheduler_GetNumberOfVirtualProcessors();
}

/* ?_Id@_CurrentScheduler@details@Concurrency@@SAIXZ */
unsigned int __cdecl _CurrentScheduler__Id(void)
{
    TRACE("()\n");
    get_current_scheduler();
    return CurrentScheduler_Id();
}

/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPEAX@Z0@Z */
void __cdecl _CurrentScheduler__ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    CurrentScheduler_ScheduleTask(proc, data);
}
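
/* i386 thiscall thunks: each vtbl_wrapper_<off> pops the return address and
 * the this pointer (passed on the stack), restores the return address, loads
 * the vtable from this into %eax and tail-jumps to the slot at byte offset
 * <off>.  The offsets defined below match those used by the call_Context_*
 * and call_Scheduler_* macros above. */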
#ifdef __ASM_USE_THISCALL_WRAPPER

#define DEFINE_VTBL_WRAPPER(off)            \
    __ASM_GLOBAL_FUNC(vtbl_wrapper_ ## off, \
        "popl %eax\n\t"                     \
        "popl %ecx\n\t"                     \
        "pushl %eax\n\t"                    \
        "movl 0(%ecx), %eax\n\t"            \
        "jmp *" #off "(%eax)\n\t")

DEFINE_VTBL_WRAPPER(0);
DEFINE_VTBL_WRAPPER(4);
DEFINE_VTBL_WRAPPER(8);
DEFINE_VTBL_WRAPPER(12);
DEFINE_VTBL_WRAPPER(16);
DEFINE_VTBL_WRAPPER(20);
DEFINE_VTBL_WRAPPER(24);
DEFINE_VTBL_WRAPPER(28);
DEFINE_VTBL_WRAPPER(32);
DEFINE_VTBL_WRAPPER(36);
DEFINE_VTBL_WRAPPER(40);
DEFINE_VTBL_WRAPPER(44);
DEFINE_VTBL_WRAPPER(48);

#endif

extern const vtable_ptr MSVCRT_type_info_vtable;
DEFINE_RTTI_DATA0(Context, 0, ".?AVContext@Concurrency@@")
DEFINE_RTTI_DATA1(ContextBase, 0, &Context_rtti_base_descriptor, ".?AVContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ExternalContextBase, 0, &ContextBase_rtti_base_descriptor,
        &Context_rtti_base_descriptor, ".?AVExternalContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA0(Scheduler, 0, ".?AVScheduler@Concurrency@@")
DEFINE_RTTI_DATA1(SchedulerBase, 0, &Scheduler_rtti_base_descriptor, ".?AVSchedulerBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ThreadScheduler, 0, &SchedulerBase_rtti_base_descriptor,
        &Scheduler_rtti_base_descriptor, ".?AVThreadScheduler@details@Concurrency@@")
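
/* The slot order in these vtables defines the byte offsets assumed by the
 * call_Context_* and call_Scheduler_* macros above, so the two must be kept
 * in sync. */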
__ASM_BLOCK_BEGIN(scheduler_vtables)
    __ASM_VTABLE(ExternalContextBase,
            VTABLE_ADD_FUNC(ExternalContextBase_GetId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetVirtualProcessorId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetScheduleGroupId)
            VTABLE_ADD_FUNC(ExternalContextBase_Unblock)
            VTABLE_ADD_FUNC(ExternalContextBase_IsSynchronouslyBlocked)
            VTABLE_ADD_FUNC(ExternalContextBase_vector_dtor));
    __ASM_VTABLE(ThreadScheduler,
            VTABLE_ADD_FUNC(ThreadScheduler_vector_dtor)
            VTABLE_ADD_FUNC(ThreadScheduler_Id)
            VTABLE_ADD_FUNC(ThreadScheduler_GetNumberOfVirtualProcessors)
            VTABLE_ADD_FUNC(ThreadScheduler_GetPolicy)
            VTABLE_ADD_FUNC(ThreadScheduler_Reference)
            VTABLE_ADD_FUNC(ThreadScheduler_Release)
            VTABLE_ADD_FUNC(ThreadScheduler_RegisterShutdownEvent)
            VTABLE_ADD_FUNC(ThreadScheduler_Attach)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_IsAvailableLocation)
#endif
            );
__ASM_BLOCK_END

void msvcrt_init_scheduler(void *base)
{
#ifdef __x86_64__
    init_Context_rtti(base);
    init_ContextBase_rtti(base);
    init_ExternalContextBase_rtti(base);
    init_Scheduler_rtti(base);
    init_SchedulerBase_rtti(base);
    init_ThreadScheduler_rtti(base);
#endif
}

void msvcrt_free_scheduler(void)
{
    if (context_tls_index != TLS_OUT_OF_INDEXES)
        TlsFree(context_tls_index);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    if(default_scheduler) {
        ThreadScheduler_dtor(default_scheduler);
        MSVCRT_operator_delete(default_scheduler);
    }
}

void msvcrt_free_scheduler_thread(void)
{
    Context *context = try_get_current_context();
    if (!context) return;
    call_Context_dtor(context, 1);
}

#endif /* _MSVCR_VER >= 100 */