/*
 * msvcrt.dll C++ objects
 *
 * Copyright 2017 Piotr Caban
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"

#include <stdarg.h>

#include "windef.h"
#include "winternl.h"
#include "wine/debug.h"
#include "msvcrt.h"
#include "cppexcept.h"
#include "cxx.h"

#if _MSVCR_VER >= 100

WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);

static int context_id = -1;
static int scheduler_id = -1;

#ifdef __i386__

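/* on i386 the call_*() macros below go through these thunks: each one pops the
 * return address and the 'this' pointer pushed by the caller, restores the return
 * address, loads the object's vtable and tail-jumps to the slot at byte offset 'off' */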
#define DEFINE_VTBL_WRAPPER(off)            \
    __ASM_GLOBAL_FUNC(vtbl_wrapper_ ## off, \
        "popl %eax\n\t"                     \
        "popl %ecx\n\t"                     \
        "pushl %eax\n\t"                    \
        "movl 0(%ecx), %eax\n\t"            \
        "jmp *" #off "(%eax)\n\t")

DEFINE_VTBL_WRAPPER(0);
DEFINE_VTBL_WRAPPER(4);
DEFINE_VTBL_WRAPPER(8);
DEFINE_VTBL_WRAPPER(12);
DEFINE_VTBL_WRAPPER(16);
DEFINE_VTBL_WRAPPER(20);
DEFINE_VTBL_WRAPPER(24);
DEFINE_VTBL_WRAPPER(28);
DEFINE_VTBL_WRAPPER(32);
DEFINE_VTBL_WRAPPER(36);
DEFINE_VTBL_WRAPPER(40);
DEFINE_VTBL_WRAPPER(44);
DEFINE_VTBL_WRAPPER(48);

#endif

typedef enum {
    SchedulerKind,
    MaxConcurrency,
    MinConcurrency,
    TargetOversubscriptionFactor,
    LocalContextCacheSize,
    ContextStackSize,
    ContextPriority,
    SchedulingProtocol,
    DynamicProgressFeedback,
    WinRTInitialization,
    last_policy_id
} PolicyElementKey;

typedef struct {
    struct _policy_container {
        unsigned int policies[last_policy_id];
    } *policy_container;
} SchedulerPolicy;

typedef struct {
    const vtable_ptr *vtable;
} Context;
#define call_Context_GetId(this) CALL_VTBL_FUNC(this, 0, \
        unsigned int, (const Context*), (this))
#define call_Context_GetVirtualProcessorId(this) CALL_VTBL_FUNC(this, 4, \
        unsigned int, (const Context*), (this))
#define call_Context_GetScheduleGroupId(this) CALL_VTBL_FUNC(this, 8, \
        unsigned int, (const Context*), (this))
#define call_Context_dtor(this, flags) CALL_VTBL_FUNC(this, 20, \
        Context*, (Context*, unsigned int), (this, flags))

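/* header stored in front of every Concurrency_Alloc() allocation: while the block
 * sits in a context's allocator_cache it is a free-list node (free); once handed out
 * it records the cache bucket it came from, or -1 if it bypassed the cache (alloc) */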
union allocator_cache_entry {
    struct _free {
        int depth;
        union allocator_cache_entry *next;
    } free;
    struct _alloc {
        int bucket;
        char mem[1];
    } alloc;
};

struct scheduler_list {
    struct Scheduler *scheduler;
    struct scheduler_list *next;
};

typedef struct {
    Context context;
    struct scheduler_list scheduler;
    unsigned int id;
    union allocator_cache_entry *allocator_cache[8];
} ExternalContextBase;
extern const vtable_ptr MSVCRT_ExternalContextBase_vtable;
static void ExternalContextBase_ctor(ExternalContextBase*);

typedef struct Scheduler {
    const vtable_ptr *vtable;
} Scheduler;
#define call_Scheduler_Id(this) CALL_VTBL_FUNC(this, 4, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetNumberOfVirtualProcessors(this) CALL_VTBL_FUNC(this, 8, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetPolicy(this,policy) CALL_VTBL_FUNC(this, 12, \
        SchedulerPolicy*, (Scheduler*,SchedulerPolicy*), (this,policy))
#define call_Scheduler_Reference(this) CALL_VTBL_FUNC(this, 16, unsigned int, (Scheduler*), (this))
#define call_Scheduler_Release(this) CALL_VTBL_FUNC(this, 20, unsigned int, (Scheduler*), (this))
#define call_Scheduler_RegisterShutdownEvent(this,event) CALL_VTBL_FUNC(this, 24, void, (Scheduler*,HANDLE), (this,event))
#define call_Scheduler_Attach(this) CALL_VTBL_FUNC(this, 28, void, (Scheduler*), (this))
#if _MSVCR_VER > 100
#define call_Scheduler_CreateScheduleGroup_loc(this,placement) CALL_VTBL_FUNC(this, 32, \
        /*ScheduleGroup*/void*, (Scheduler*,/*location*/void*), (this,placement))
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 36, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask_loc(this,proc,data,placement) CALL_VTBL_FUNC(this, 40, \
        void, (Scheduler*,void (__cdecl*)(void*),void*,/*location*/void*), (this,proc,data,placement))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 44, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#define call_Scheduler_IsAvailableLocation(this,placement) CALL_VTBL_FUNC(this, 48, \
        MSVCRT_bool, (Scheduler*,const /*location*/void*), (this,placement))
#else
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 32, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 36, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#endif

typedef struct {
    Scheduler scheduler;
    LONG ref;
    unsigned int id;
    unsigned int virt_proc_no;
    SchedulerPolicy policy;
    int shutdown_count;
    int shutdown_size;
    HANDLE *shutdown_events;
    CRITICAL_SECTION cs;
} ThreadScheduler;
extern const vtable_ptr MSVCRT_ThreadScheduler_vtable;

typedef struct {
    Scheduler *scheduler;
} _Scheduler;

typedef struct {
    char empty;
} _CurrentScheduler;

static int context_tls_index = TLS_OUT_OF_INDEXES;

static CRITICAL_SECTION default_scheduler_cs;
static CRITICAL_SECTION_DEBUG default_scheduler_cs_debug =
{
    0, 0, &default_scheduler_cs,
    { &default_scheduler_cs_debug.ProcessLocksList, &default_scheduler_cs_debug.ProcessLocksList },
      0, 0, { (DWORD_PTR)(__FILE__ ": default_scheduler_cs") }
};
static CRITICAL_SECTION default_scheduler_cs = { &default_scheduler_cs_debug, -1, 0, 0, 0, 0 };
static SchedulerPolicy default_scheduler_policy;
static ThreadScheduler *default_scheduler;

static void create_default_scheduler(void);

static Context* try_get_current_context(void)
{
    if (context_tls_index == TLS_OUT_OF_INDEXES)
        return NULL;
    return TlsGetValue(context_tls_index);
}

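/* returns the calling thread's context, allocating the TLS slot and an
 * ExternalContextBase on first use; if two threads race in TlsAlloc() the loser
 * frees its index and reuses the one published via InterlockedCompareExchange() */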
static Context* get_current_context(void)
{
    Context *ret;

    if (context_tls_index == TLS_OUT_OF_INDEXES) {
        int tls_index = TlsAlloc();
        if (tls_index == TLS_OUT_OF_INDEXES) {
            throw_exception(EXCEPTION_SCHEDULER_RESOURCE_ALLOCATION_ERROR,
                    HRESULT_FROM_WIN32(GetLastError()), NULL);
            return NULL;
        }

        if(InterlockedCompareExchange(&context_tls_index, tls_index, TLS_OUT_OF_INDEXES) != TLS_OUT_OF_INDEXES)
            TlsFree(tls_index);
    }

    ret = TlsGetValue(context_tls_index);
    if (!ret) {
        ExternalContextBase *context = MSVCRT_operator_new(sizeof(ExternalContextBase));
        ExternalContextBase_ctor(context);
        TlsSetValue(context_tls_index, context);
        ret = &context->context;
    }
    return ret;
}

static Scheduler* try_get_current_scheduler(void)
{
    ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();

    if (!context)
        return NULL;

    if (context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return NULL;
    }
    return context->scheduler.scheduler;
}

static Scheduler* get_current_scheduler(void)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    if (context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return NULL;
    }
    return context->scheduler.scheduler;
}

/* ?CurrentContext@Context@Concurrency@@SAPAV12@XZ */
/* ?CurrentContext@Context@Concurrency@@SAPEAV12@XZ */
Context* __cdecl Context_CurrentContext(void)
{
    TRACE("()\n");
    return get_current_context();
}

/* ?Id@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_Id(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetId(ctx) : -1;
}

/* ?Block@Context@Concurrency@@SAXXZ */
void __cdecl Context_Block(void)
{
    FIXME("()\n");
}

/* ?Yield@Context@Concurrency@@SAXXZ */
void __cdecl Context_Yield(void)
{
    FIXME("()\n");
}

/* ?_SpinYield@Context@Concurrency@@SAXXZ */
void __cdecl Context__SpinYield(void)
{
    FIXME("()\n");
}

/* ?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ */
MSVCRT_bool __cdecl Context_IsCurrentTaskCollectionCanceling(void)
{
    FIXME("()\n");
    return FALSE;
}

/* ?Oversubscribe@Context@Concurrency@@SAX_N@Z */
void __cdecl Context_Oversubscribe(MSVCRT_bool begin)
{
    FIXME("(%x)\n", begin);
}

/* ?ScheduleGroupId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_ScheduleGroupId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetScheduleGroupId(ctx) : -1;
}

/* ?VirtualProcessorId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_VirtualProcessorId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetVirtualProcessorId(ctx) : -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetId, 4)
unsigned int __thiscall ExternalContextBase_GetId(const ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetVirtualProcessorId, 4)
unsigned int __thiscall ExternalContextBase_GetVirtualProcessorId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetScheduleGroupId, 4)
unsigned int __thiscall ExternalContextBase_GetScheduleGroupId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Unblock, 4)
void __thiscall ExternalContextBase_Unblock(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_IsSynchronouslyBlocked, 4)
MSVCRT_bool __thiscall ExternalContextBase_IsSynchronouslyBlocked(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return FALSE;
}

static void ExternalContextBase_dtor(ExternalContextBase *this)
{
    struct scheduler_list *scheduler_cur, *scheduler_next;
    union allocator_cache_entry *next, *cur;
    int i;

    /* TODO: move the allocator cache to scheduler so it can be reused */
    for(i=0; i<sizeof(this->allocator_cache)/sizeof(this->allocator_cache[0]); i++) {
        for(cur = this->allocator_cache[i]; cur; cur=next) {
            next = cur->free.next;
            MSVCRT_operator_delete(cur);
        }
    }

    if (this->scheduler.scheduler) {
        call_Scheduler_Release(this->scheduler.scheduler);

        for(scheduler_cur=this->scheduler.next; scheduler_cur; scheduler_cur=scheduler_next) {
            scheduler_next = scheduler_cur->next;
            call_Scheduler_Release(scheduler_cur->scheduler);
            MSVCRT_operator_delete(scheduler_cur);
        }
    }
}

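/* flags: bit 0 - also free the object's memory, bit 1 - destroy a whole vector of
 * objects (element count stored just before the first one) */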
DEFINE_THISCALL_WRAPPER(ExternalContextBase_vector_dtor, 8)
Context* __thiscall ExternalContextBase_vector_dtor(ExternalContextBase *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ExternalContextBase_dtor(this+i);
        MSVCRT_operator_delete(ptr);
    } else {
        ExternalContextBase_dtor(this);
        if(flags & 1)
            MSVCRT_operator_delete(this);
    }

    return &this->context;
}

static void ExternalContextBase_ctor(ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);

    memset(this, 0, sizeof(*this));
    this->context.vtable = &MSVCRT_ExternalContextBase_vtable;
    this->id = InterlockedIncrement(&context_id);

    create_default_scheduler();
    this->scheduler.scheduler = &default_scheduler->scheduler;
    call_Scheduler_Reference(&default_scheduler->scheduler);
}

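/* Concurrency::Alloc hands out blocks from a small per-context cache of power-of-two
 * buckets (16 to 2048 bytes, including the allocator_cache_entry header); oversized
 * requests, or requests made on an unknown context type, fall back to operator new
 * and are marked with bucket -1 so Concurrency_Free() releases them directly */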
/* ?Alloc@Concurrency@@YAPAXI@Z */
/* ?Alloc@Concurrency@@YAPEAX_K@Z */
void * CDECL Concurrency_Alloc(MSVCRT_size_t size)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    union allocator_cache_entry *p;

    size += FIELD_OFFSET(union allocator_cache_entry, alloc.mem);
    if (size < sizeof(*p))
        size = sizeof(*p);

    if (context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        p = MSVCRT_operator_new(size);
        p->alloc.bucket = -1;
    }else {
        int i;

        C_ASSERT(sizeof(union allocator_cache_entry) <= 1 << 4);
        for(i=0; i<sizeof(context->allocator_cache)/sizeof(context->allocator_cache[0]); i++)
            if (1 << (i+4) >= size) break;

        if(i==sizeof(context->allocator_cache)/sizeof(context->allocator_cache[0])) {
            p = MSVCRT_operator_new(size);
            p->alloc.bucket = -1;
        }else if (context->allocator_cache[i]) {
            p = context->allocator_cache[i];
            context->allocator_cache[i] = p->free.next;
            p->alloc.bucket = i;
        }else {
            p = MSVCRT_operator_new(1 << (i+4));
            p->alloc.bucket = i;
        }
    }

    TRACE("(%ld) returning %p\n", size, p->alloc.mem);
    return p->alloc.mem;
}

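/* Concurrency::Free recovers the header stored in front of the block; blocks that
 * came from a cache bucket are pushed back onto the calling context's free list
 * unless that list has already reached its depth limit, everything else goes
 * straight to operator delete */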
/* ?Free@Concurrency@@YAXPAX@Z */
/* ?Free@Concurrency@@YAXPEAX@Z */
void CDECL Concurrency_Free(void* mem)
{
    union allocator_cache_entry *p = (union allocator_cache_entry*)((char*)mem-FIELD_OFFSET(union allocator_cache_entry, alloc.mem));
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    int bucket = p->alloc.bucket;

    TRACE("(%p)\n", mem);

    if (context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        MSVCRT_operator_delete(p);
    }else {
        if(bucket >= 0 && bucket < sizeof(context->allocator_cache)/sizeof(context->allocator_cache[0]) &&
            (!context->allocator_cache[bucket] || context->allocator_cache[bucket]->free.depth < 20)) {
            p->free.next = context->allocator_cache[bucket];
            p->free.depth = p->free.next ? p->free.next->free.depth+1 : 0;
            context->allocator_cache[bucket] = p;
        }else {
            MSVCRT_operator_delete(p);
        }
    }
}

/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z */
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QEAAIW4PolicyElementKey@2@I@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetPolicyValue, 12)
unsigned int __thiscall SchedulerPolicy_SetPolicyValue(SchedulerPolicy *this,
        PolicyElementKey policy, unsigned int val)
{
    unsigned int ret;

    TRACE("(%p %d %d)\n", this, policy, val);

    if (policy == MinConcurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "MinConcurrency");
    if (policy == MaxConcurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "MaxConcurrency");
    if (policy >= last_policy_id)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "Invalid policy");

    switch(policy) {
    case SchedulerKind:
        if (val)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "SchedulerKind");
        break;
    case TargetOversubscriptionFactor:
        if (!val)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE,
                    0, "TargetOversubscriptionFactor");
        break;
    case ContextPriority:
        if (((int)val < -7 /* THREAD_PRIORITY_REALTIME_LOWEST */
            || val > 6 /* THREAD_PRIORITY_REALTIME_HIGHEST */)
            && val != THREAD_PRIORITY_IDLE && val != THREAD_PRIORITY_TIME_CRITICAL
            && val != INHERIT_THREAD_PRIORITY)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "ContextPriority");
        break;
    case SchedulingProtocol:
    case DynamicProgressFeedback:
    case WinRTInitialization:
        if (val != 0 && val != 1)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "SchedulingProtocol");
        break;
    default:
        break;
    }

    ret = this->policy_container->policies[policy];
    this->policy_container->policies[policy] = val;
    return ret;
}

/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z */
/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QEAAXII@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetConcurrencyLimits, 12)
void __thiscall SchedulerPolicy_SetConcurrencyLimits(SchedulerPolicy *this,
        unsigned int min_concurrency, unsigned int max_concurrency)
{
    TRACE("(%p %d %d)\n", this, min_concurrency, max_concurrency);

    if (min_concurrency > max_concurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_THREAD_SPECIFICATION, 0, NULL);
    if (!max_concurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "MaxConcurrency");

    this->policy_container->policies[MinConcurrency] = min_concurrency;
    this->policy_container->policies[MaxConcurrency] = max_concurrency;
}

/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z */
/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QEBAIW4PolicyElementKey@2@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_GetPolicyValue, 8)
unsigned int __thiscall SchedulerPolicy_GetPolicyValue(
        const SchedulerPolicy *this, PolicyElementKey policy)
{
    TRACE("(%p %d)\n", this, policy);

    if (policy >= last_policy_id)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "Invalid policy");
    return this->policy_container->policies[policy];
}

/* ??0SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_ctor, 4)
SchedulerPolicy* __thiscall SchedulerPolicy_ctor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);

    this->policy_container = MSVCRT_operator_new(sizeof(*this->policy_container));
    /* TODO: default values can probably be affected by CurrentScheduler */
    this->policy_container->policies[SchedulerKind] = 0;
    this->policy_container->policies[MaxConcurrency] = -1;
    this->policy_container->policies[MinConcurrency] = 1;
    this->policy_container->policies[TargetOversubscriptionFactor] = 1;
    this->policy_container->policies[LocalContextCacheSize] = 8;
    this->policy_container->policies[ContextStackSize] = 0;
    this->policy_container->policies[ContextPriority] = THREAD_PRIORITY_NORMAL;
    this->policy_container->policies[SchedulingProtocol] = 0;
    this->policy_container->policies[DynamicProgressFeedback] = 1;
    return this;
}

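/* variadic constructor taking n (PolicyElementKey, value) pairs; MinConcurrency and
 * MaxConcurrency are collected and applied together through SetConcurrencyLimits()
 * so the pair is validated as a whole */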
/* ??0SchedulerPolicy@Concurrency@@QAA@IZZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@_KZZ */
/* TODO: don't leak policy_container on exception */
SchedulerPolicy* WINAPIV SchedulerPolicy_ctor_policies(
        SchedulerPolicy *this, MSVCRT_size_t n, ...)
{
    unsigned int min_concurrency, max_concurrency;
    __ms_va_list valist;
    MSVCRT_size_t i;

    TRACE("(%p %ld)\n", this, n);

    SchedulerPolicy_ctor(this);
    min_concurrency = this->policy_container->policies[MinConcurrency];
    max_concurrency = this->policy_container->policies[MaxConcurrency];

    __ms_va_start(valist, n);
    for(i=0; i<n; i++) {
        PolicyElementKey policy = va_arg(valist, PolicyElementKey);
        unsigned int val = va_arg(valist, unsigned int);

        if(policy == MinConcurrency)
            min_concurrency = val;
        else if(policy == MaxConcurrency)
            max_concurrency = val;
        else
            SchedulerPolicy_SetPolicyValue(this, policy, val);
    }
    __ms_va_end(valist);

    SchedulerPolicy_SetConcurrencyLimits(this, min_concurrency, max_concurrency);
    return this;
}

/* ??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z */
/* ??4SchedulerPolicy@Concurrency@@QEAAAEAV01@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_op_assign, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_op_assign(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    memcpy(this->policy_container->policies, rhs->policy_container->policies,
            sizeof(this->policy_container->policies));
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z */
/* ??0SchedulerPolicy@Concurrency@@QEAA@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_copy_ctor, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_copy_ctor(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    SchedulerPolicy_ctor(this);
    return SchedulerPolicy_op_assign(this, rhs);
}

/* ??1SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??1SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_dtor, 4)
void __thiscall SchedulerPolicy_dtor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);
    MSVCRT_operator_delete(this->policy_container);
}

static void ThreadScheduler_dtor(ThreadScheduler *this)
{
    int i;

    if(this->ref != 0) WARN("ref = %d\n", this->ref);
    SchedulerPolicy_dtor(&this->policy);

    for(i=0; i<this->shutdown_count; i++)
        SetEvent(this->shutdown_events[i]);
    MSVCRT_operator_delete(this->shutdown_events);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Id, 4)
unsigned int __thiscall ThreadScheduler_Id(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetNumberOfVirtualProcessors, 4)
unsigned int __thiscall ThreadScheduler_GetNumberOfVirtualProcessors(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->virt_proc_no;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetPolicy, 8)
SchedulerPolicy* __thiscall ThreadScheduler_GetPolicy(
        const ThreadScheduler *this, SchedulerPolicy *ret)
{
    TRACE("(%p %p)\n", this, ret);
    return SchedulerPolicy_copy_ctor(ret, &this->policy);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Reference, 4)
unsigned int __thiscall ThreadScheduler_Reference(ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return InterlockedIncrement(&this->ref);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Release, 4)
unsigned int __thiscall ThreadScheduler_Release(ThreadScheduler *this)
{
    unsigned int ret = InterlockedDecrement(&this->ref);

    TRACE("(%p)\n", this);

    if(!ret) {
        ThreadScheduler_dtor(this);
        MSVCRT_operator_delete(this);
    }
    return ret;
}

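/* shutdown events are kept in a dynamically grown array guarded by the scheduler's
 * critical section and are signalled from ThreadScheduler_dtor() */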
DEFINE_THISCALL_WRAPPER(ThreadScheduler_RegisterShutdownEvent, 8)
void __thiscall ThreadScheduler_RegisterShutdownEvent(ThreadScheduler *this, HANDLE event)
{
    HANDLE *shutdown_events;
    int size;

    TRACE("(%p %p)\n", this, event);

    EnterCriticalSection(&this->cs);

    size = this->shutdown_size ? this->shutdown_size * 2 : 1;
    shutdown_events = MSVCRT_operator_new(size * sizeof(*shutdown_events));
    memcpy(shutdown_events, this->shutdown_events,
            this->shutdown_count * sizeof(*shutdown_events));
    MSVCRT_operator_delete(this->shutdown_events);
    this->shutdown_size = size;
    this->shutdown_events = shutdown_events;
    this->shutdown_events[this->shutdown_count++] = event;

    LeaveCriticalSection(&this->cs);
}

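/* attaching pushes the context's current scheduler onto its scheduler list and makes
 * this scheduler current; attaching the scheduler that is already current raises
 * improper_scheduler_attach */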
DEFINE_THISCALL_WRAPPER(ThreadScheduler_Attach, 4)
void __thiscall ThreadScheduler_Attach(ThreadScheduler *this)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    TRACE("(%p)\n", this);

    if(context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(context->scheduler.scheduler == &this->scheduler)
        throw_exception(EXCEPTION_IMPROPER_SCHEDULER_ATTACH, 0, NULL);

    if(context->scheduler.scheduler) {
        struct scheduler_list *l = MSVCRT_operator_new(sizeof(*l));
        *l = context->scheduler;
        context->scheduler.next = l;
    }
    context->scheduler.scheduler = &this->scheduler;
    ThreadScheduler_Reference(this);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup_loc, 8)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup_loc(
        ThreadScheduler *this, /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup, 4)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup(ThreadScheduler *this)
{
    FIXME("(%p) stub\n", this);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask_loc, 16)
void __thiscall ThreadScheduler_ScheduleTask_loc(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data, /*location*/void *placement)
{
    FIXME("(%p %p %p %p) stub\n", this, proc, data, placement);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask, 12)
void __thiscall ThreadScheduler_ScheduleTask(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data)
{
    FIXME("(%p %p %p) stub\n", this, proc, data);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_IsAvailableLocation, 8)
MSVCRT_bool __thiscall ThreadScheduler_IsAvailableLocation(
        const ThreadScheduler *this, const /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return FALSE;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_vector_dtor, 8)
Scheduler* __thiscall ThreadScheduler_vector_dtor(ThreadScheduler *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ThreadScheduler_dtor(this+i);
        MSVCRT_operator_delete(ptr);
    } else {
        ThreadScheduler_dtor(this);
        if(flags & 1)
            MSVCRT_operator_delete(this);
    }

    return &this->scheduler;
}

static ThreadScheduler* ThreadScheduler_ctor(ThreadScheduler *this,
        const SchedulerPolicy *policy)
{
    SYSTEM_INFO si;

    TRACE("(%p)->()\n", this);

    this->scheduler.vtable = &MSVCRT_ThreadScheduler_vtable;
    this->ref = 1;
    this->id = InterlockedIncrement(&scheduler_id);
    SchedulerPolicy_copy_ctor(&this->policy, policy);

    GetSystemInfo(&si);
    this->virt_proc_no = SchedulerPolicy_GetPolicyValue(&this->policy, MaxConcurrency);
    if(this->virt_proc_no > si.dwNumberOfProcessors)
        this->virt_proc_no = si.dwNumberOfProcessors;

    this->shutdown_count = this->shutdown_size = 0;
    this->shutdown_events = NULL;

    InitializeCriticalSection(&this->cs);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": ThreadScheduler");
    return this;
}

/* ?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z */
/* ?Create@Scheduler@Concurrency@@SAPEAV12@AEBVSchedulerPolicy@2@@Z */
Scheduler* __cdecl Scheduler_Create(const SchedulerPolicy *policy)
{
    ThreadScheduler *ret;

    TRACE("(%p)\n", policy);

    ret = MSVCRT_operator_new(sizeof(*ret));
    return &ThreadScheduler_ctor(ret, policy)->scheduler;
}

/* ?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ */
void __cdecl Scheduler_ResetDefaultSchedulerPolicy(void)
{
    TRACE("()\n");

    EnterCriticalSection(&default_scheduler_cs);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    SchedulerPolicy_ctor(&default_scheduler_policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl Scheduler_SetDefaultSchedulerPolicy(const SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler_policy.policy_container)
        SchedulerPolicy_copy_ctor(&default_scheduler_policy, policy);
    else
        SchedulerPolicy_op_assign(&default_scheduler_policy, policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Create@CurrentScheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?Create@CurrentScheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl CurrentScheduler_Create(const SchedulerPolicy *policy)
{
    Scheduler *scheduler;

    TRACE("(%p)\n", policy);

    scheduler = Scheduler_Create(policy);
    call_Scheduler_Attach(scheduler);
}

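/* detaching releases the most recently attached scheduler and restores the previous
 * one from the context's scheduler list; with no context or nothing explicitly
 * attached it raises improper_scheduler_detach */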
/* ?Detach@CurrentScheduler@Concurrency@@SAXXZ */
void __cdecl CurrentScheduler_Detach(void)
{
    ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();

    TRACE("()\n");

    if(!context)
        throw_exception(EXCEPTION_IMPROPER_SCHEDULER_DETACH, 0, NULL);

    if(context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(!context->scheduler.next)
        throw_exception(EXCEPTION_IMPROPER_SCHEDULER_DETACH, 0, NULL);

    call_Scheduler_Release(context->scheduler.scheduler);
    if(!context->scheduler.next) {
        context->scheduler.scheduler = NULL;
    }else {
        struct scheduler_list *entry = context->scheduler.next;
        context->scheduler.scheduler = entry->scheduler;
        context->scheduler.next = entry->next;
        MSVCRT_operator_delete(entry);
    }
}

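/* lazily creates the process-wide default scheduler, double-checking under
 * default_scheduler_cs so concurrent first callers only create it once */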
static void create_default_scheduler(void)
{
    if(default_scheduler)
        return;

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler) {
        ThreadScheduler *scheduler;

        if(!default_scheduler_policy.policy_container)
            SchedulerPolicy_ctor(&default_scheduler_policy);

        scheduler = MSVCRT_operator_new(sizeof(*scheduler));
        ThreadScheduler_ctor(scheduler, &default_scheduler_policy);
        default_scheduler = scheduler;
    }
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Get@CurrentScheduler@Concurrency@@SAPAVScheduler@2@XZ */
/* ?Get@CurrentScheduler@Concurrency@@SAPEAVScheduler@2@XZ */
Scheduler* __cdecl CurrentScheduler_Get(void)
{
    TRACE("()\n");
    return get_current_scheduler();
}

#if _MSVCR_VER > 100
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@AAVlocation@2@@Z */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@AEAVlocation@2@@Z */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup_loc(/*location*/void *placement)
{
    TRACE("(%p)\n", placement);
    return call_Scheduler_CreateScheduleGroup_loc(get_current_scheduler(), placement);
}
#endif

/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@XZ */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@XZ */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup(void)
{
    TRACE("()\n");
    return call_Scheduler_CreateScheduleGroup(get_current_scheduler());
}

/* ?GetNumberOfVirtualProcessors@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_GetNumberOfVirtualProcessors(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_GetNumberOfVirtualProcessors(scheduler);
}

/* ?GetPolicy@CurrentScheduler@Concurrency@@SA?AVSchedulerPolicy@2@XZ */
SchedulerPolicy* __cdecl CurrentScheduler_GetPolicy(SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);
    return call_Scheduler_GetPolicy(get_current_scheduler(), policy);
}

/* ?Id@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_Id(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_Id(scheduler);
}

#if _MSVCR_VER > 100
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NABVlocation@2@@Z */
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NAEBVlocation@2@@Z */
MSVCRT_bool __cdecl CurrentScheduler_IsAvailableLocation(const /*location*/void *placement)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("(%p)\n", placement);

    if(!scheduler)
        return FALSE;
    return call_Scheduler_IsAvailableLocation(scheduler, placement);
}
#endif

/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPAX@Z */
/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPEAX@Z */
void __cdecl CurrentScheduler_RegisterShutdownEvent(HANDLE event)
{
    TRACE("(%p)\n", event);
    call_Scheduler_RegisterShutdownEvent(get_current_scheduler(), event);
}

#if _MSVCR_VER > 100
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0AAVlocation@2@@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0AEAVlocation@2@@Z */
void __cdecl CurrentScheduler_ScheduleTask_loc(void (__cdecl *proc)(void*),
        void *data, /*location*/void *placement)
{
    TRACE("(%p %p %p)\n", proc, data, placement);
    call_Scheduler_ScheduleTask_loc(get_current_scheduler(), proc, data, placement);
}
#endif

/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0@Z */
void __cdecl CurrentScheduler_ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    call_Scheduler_ScheduleTask(get_current_scheduler(), proc, data);
}

/* ??0_Scheduler@details@Concurrency@@QAE@PAVScheduler@2@@Z */
/* ??0_Scheduler@details@Concurrency@@QEAA@PEAVScheduler@2@@Z */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor_sched, 8)
_Scheduler* __thiscall _Scheduler_ctor_sched(_Scheduler *this, Scheduler *scheduler)
{
    TRACE("(%p %p)\n", this, scheduler);

    this->scheduler = scheduler;
    return this;
}

/* ??_F_Scheduler@details@Concurrency@@QAEXXZ */
/* ??_F_Scheduler@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor, 4)
_Scheduler* __thiscall _Scheduler_ctor(_Scheduler *this)
{
    return _Scheduler_ctor_sched(this, NULL);
}

/* ?_GetScheduler@_Scheduler@details@Concurrency@@QAEPAVScheduler@3@XZ */
/* ?_GetScheduler@_Scheduler@details@Concurrency@@QEAAPEAVScheduler@3@XZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__GetScheduler, 4)
Scheduler* __thiscall _Scheduler__GetScheduler(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return this->scheduler;
}

/* ?_Reference@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Reference@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Reference, 4)
unsigned int __thiscall _Scheduler__Reference(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Reference(this->scheduler);
}

/* ?_Release@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Release@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Release, 4)
unsigned int __thiscall _Scheduler__Release(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Release(this->scheduler);
}

/* ?_Get@_CurrentScheduler@details@Concurrency@@SA?AV_Scheduler@23@XZ */
_Scheduler* __cdecl _CurrentScheduler__Get(_Scheduler *ret)
{
    TRACE("()\n");
    return _Scheduler_ctor_sched(ret, get_current_scheduler());
}

/* ?_GetNumberOfVirtualProcessors@_CurrentScheduler@details@Concurrency@@SAIXZ */
unsigned int __cdecl _CurrentScheduler__GetNumberOfVirtualProcessors(void)
{
    TRACE("()\n");
    return CurrentScheduler_GetNumberOfVirtualProcessors();
}

/* ?_Id@_CurrentScheduler@details@Concurrency@@SAIXZ */
unsigned int __cdecl _CurrentScheduler__Id(void)
{
    TRACE("()\n");
    return CurrentScheduler_Id();
}

/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPEAX@Z0@Z */
void __cdecl _CurrentScheduler__ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    CurrentScheduler_ScheduleTask(proc, data);
}

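/* RTTI descriptors and vtables for the classes implemented above; on x86_64 the
 * RTTI data is initialized against the module base in msvcrt_init_scheduler() */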
extern const vtable_ptr MSVCRT_type_info_vtable;
DEFINE_RTTI_DATA0(Context, 0, ".?AVContext@Concurrency@@")
DEFINE_RTTI_DATA1(ContextBase, 0, &Context_rtti_base_descriptor, ".?AVContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ExternalContextBase, 0, &ContextBase_rtti_base_descriptor,
        &Context_rtti_base_descriptor, ".?AVExternalContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA0(Scheduler, 0, ".?AVScheduler@Concurrency@@")
DEFINE_RTTI_DATA1(SchedulerBase, 0, &Scheduler_rtti_base_descriptor, ".?AVSchedulerBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ThreadScheduler, 0, &SchedulerBase_rtti_base_descriptor,
        &Scheduler_rtti_base_descriptor, ".?AVThreadScheduler@details@Concurrency@@")

#ifndef __GNUC__
void __asm_dummy_vtables(void) {
#endif
    __ASM_VTABLE(ExternalContextBase,
            VTABLE_ADD_FUNC(ExternalContextBase_GetId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetVirtualProcessorId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetScheduleGroupId)
            VTABLE_ADD_FUNC(ExternalContextBase_Unblock)
            VTABLE_ADD_FUNC(ExternalContextBase_IsSynchronouslyBlocked)
            VTABLE_ADD_FUNC(ExternalContextBase_vector_dtor));
    __ASM_VTABLE(ThreadScheduler,
            VTABLE_ADD_FUNC(ThreadScheduler_vector_dtor)
            VTABLE_ADD_FUNC(ThreadScheduler_Id)
            VTABLE_ADD_FUNC(ThreadScheduler_GetNumberOfVirtualProcessors)
            VTABLE_ADD_FUNC(ThreadScheduler_GetPolicy)
            VTABLE_ADD_FUNC(ThreadScheduler_Reference)
            VTABLE_ADD_FUNC(ThreadScheduler_Release)
            VTABLE_ADD_FUNC(ThreadScheduler_RegisterShutdownEvent)
            VTABLE_ADD_FUNC(ThreadScheduler_Attach)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_IsAvailableLocation)
#endif
            );
#ifndef __GNUC__
}
#endif

void msvcrt_init_scheduler(void *base)
{
#ifdef __x86_64__
    init_Context_rtti(base);
    init_ContextBase_rtti(base);
    init_ExternalContextBase_rtti(base);
    init_Scheduler_rtti(base);
    init_SchedulerBase_rtti(base);
    init_ThreadScheduler_rtti(base);
#endif
}

void msvcrt_free_scheduler(void)
{
    if (context_tls_index != TLS_OUT_OF_INDEXES)
        TlsFree(context_tls_index);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    if(default_scheduler) {
        ThreadScheduler_dtor(default_scheduler);
        MSVCRT_operator_delete(default_scheduler);
    }
}

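/* releases the Context that was implicitly created for the calling thread, if any */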
void msvcrt_free_scheduler_thread(void)
{
    Context *context = try_get_current_context();
    if (!context) return;
    call_Context_dtor(context, 1);
}

#endif /* _MSVCR_VER >= 100 */