msvcrt: Add CurrentScheduler::Get implementation.
[wine.git] / dlls / msvcrt / scheduler.c
/*
 * msvcrt.dll C++ objects
 *
 * Copyright 2017 Piotr Caban
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"

#include <stdarg.h>

#include "windef.h"
#include "winternl.h"
#include "wine/debug.h"
#include "msvcrt.h"
#include "cppexcept.h"
#include "cxx.h"

#if _MSVCR_VER >= 100

WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);

static int context_id = -1;
static int scheduler_id = -1;

#ifdef __i386__
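
/* On i386 the CALL_VTBL_FUNC macros below dispatch through small thunks: each thunk
 * pops the caller's first stack argument into %ecx (the thiscall 'this' register),
 * loads the object's vtable and jumps to the slot at byte offset 'off'. */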
#define DEFINE_VTBL_WRAPPER(off)            \
    __ASM_GLOBAL_FUNC(vtbl_wrapper_ ## off, \
        "popl %eax\n\t"                     \
        "popl %ecx\n\t"                     \
        "pushl %eax\n\t"                    \
        "movl 0(%ecx), %eax\n\t"            \
        "jmp *" #off "(%eax)\n\t")

DEFINE_VTBL_WRAPPER(0);
DEFINE_VTBL_WRAPPER(4);
DEFINE_VTBL_WRAPPER(8);
DEFINE_VTBL_WRAPPER(20);
DEFINE_VTBL_WRAPPER(28);

#endif

typedef enum {
    SchedulerKind,
    MaxConcurrency,
    MinConcurrency,
    TargetOversubscriptionFactor,
    LocalContextCacheSize,
    ContextStackSize,
    ContextPriority,
    SchedulingProtocol,
    DynamicProgressFeedback,
    WinRTInitialization,
    last_policy_id
} PolicyElementKey;

typedef struct {
    struct _policy_container {
        unsigned int policies[last_policy_id];
    } *policy_container;
} SchedulerPolicy;

typedef struct {
    const vtable_ptr *vtable;
} Context;
#define call_Context_GetId(this) CALL_VTBL_FUNC(this, 0, \
        unsigned int, (const Context*), (this))
#define call_Context_GetVirtualProcessorId(this) CALL_VTBL_FUNC(this, 4, \
        unsigned int, (const Context*), (this))
#define call_Context_GetScheduleGroupId(this) CALL_VTBL_FUNC(this, 8, \
        unsigned int, (const Context*), (this))
#define call_Context_dtor(this, flags) CALL_VTBL_FUNC(this, 20, \
        Context*, (Context*, unsigned int), (this, flags))

union allocator_cache_entry {
    struct _free {
        int depth;
        union allocator_cache_entry *next;
    } free;
    struct _alloc {
        int bucket;
        char mem[1];
    } alloc;
};

struct scheduler_list {
    struct Scheduler *scheduler;
    struct scheduler_list *next;
};

typedef struct {
    Context context;
    struct scheduler_list scheduler;
    unsigned int id;
    union allocator_cache_entry *allocator_cache[8];
} ExternalContextBase;
extern const vtable_ptr MSVCRT_ExternalContextBase_vtable;
static void ExternalContextBase_ctor(ExternalContextBase*);

typedef struct Scheduler {
    const vtable_ptr *vtable;
} Scheduler;
#define call_Scheduler_Release(this) CALL_VTBL_FUNC(this, 20, unsigned int, (Scheduler*), (this))
#define call_Scheduler_Attach(this) CALL_VTBL_FUNC(this, 28, void, (Scheduler*), (this))

typedef struct {
    Scheduler scheduler;
    LONG ref;
    unsigned int id;
    unsigned int virt_proc_no;
    SchedulerPolicy policy;
    int shutdown_count;
    int shutdown_size;
    HANDLE *shutdown_events;
    CRITICAL_SECTION cs;
} ThreadScheduler;
extern const vtable_ptr MSVCRT_ThreadScheduler_vtable;

static int context_tls_index = TLS_OUT_OF_INDEXES;

static CRITICAL_SECTION default_scheduler_cs;
static CRITICAL_SECTION_DEBUG default_scheduler_cs_debug =
{
    0, 0, &default_scheduler_cs,
    { &default_scheduler_cs_debug.ProcessLocksList, &default_scheduler_cs_debug.ProcessLocksList },
      0, 0, { (DWORD_PTR)(__FILE__ ": default_scheduler_cs") }
};
static CRITICAL_SECTION default_scheduler_cs = { &default_scheduler_cs_debug, -1, 0, 0, 0, 0 };
static SchedulerPolicy default_scheduler_policy;
static ThreadScheduler *default_scheduler;
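
/* The current context lives in a lazily allocated TLS slot.  try_get_current_context()
 * only reads the slot, while get_current_context() allocates the TLS index on first use
 * (resolving allocation races with InterlockedCompareExchange) and creates an
 * ExternalContextBase for the calling thread if none is stored yet. */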
static Context* try_get_current_context(void)
{
    if (context_tls_index == TLS_OUT_OF_INDEXES)
        return NULL;
    return TlsGetValue(context_tls_index);
}

static Context* get_current_context(void)
{
    Context *ret;

    if (context_tls_index == TLS_OUT_OF_INDEXES) {
        int tls_index = TlsAlloc();
        if (tls_index == TLS_OUT_OF_INDEXES) {
            throw_exception(EXCEPTION_SCHEDULER_RESOURCE_ALLOCATION_ERROR,
                    HRESULT_FROM_WIN32(GetLastError()), NULL);
            return NULL;
        }

        if(InterlockedCompareExchange(&context_tls_index, tls_index, TLS_OUT_OF_INDEXES) != TLS_OUT_OF_INDEXES)
            TlsFree(tls_index);
    }

    ret = TlsGetValue(context_tls_index);
    if (!ret) {
        ExternalContextBase *context = MSVCRT_operator_new(sizeof(ExternalContextBase));
        ExternalContextBase_ctor(context);
        TlsSetValue(context_tls_index, context);
        ret = &context->context;
    }

    return ret;
}

/* ?CurrentContext@Context@Concurrency@@SAPAV12@XZ */
/* ?CurrentContext@Context@Concurrency@@SAPEAV12@XZ */
Context* __cdecl Context_CurrentContext(void)
{
    TRACE("()\n");
    return get_current_context();
}

/* ?Id@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_Id(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetId(ctx) : -1;
}

/* ?Block@Context@Concurrency@@SAXXZ */
void __cdecl Context_Block(void)
{
    FIXME("()\n");
}

/* ?Yield@Context@Concurrency@@SAXXZ */
void __cdecl Context_Yield(void)
{
    FIXME("()\n");
}

/* ?_SpinYield@Context@Concurrency@@SAXXZ */
void __cdecl Context__SpinYield(void)
{
    FIXME("()\n");
}

/* ?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ */
MSVCRT_bool __cdecl Context_IsCurrentTaskCollectionCanceling(void)
{
    FIXME("()\n");
    return FALSE;
}

/* ?Oversubscribe@Context@Concurrency@@SAX_N@Z */
void __cdecl Context_Oversubscribe(MSVCRT_bool begin)
{
    FIXME("(%x)\n", begin);
}

/* ?ScheduleGroupId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_ScheduleGroupId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetScheduleGroupId(ctx) : -1;
}

/* ?VirtualProcessorId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_VirtualProcessorId(void)
{
    Context *ctx = try_get_current_context();
    FIXME("()\n");
    return ctx ? call_Context_GetVirtualProcessorId(ctx) : -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetId, 4)
unsigned int __thiscall ExternalContextBase_GetId(const ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetVirtualProcessorId, 4)
unsigned int __thiscall ExternalContextBase_GetVirtualProcessorId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetScheduleGroupId, 4)
unsigned int __thiscall ExternalContextBase_GetScheduleGroupId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Unblock, 4)
void __thiscall ExternalContextBase_Unblock(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_IsSynchronouslyBlocked, 4)
MSVCRT_bool __thiscall ExternalContextBase_IsSynchronouslyBlocked(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return FALSE;
}

static void ExternalContextBase_dtor(ExternalContextBase *this)
{
    struct scheduler_list *scheduler_cur, *scheduler_next;
    union allocator_cache_entry *next, *cur;
    int i;

    /* TODO: move the allocator cache to scheduler so it can be reused */
    for(i=0; i<sizeof(this->allocator_cache)/sizeof(this->allocator_cache[0]); i++) {
        for(cur = this->allocator_cache[i]; cur; cur=next) {
            next = cur->free.next;
            MSVCRT_operator_delete(cur);
        }
    }

    if (this->scheduler.scheduler) {
        call_Scheduler_Release(this->scheduler.scheduler);

        for(scheduler_cur=this->scheduler.next; scheduler_cur; scheduler_cur=scheduler_next) {
            scheduler_next = scheduler_cur->next;
            call_Scheduler_Release(scheduler_cur->scheduler);
            MSVCRT_operator_delete(scheduler_cur);
        }
    }
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_vector_dtor, 8)
Context* __thiscall ExternalContextBase_vector_dtor(ExternalContextBase *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ExternalContextBase_dtor(this+i);
        MSVCRT_operator_delete(ptr);
    } else {
        ExternalContextBase_dtor(this);
        if(flags & 1)
            MSVCRT_operator_delete(this);
    }

    return &this->context;
}

static void ExternalContextBase_ctor(ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);

    memset(this, 0, sizeof(*this));
    this->context.vtable = &MSVCRT_ExternalContextBase_vtable;
    this->id = InterlockedIncrement(&context_id);
}
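
/* Concurrency::Alloc and Concurrency::Free keep a small per-context cache of freed
 * blocks.  Bucket i of ExternalContextBase::allocator_cache holds blocks of
 * 1 << (i + 4) bytes; Free() pushes a block back onto its bucket unless that free
 * list is already 20 entries deep, in which case the block is released for real. */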
/* ?Alloc@Concurrency@@YAPAXI@Z */
/* ?Alloc@Concurrency@@YAPEAX_K@Z */
void * CDECL Concurrency_Alloc(MSVCRT_size_t size)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    union allocator_cache_entry *p;

    size += FIELD_OFFSET(union allocator_cache_entry, alloc.mem);
    if (size < sizeof(*p))
        size = sizeof(*p);

    if (context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        p = MSVCRT_operator_new(size);
        p->alloc.bucket = -1;
    }else {
        int i;

        C_ASSERT(sizeof(union allocator_cache_entry) <= 1 << 4);
        for(i=0; i<sizeof(context->allocator_cache)/sizeof(context->allocator_cache[0]); i++)
            if (1 << (i+4) >= size) break;

        if(i==sizeof(context->allocator_cache)/sizeof(context->allocator_cache[0])) {
            p = MSVCRT_operator_new(size);
            p->alloc.bucket = -1;
        }else if (context->allocator_cache[i]) {
            p = context->allocator_cache[i];
            context->allocator_cache[i] = p->free.next;
            p->alloc.bucket = i;
        }else {
            p = MSVCRT_operator_new(1 << (i+4));
            p->alloc.bucket = i;
        }
    }

    TRACE("(%ld) returning %p\n", size, p->alloc.mem);
    return p->alloc.mem;
}

/* ?Free@Concurrency@@YAXPAX@Z */
/* ?Free@Concurrency@@YAXPEAX@Z */
void CDECL Concurrency_Free(void* mem)
{
    union allocator_cache_entry *p = (union allocator_cache_entry*)((char*)mem-FIELD_OFFSET(union allocator_cache_entry, alloc.mem));
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    int bucket = p->alloc.bucket;

    TRACE("(%p)\n", mem);

    if (context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        MSVCRT_operator_delete(p);
    }else {
        if(bucket >= 0 && bucket < sizeof(context->allocator_cache)/sizeof(context->allocator_cache[0]) &&
            (!context->allocator_cache[bucket] || context->allocator_cache[bucket]->free.depth < 20)) {
            p->free.next = context->allocator_cache[bucket];
            p->free.depth = p->free.next ? p->free.next->free.depth+1 : 0;
            context->allocator_cache[bucket] = p;
        }else {
            MSVCRT_operator_delete(p);
        }
    }
}

/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z */
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QEAAIW4PolicyElementKey@2@I@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetPolicyValue, 12)
unsigned int __thiscall SchedulerPolicy_SetPolicyValue(SchedulerPolicy *this,
        PolicyElementKey policy, unsigned int val)
{
    unsigned int ret;

    TRACE("(%p %d %d)\n", this, policy, val);

    if (policy == MinConcurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "MinConcurrency");
    if (policy == MaxConcurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "MaxConcurrency");
    if (policy < SchedulerKind || policy >= last_policy_id)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "Invalid policy");

    switch(policy) {
    case SchedulerKind:
        if (val)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "SchedulerKind");
        break;
    case TargetOversubscriptionFactor:
        if (!val)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE,
                    0, "TargetOversubscriptionFactor");
        break;
    case ContextPriority:
        if (((int)val < -7 /* THREAD_PRIORITY_REALTIME_LOWEST */
            || val > 6 /* THREAD_PRIORITY_REALTIME_HIGHEST */)
            && val != THREAD_PRIORITY_IDLE && val != THREAD_PRIORITY_TIME_CRITICAL
            && val != INHERIT_THREAD_PRIORITY)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "ContextPriority");
        break;
    case SchedulingProtocol:
    case DynamicProgressFeedback:
    case WinRTInitialization:
        if (val != 0 && val != 1)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "SchedulingProtocol");
        break;
    default:
        break;
    }

    ret = this->policy_container->policies[policy];
    this->policy_container->policies[policy] = val;
    return ret;
}

/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z */
/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QEAAXII@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetConcurrencyLimits, 12)
void __thiscall SchedulerPolicy_SetConcurrencyLimits(SchedulerPolicy *this,
        unsigned int min_concurrency, unsigned int max_concurrency)
{
    TRACE("(%p %d %d)\n", this, min_concurrency, max_concurrency);

    if (min_concurrency > max_concurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_THREAD_SPECIFICATION, 0, NULL);
    if (!max_concurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "MaxConcurrency");

    this->policy_container->policies[MinConcurrency] = min_concurrency;
    this->policy_container->policies[MaxConcurrency] = max_concurrency;
}

/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z */
/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QEBAIW4PolicyElementKey@2@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_GetPolicyValue, 8)
unsigned int __thiscall SchedulerPolicy_GetPolicyValue(
        const SchedulerPolicy *this, PolicyElementKey policy)
{
    TRACE("(%p %d)\n", this, policy);

    if (policy < SchedulerKind || policy >= last_policy_id)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "Invalid policy");
    return this->policy_container->policies[policy];
}

/* ??0SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_ctor, 4)
SchedulerPolicy* __thiscall SchedulerPolicy_ctor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);

    this->policy_container = MSVCRT_operator_new(sizeof(*this->policy_container));
    /* TODO: default values can probably be affected by CurrentScheduler */
    this->policy_container->policies[SchedulerKind] = 0;
    this->policy_container->policies[MaxConcurrency] = -1;
    this->policy_container->policies[MinConcurrency] = 1;
    this->policy_container->policies[TargetOversubscriptionFactor] = 1;
    this->policy_container->policies[LocalContextCacheSize] = 8;
    this->policy_container->policies[ContextStackSize] = 0;
    this->policy_container->policies[ContextPriority] = THREAD_PRIORITY_NORMAL;
    this->policy_container->policies[SchedulingProtocol] = 0;
    this->policy_container->policies[DynamicProgressFeedback] = 1;
    return this;
}
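
/* The variadic constructor below consumes n (key, value) pairs; for example a native
 * call like SchedulerPolicy policy(2, MinConcurrency, 1, MaxConcurrency, 4) reaches it
 * with n = 2.  MinConcurrency/MaxConcurrency are collected separately and applied
 * through SetConcurrencyLimits() so they are validated as a pair. */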
/* ??0SchedulerPolicy@Concurrency@@QAA@IZZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@_KZZ */
/* TODO: don't leak policy_container on exception */
SchedulerPolicy* __cdecl SchedulerPolicy_ctor_policies(
        SchedulerPolicy *this, MSVCRT_size_t n, ...)
{
    unsigned int min_concurrency, max_concurrency;
    __ms_va_list valist;
    MSVCRT_size_t i;

    TRACE("(%p %ld)\n", this, n);

    SchedulerPolicy_ctor(this);
    min_concurrency = this->policy_container->policies[MinConcurrency];
    max_concurrency = this->policy_container->policies[MaxConcurrency];

    __ms_va_start(valist, n);
    for(i=0; i<n; i++) {
        PolicyElementKey policy = va_arg(valist, PolicyElementKey);
        unsigned int val = va_arg(valist, unsigned int);

        if(policy == MinConcurrency)
            min_concurrency = val;
        else if(policy == MaxConcurrency)
            max_concurrency = val;
        else
            SchedulerPolicy_SetPolicyValue(this, policy, val);
    }
    __ms_va_end(valist);

    SchedulerPolicy_SetConcurrencyLimits(this, min_concurrency, max_concurrency);
    return this;
}

/* ??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z */
/* ??4SchedulerPolicy@Concurrency@@QEAAAEAV01@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_op_assign, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_op_assign(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    memcpy(this->policy_container->policies, rhs->policy_container->policies,
            sizeof(this->policy_container->policies));
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z */
/* ??0SchedulerPolicy@Concurrency@@QEAA@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_copy_ctor, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_copy_ctor(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    SchedulerPolicy_ctor(this);
    return SchedulerPolicy_op_assign(this, rhs);
}

/* ??1SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??1SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_dtor, 4)
void __thiscall SchedulerPolicy_dtor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);
    MSVCRT_operator_delete(this->policy_container);
}

static void ThreadScheduler_dtor(ThreadScheduler *this)
{
    int i;

    if(this->ref != 0) WARN("ref = %d\n", this->ref);
    SchedulerPolicy_dtor(&this->policy);

    for(i=0; i<this->shutdown_count; i++)
        SetEvent(this->shutdown_events[i]);
    MSVCRT_operator_delete(this->shutdown_events);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Id, 4)
unsigned int __thiscall ThreadScheduler_Id(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetNumberOfVirtualProcessors, 4)
unsigned int __thiscall ThreadScheduler_GetNumberOfVirtualProcessors(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->virt_proc_no;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetPolicy, 8)
SchedulerPolicy* __thiscall ThreadScheduler_GetPolicy(
        const ThreadScheduler *this, SchedulerPolicy *ret)
{
    TRACE("(%p %p)\n", this, ret);
    return SchedulerPolicy_copy_ctor(ret, &this->policy);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Reference, 4)
unsigned int __thiscall ThreadScheduler_Reference(ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return InterlockedIncrement(&this->ref);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Release, 4)
unsigned int __thiscall ThreadScheduler_Release(ThreadScheduler *this)
{
    unsigned int ret = InterlockedDecrement(&this->ref);

    TRACE("(%p)\n", this);

    if(!ret) {
        ThreadScheduler_dtor(this);
        MSVCRT_operator_delete(this);
    }
    return ret;
}
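
/* Registered shutdown events are kept in a handle array that is reallocated with
 * doubled capacity under the scheduler's critical section; the events are signalled
 * from ThreadScheduler_dtor when the scheduler is destroyed. */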
DEFINE_THISCALL_WRAPPER(ThreadScheduler_RegisterShutdownEvent, 8)
void __thiscall ThreadScheduler_RegisterShutdownEvent(ThreadScheduler *this, HANDLE event)
{
    HANDLE *shutdown_events;
    int size;

    TRACE("(%p %p)\n", this, event);

    EnterCriticalSection(&this->cs);

    size = this->shutdown_size ? this->shutdown_size * 2 : 1;
    shutdown_events = MSVCRT_operator_new(size * sizeof(*shutdown_events));
    memcpy(shutdown_events, this->shutdown_events,
            this->shutdown_count * sizeof(*shutdown_events));
    MSVCRT_operator_delete(this->shutdown_events);
    this->shutdown_size = size;
    this->shutdown_events = shutdown_events;
    this->shutdown_events[this->shutdown_count++] = event;

    LeaveCriticalSection(&this->cs);
}
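
/* Attaching a scheduler pushes the context's previous scheduler onto a singly linked
 * list so a later CurrentScheduler::Detach could restore it (Detach itself is still a
 * stub below); attaching the scheduler that is already current raises
 * improper_scheduler_attach. */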
DEFINE_THISCALL_WRAPPER(ThreadScheduler_Attach, 4)
void __thiscall ThreadScheduler_Attach(ThreadScheduler *this)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    TRACE("(%p)\n", this);

    if(context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(context->scheduler.scheduler == &this->scheduler)
        throw_exception(EXCEPTION_IMPROPER_SCHEDULER_ATTACH, 0, NULL);

    if(context->scheduler.scheduler) {
        struct scheduler_list *l = MSVCRT_operator_new(sizeof(*l));
        *l = context->scheduler;
        context->scheduler.next = l;
    }
    context->scheduler.scheduler = &this->scheduler;
    ThreadScheduler_Reference(this);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup_loc, 8)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup_loc(
        ThreadScheduler *this, /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup, 4)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup(ThreadScheduler *this)
{
    FIXME("(%p) stub\n", this);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask_loc, 16)
void __thiscall ThreadScheduler_ScheduleTask_loc(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data, /*location*/void *placement)
{
    FIXME("(%p %p %p %p) stub\n", this, proc, data, placement);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask, 12)
void __thiscall ThreadScheduler_ScheduleTask(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data)
{
    FIXME("(%p %p %p) stub\n", this, proc, data);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_IsAvailableLocation, 8)
MSVCRT_bool __thiscall ThreadScheduler_IsAvailableLocation(
        const ThreadScheduler *this, const /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return FALSE;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_vector_dtor, 8)
Scheduler* __thiscall ThreadScheduler_vector_dtor(ThreadScheduler *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ThreadScheduler_dtor(this+i);
        MSVCRT_operator_delete(ptr);
    } else {
        ThreadScheduler_dtor(this);
        if(flags & 1)
            MSVCRT_operator_delete(this);
    }

    return &this->scheduler;
}

static ThreadScheduler* ThreadScheduler_ctor(ThreadScheduler *this,
        const SchedulerPolicy *policy)
{
    SYSTEM_INFO si;

    TRACE("(%p)->()\n", this);

    this->scheduler.vtable = &MSVCRT_ThreadScheduler_vtable;
    this->ref = 1;
    this->id = InterlockedIncrement(&scheduler_id);
    SchedulerPolicy_copy_ctor(&this->policy, policy);

    GetSystemInfo(&si);
    this->virt_proc_no = SchedulerPolicy_GetPolicyValue(&this->policy, MaxConcurrency);
    if(this->virt_proc_no > si.dwNumberOfProcessors)
        this->virt_proc_no = si.dwNumberOfProcessors;

    this->shutdown_count = this->shutdown_size = 0;
    this->shutdown_events = NULL;

    InitializeCriticalSection(&this->cs);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": ThreadScheduler");
    return this;
}

/* ?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z */
/* ?Create@Scheduler@Concurrency@@SAPEAV12@AEBVSchedulerPolicy@2@@Z */
Scheduler* __cdecl Scheduler_Create(const SchedulerPolicy *policy)
{
    ThreadScheduler *ret;

    TRACE("(%p)\n", policy);

    ret = MSVCRT_operator_new(sizeof(*ret));
    return &ThreadScheduler_ctor(ret, policy)->scheduler;
}

/* ?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ */
void __cdecl Scheduler_ResetDefaultSchedulerPolicy(void)
{
    TRACE("()\n");

    EnterCriticalSection(&default_scheduler_cs);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    SchedulerPolicy_ctor(&default_scheduler_policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl Scheduler_SetDefaultSchedulerPolicy(const SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler_policy.policy_container)
        SchedulerPolicy_copy_ctor(&default_scheduler_policy, policy);
    else
        SchedulerPolicy_op_assign(&default_scheduler_policy, policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Create@CurrentScheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?Create@CurrentScheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl CurrentScheduler_Create(const SchedulerPolicy *policy)
{
    Scheduler *scheduler;

    TRACE("(%p)\n", policy);

    scheduler = Scheduler_Create(policy);
    call_Scheduler_Attach(scheduler);
}

/* ?Detach@CurrentScheduler@Concurrency@@SAXXZ */
void __cdecl CurrentScheduler_Detach(void)
{
    FIXME("() stub\n");
}
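
/* CurrentScheduler::Get returns the scheduler attached to the calling context.  If the
 * context has none yet, the process-wide default scheduler is created on demand
 * (double-checked under default_scheduler_cs, constructing default_scheduler_policy
 * first if none was set), attached to the context and referenced before being returned. */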
static void create_default_scheduler(void)
{
    if(default_scheduler)
        return;

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler) {
        ThreadScheduler *scheduler;

        if(!default_scheduler_policy.policy_container)
            SchedulerPolicy_ctor(&default_scheduler_policy);

        scheduler = MSVCRT_operator_new(sizeof(*scheduler));
        ThreadScheduler_ctor(scheduler, &default_scheduler_policy);
        default_scheduler = scheduler;
    }
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Get@CurrentScheduler@Concurrency@@SAPAVScheduler@2@XZ */
/* ?Get@CurrentScheduler@Concurrency@@SAPEAVScheduler@2@XZ */
Scheduler* __cdecl CurrentScheduler_Get(void)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    TRACE("()\n");

    if(context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return NULL;
    }

    if(context->scheduler.scheduler)
        return context->scheduler.scheduler;

    create_default_scheduler();
    context->scheduler.scheduler = &default_scheduler->scheduler;
    ThreadScheduler_Reference(default_scheduler);
    return &default_scheduler->scheduler;
}

/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@AAVlocation@2@@Z */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@AEAVlocation@2@@Z */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup_loc(/*location*/void *placement)
{
    FIXME("(%p) stub\n", placement);
    return NULL;
}

/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@XZ */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@XZ */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup(void)
{
    FIXME("() stub\n");
    return NULL;
}

/* ?GetNumberOfVirtualProcessors@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_GetNumberOfVirtualProcessors(void)
{
    FIXME("() stub\n");
    return 0;
}

/* ?GetPolicy@CurrentScheduler@Concurrency@@SA?AVSchedulerPolicy@2@XZ */
SchedulerPolicy* __cdecl CurrentScheduler_GetPolicy(SchedulerPolicy *policy)
{
    FIXME("(%p) stub\n", policy);
    return NULL;
}

/* ?Id@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_Id(void)
{
    FIXME("() stub\n");
    return 0;
}

/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NABVlocation@2@@Z */
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NAEBVlocation@2@@Z */
MSVCRT_bool __cdecl CurrentScheduler_IsAvailableLocation(const /*location*/void *placement)
{
    FIXME("(%p) stub\n", placement);
    return 0;
}

/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPAX@Z */
/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPEAX@Z */
void __cdecl CurrentScheduler_RegisterShutdownEvent(HANDLE event)
{
    FIXME("(%p) stub\n", event);
}

/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0AAVlocation@2@@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0AEAVlocation@2@@Z */
void __cdecl CurrentScheduler_ScheduleTask_loc(void (__cdecl *proc)(void*),
        void *data, /*location*/void *placement)
{
    FIXME("(%p %p %p) stub\n", proc, data, placement);
}

/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0@Z */
void __cdecl CurrentScheduler_ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    FIXME("(%p %p) stub\n", proc, data);
}

extern const vtable_ptr MSVCRT_type_info_vtable;
DEFINE_RTTI_DATA0(Context, 0, ".?AVContext@Concurrency@@")
DEFINE_RTTI_DATA1(ContextBase, 0, &Context_rtti_base_descriptor, ".?AVContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ExternalContextBase, 0, &ContextBase_rtti_base_descriptor,
        &Context_rtti_base_descriptor, ".?AVExternalContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA0(Scheduler, 0, ".?AVScheduler@Concurrency@@")
DEFINE_RTTI_DATA1(SchedulerBase, 0, &Scheduler_rtti_base_descriptor, ".?AVSchedulerBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ThreadScheduler, 0, &SchedulerBase_rtti_base_descriptor,
        &Scheduler_rtti_base_descriptor, ".?AVThreadScheduler@details@Concurrency@@")

#ifndef __GNUC__
void __asm_dummy_vtables(void) {
#endif
    __ASM_VTABLE(ExternalContextBase,
            VTABLE_ADD_FUNC(ExternalContextBase_GetId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetVirtualProcessorId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetScheduleGroupId)
            VTABLE_ADD_FUNC(ExternalContextBase_Unblock)
            VTABLE_ADD_FUNC(ExternalContextBase_IsSynchronouslyBlocked)
            VTABLE_ADD_FUNC(ExternalContextBase_vector_dtor));
    __ASM_VTABLE(ThreadScheduler,
            VTABLE_ADD_FUNC(ThreadScheduler_vector_dtor)
            VTABLE_ADD_FUNC(ThreadScheduler_Id)
            VTABLE_ADD_FUNC(ThreadScheduler_GetNumberOfVirtualProcessors)
            VTABLE_ADD_FUNC(ThreadScheduler_GetPolicy)
            VTABLE_ADD_FUNC(ThreadScheduler_Reference)
            VTABLE_ADD_FUNC(ThreadScheduler_Release)
            VTABLE_ADD_FUNC(ThreadScheduler_RegisterShutdownEvent)
            VTABLE_ADD_FUNC(ThreadScheduler_Attach)
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup_loc)
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup)
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask_loc)
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask)
            VTABLE_ADD_FUNC(ThreadScheduler_IsAvailableLocation));
#ifndef __GNUC__
}
#endif

void msvcrt_init_scheduler(void *base)
{
#ifdef __x86_64__
    init_Context_rtti(base);
    init_ContextBase_rtti(base);
    init_ExternalContextBase_rtti(base);
    init_Scheduler_rtti(base);
    init_SchedulerBase_rtti(base);
    init_ThreadScheduler_rtti(base);
#endif
}

void msvcrt_free_scheduler(void)
{
    if (context_tls_index != TLS_OUT_OF_INDEXES)
        TlsFree(context_tls_index);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    if(default_scheduler) {
        ThreadScheduler_dtor(default_scheduler);
        MSVCRT_operator_delete(default_scheduler);
    }
}

void msvcrt_free_scheduler_thread(void)
{
    Context *context = try_get_current_context();
    if (!context) return;
    call_Context_dtor(context, 1);
}

#endif /* _MSVCR_VER >= 100 */