/* dlls/msvcrt/scheduler.c — Wine msvcrt implementation of the Concurrency
 * runtime's Context/Scheduler classes (blob 48a9d1bc8ee8ab3d2f4ddf1d55a7e79c0889b979). */
1 /*
2 * msvcrt.dll C++ objects
4 * Copyright 2017 Piotr Caban
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
21 #include "config.h"
23 #include <stdarg.h>
25 #include "windef.h"
26 #include "winternl.h"
27 #include "wine/debug.h"
28 #include "msvcrt.h"
29 #include "cppexcept.h"
30 #include "cxx.h"
32 #if _MSVCR_VER >= 100
34 WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);
/* Monotonic id generators for contexts and schedulers; the first
 * InterlockedIncrement on each yields id 0. */
static int context_id = -1;
static int scheduler_id = -1;
#ifdef __i386__

/* Emit a thunk that converts a C (cdecl-style) call into a __thiscall
 * virtual dispatch: pop the return address and the "this" pointer into
 * eax/ecx, push the return address back, then jump through the vtable
 * slot at byte offset "off". */
#define DEFINE_VTBL_WRAPPER(off)            \
    __ASM_GLOBAL_FUNC(vtbl_wrapper_ ## off, \
            "popl %eax\n\t"                 \
            "popl %ecx\n\t"                 \
            "pushl %eax\n\t"                \
            "movl 0(%ecx), %eax\n\t"        \
            "jmp *" #off "(%eax)\n\t")

DEFINE_VTBL_WRAPPER(0);
DEFINE_VTBL_WRAPPER(4);
DEFINE_VTBL_WRAPPER(8);
DEFINE_VTBL_WRAPPER(20);

#endif
/* Keys accepted by SchedulerPolicy Get/SetPolicyValue.  The enum values
 * directly index the policies[] array in _policy_container. */
typedef enum {
    SchedulerKind,
    MaxConcurrency,
    MinConcurrency,
    TargetOversubscriptionFactor,
    LocalContextCacheSize,
    ContextStackSize,
    ContextPriority,
    SchedulingProtocol,
    DynamicProgressFeedback,
    WinRTInitialization,
    last_policy_id   /* number of valid keys; not itself a valid key */
} PolicyElementKey;
/* Matches the native layout of Concurrency::SchedulerPolicy: one pointer
 * to a heap-allocated table with a value for every PolicyElementKey. */
typedef struct {
    struct _policy_container {
        unsigned int policies[last_policy_id];
    } *policy_container;
} SchedulerPolicy;
/* Abstract Concurrency::Context: only a vtable pointer.  The macros below
 * call the virtual methods through the vtable at the given byte offset
 * (0 = GetId, 4 = GetVirtualProcessorId, 8 = GetScheduleGroupId,
 * 20 = vector deleting destructor). */
typedef struct {
    const vtable_ptr *vtable;
} Context;

#define call_Context_GetId(this) CALL_VTBL_FUNC(this, 0, \
        unsigned int, (const Context*), (this))
#define call_Context_GetVirtualProcessorId(this) CALL_VTBL_FUNC(this, 4, \
        unsigned int, (const Context*), (this))
#define call_Context_GetScheduleGroupId(this) CALL_VTBL_FUNC(this, 8, \
        unsigned int, (const Context*), (this))
#define call_Context_dtor(this, flags) CALL_VTBL_FUNC(this, 20, \
        Context*, (Context*, unsigned int), (this, flags))
/* Block kept in the per-context allocator cache (see Concurrency_Alloc).
 * While on a free list the "free" view is active; while handed out, the
 * "alloc" view records which size bucket the memory came from
 * (bucket == -1 means plain operator new, not cached). */
union allocator_cache_entry {
    struct _free {
        int depth;                          /* entries behind this node on the list */
        union allocator_cache_entry *next;
    } free;
    struct _alloc {
        int bucket;                         /* size bucket index, or -1 */
        char mem[1];                        /* start of user-visible memory */
    } alloc;
};
/* Context implementation used for threads that were not created by the
 * scheduler itself.  allocator_cache[i] chains freed blocks of size
 * 1 << (i + 4) bytes (16..2048). */
typedef struct {
    Context context;
    unsigned int id;
    union allocator_cache_entry *allocator_cache[8];
} ExternalContextBase;
extern const vtable_ptr MSVCRT_ExternalContextBase_vtable;
static void ExternalContextBase_ctor(ExternalContextBase*);
/* Abstract Concurrency::Scheduler: only a vtable pointer. */
typedef struct {
    const vtable_ptr *vtable;
} Scheduler;

/* Concrete scheduler backed by OS threads. */
typedef struct {
    Scheduler scheduler;
    LONG ref;                    /* reference count (Reference/Release) */
    unsigned int id;
    unsigned int virt_proc_no;   /* virtual processors actually in use */
    SchedulerPolicy policy;
} ThreadScheduler;
extern const vtable_ptr MSVCRT_ThreadScheduler_vtable;
/* TLS slot holding the current thread's Context; allocated lazily by
 * get_current_context(). */
static int context_tls_index = TLS_OUT_OF_INDEXES;

/* Return the Context bound to the calling thread, or NULL if none has
 * been created yet.  Never allocates. */
static Context* try_get_current_context(void)
{
    if (context_tls_index == TLS_OUT_OF_INDEXES)
        return NULL;
    return TlsGetValue(context_tls_index);
}
/* Return the calling thread's Context, creating an ExternalContextBase
 * (and the shared TLS slot) on first use.  Throws a
 * scheduler_resource_allocation_error if no TLS index can be allocated. */
static Context* get_current_context(void)
{
    Context *ret;

    if (context_tls_index == TLS_OUT_OF_INDEXES) {
        int tls_index = TlsAlloc();
        if (tls_index == TLS_OUT_OF_INDEXES) {
            throw_exception(EXCEPTION_SCHEDULER_RESOURCE_ALLOCATION_ERROR,
                    HRESULT_FROM_WIN32(GetLastError()), NULL);
            return NULL;
        }

        /* Racing threads may both allocate a slot; keep the winner's and
         * free ours if we lost the compare-exchange. */
        if(InterlockedCompareExchange(&context_tls_index, tls_index, TLS_OUT_OF_INDEXES) != TLS_OUT_OF_INDEXES)
            TlsFree(tls_index);
    }

    ret = TlsGetValue(context_tls_index);
    if (!ret) {
        /* Lazily wrap this externally-created thread in a context object. */
        ExternalContextBase *context = MSVCRT_operator_new(sizeof(ExternalContextBase));
        ExternalContextBase_ctor(context);
        TlsSetValue(context_tls_index, context);
        ret = &context->context;
    }
    return ret;
}
/* ?CurrentContext@Context@Concurrency@@SAPAV12@XZ */
/* ?CurrentContext@Context@Concurrency@@SAPEAV12@XZ */
/* Static Context::CurrentContext: always returns a context, creating one
 * on demand. */
Context* __cdecl Context_CurrentContext(void)
{
    TRACE("()\n");
    return get_current_context();
}

/* ?Id@Context@Concurrency@@SAIXZ */
/* Static Context::Id: id of the current context, or -1 if none exists. */
unsigned int __cdecl Context_Id(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetId(ctx) : -1;
}
/* Unimplemented static Context methods; each traces a FIXME only. */

/* ?Block@Context@Concurrency@@SAXXZ */
void __cdecl Context_Block(void)
{
    FIXME("()\n");
}

/* ?Yield@Context@Concurrency@@SAXXZ */
void __cdecl Context_Yield(void)
{
    FIXME("()\n");
}

/* ?_SpinYield@Context@Concurrency@@SAXXZ */
void __cdecl Context__SpinYield(void)
{
    FIXME("()\n");
}

/* ?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ */
MSVCRT_bool __cdecl Context_IsCurrentTaskCollectionCanceling(void)
{
    FIXME("()\n");
    return FALSE;
}

/* ?Oversubscribe@Context@Concurrency@@SAX_N@Z */
void __cdecl Context_Oversubscribe(MSVCRT_bool begin)
{
    FIXME("(%x)\n", begin);
}
/* ?ScheduleGroupId@Context@Concurrency@@SAIXZ */
/* Schedule group id of the current context, or -1 without a context. */
unsigned int __cdecl Context_ScheduleGroupId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetScheduleGroupId(ctx) : -1;
}

/* ?VirtualProcessorId@Context@Concurrency@@SAIXZ */
/* Virtual processor id of the current context, or -1 without a context. */
unsigned int __cdecl Context_VirtualProcessorId(void)
{
    Context *ctx = try_get_current_context();
    FIXME("()\n");
    return ctx ? call_Context_GetVirtualProcessorId(ctx) : -1;
}
/* Virtual methods of ExternalContextBase, invoked through its vtable. */

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetId, 4)
unsigned int __thiscall ExternalContextBase_GetId(const ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetVirtualProcessorId, 4)
unsigned int __thiscall ExternalContextBase_GetVirtualProcessorId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetScheduleGroupId, 4)
unsigned int __thiscall ExternalContextBase_GetScheduleGroupId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Unblock, 4)
void __thiscall ExternalContextBase_Unblock(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_IsSynchronouslyBlocked, 4)
MSVCRT_bool __thiscall ExternalContextBase_IsSynchronouslyBlocked(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return FALSE;
}
252 static void ExternalContextBase_dtor(ExternalContextBase *this)
254 union allocator_cache_entry *next, *cur;
255 int i;
257 /* TODO: move the allocator cache to scheduler so it can be reused */
258 for(i=0; i<sizeof(this->allocator_cache)/sizeof(this->allocator_cache[0]); i++) {
259 for(cur = this->allocator_cache[i]; cur; cur=next) {
260 next = cur->free.next;
261 MSVCRT_operator_delete(cur);
DEFINE_THISCALL_WRAPPER(ExternalContextBase_vector_dtor, 8)
/* Vector deleting destructor: flags bit 1 (value 2) means an array is
 * being destroyed, bit 0 (value 1) means the memory is freed as well. */
Context* __thiscall ExternalContextBase_vector_dtor(ExternalContextBase *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ExternalContextBase_dtor(this+i);
        MSVCRT_operator_delete(ptr);
    } else {
        ExternalContextBase_dtor(this);
        if(flags & 1)
            MSVCRT_operator_delete(this);
    }

    return &this->context;
}
/* Initialize an ExternalContextBase: set the vtable, assign a fresh id
 * and start with an empty allocator cache. */
static void ExternalContextBase_ctor(ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);

    this->context.vtable = &MSVCRT_ExternalContextBase_vtable;
    this->id = InterlockedIncrement(&context_id);
    memset(this->allocator_cache, 0, sizeof(this->allocator_cache));
}
/* ?Alloc@Concurrency@@YAPAXI@Z */
/* ?Alloc@Concurrency@@YAPEAX_K@Z */
/* Concurrency suballocator.  Small requests are served from the current
 * ExternalContextBase's cache of power-of-two buckets (16..2048 bytes);
 * oversized requests, or requests made from a non-external context, fall
 * back to operator new.  The originating bucket index is stored in the
 * block header so Concurrency_Free knows where to return it. */
void * CDECL Concurrency_Alloc(MSVCRT_size_t size)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    union allocator_cache_entry *p;

    /* reserve room for the header and never allocate below header size */
    size += FIELD_OFFSET(union allocator_cache_entry, alloc.mem);
    if (size < sizeof(*p))
        size = sizeof(*p);

    if (context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        /* not an ExternalContextBase: no cache available */
        p = MSVCRT_operator_new(size);
        p->alloc.bucket = -1;
    }else {
        int i;

        /* smallest bucket holds 1 << 4 bytes and must fit the header */
        C_ASSERT(sizeof(union allocator_cache_entry) <= 1 << 4);
        for(i=0; i<sizeof(context->allocator_cache)/sizeof(context->allocator_cache[0]); i++)
            if (1 << (i+4) >= size) break;

        if(i==sizeof(context->allocator_cache)/sizeof(context->allocator_cache[0])) {
            /* too large for any bucket */
            p = MSVCRT_operator_new(size);
            p->alloc.bucket = -1;
        }else if (context->allocator_cache[i]) {
            /* pop a cached block off the bucket's free list */
            p = context->allocator_cache[i];
            context->allocator_cache[i] = p->free.next;
            p->alloc.bucket = i;
        }else {
            /* bucket empty: allocate a full bucket-sized block */
            p = MSVCRT_operator_new(1 << (i+4));
            p->alloc.bucket = i;
        }
    }

    TRACE("(%ld) returning %p\n", size, p->alloc.mem);
    return p->alloc.mem;
}
/* ?Free@Concurrency@@YAXPAX@Z */
/* ?Free@Concurrency@@YAXPEAX@Z */
/* Counterpart of Concurrency_Alloc.  Blocks that came from a bucket
 * (bucket >= 0) are pushed back on the current context's free list unless
 * that list is already 20 entries deep; everything else is deleted. */
void CDECL Concurrency_Free(void* mem)
{
    union allocator_cache_entry *p = (union allocator_cache_entry*)((char*)mem-FIELD_OFFSET(union allocator_cache_entry, alloc.mem));
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    int bucket = p->alloc.bucket;

    TRACE("(%p)\n", mem);

    if (context->context.vtable != &MSVCRT_ExternalContextBase_vtable) {
        /* not an ExternalContextBase: nothing is ever cached */
        MSVCRT_operator_delete(p);
    }else {
        if(bucket >= 0 && bucket < sizeof(context->allocator_cache)/sizeof(context->allocator_cache[0]) &&
            (!context->allocator_cache[bucket] || context->allocator_cache[bucket]->free.depth < 20)) {
            p->free.next = context->allocator_cache[bucket];
            p->free.depth = p->free.next ? p->free.next->free.depth+1 : 0;
            context->allocator_cache[bucket] = p;
        }else {
            MSVCRT_operator_delete(p);
        }
    }
}
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z */
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QEAAIW4PolicyElementKey@2@I@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetPolicyValue, 12)
/* Validate and set one policy value; returns the previous value.
 * Min/MaxConcurrency must be changed through SetConcurrencyLimits and are
 * rejected here with invalid_scheduler_policy_key. */
unsigned int __thiscall SchedulerPolicy_SetPolicyValue(SchedulerPolicy *this,
        PolicyElementKey policy, unsigned int val)
{
    unsigned int ret;

    TRACE("(%p %d %d)\n", this, policy, val);

    if (policy == MinConcurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "MinConcurrency");
    if (policy == MaxConcurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "MaxConcurrency");
    if (policy < SchedulerKind || policy >= last_policy_id)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "Invalid policy");

    /* per-key value validation */
    switch(policy) {
    case SchedulerKind:
        if (val)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "SchedulerKind");
        break;
    case TargetOversubscriptionFactor:
        if (!val)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE,
                    0, "TargetOversubscriptionFactor");
        break;
    case ContextPriority:
        if (((int)val < -7 /* THREAD_PRIORITY_REALTIME_LOWEST */
            || val > 6 /* THREAD_PRIORITY_REALTIME_HIGHEST */)
            && val != THREAD_PRIORITY_IDLE && val != THREAD_PRIORITY_TIME_CRITICAL
            && val != INHERIT_THREAD_PRIORITY)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "ContextPriority");
        break;
    case SchedulingProtocol:
    case DynamicProgressFeedback:
    case WinRTInitialization:
        /* boolean-valued keys */
        if (val != 0 && val != 1)
            throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "SchedulingProtocol");
        break;
    default:
        break;
    }

    ret = this->policy_container->policies[policy];
    this->policy_container->policies[policy] = val;
    return ret;
}
/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z */
/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QEAAXII@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetConcurrencyLimits, 12)
/* Set Min/MaxConcurrency atomically, enforcing min <= max and max != 0. */
void __thiscall SchedulerPolicy_SetConcurrencyLimits(SchedulerPolicy *this,
        unsigned int min_concurrency, unsigned int max_concurrency)
{
    TRACE("(%p %d %d)\n", this, min_concurrency, max_concurrency);

    if (min_concurrency > max_concurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_THREAD_SPECIFICATION, 0, NULL);
    if (!max_concurrency)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_VALUE, 0, "MaxConcurrency");

    this->policy_container->policies[MinConcurrency] = min_concurrency;
    this->policy_container->policies[MaxConcurrency] = max_concurrency;
}
/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z */
/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QEBAIW4PolicyElementKey@2@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_GetPolicyValue, 8)
/* Read a policy value; throws invalid_scheduler_policy_key for bad keys. */
unsigned int __thiscall SchedulerPolicy_GetPolicyValue(
        const SchedulerPolicy *this, PolicyElementKey policy)
{
    TRACE("(%p %d)\n", this, policy);

    if (policy < SchedulerKind || policy >= last_policy_id)
        throw_exception(EXCEPTION_INVALID_SCHEDULER_POLICY_KEY, 0, "Invalid policy");
    return this->policy_container->policies[policy];
}
436 /* ??0SchedulerPolicy@Concurrency@@QAE@XZ */
437 /* ??0SchedulerPolicy@Concurrency@@QEAA@XZ */
438 DEFINE_THISCALL_WRAPPER(SchedulerPolicy_ctor, 4)
439 SchedulerPolicy* __thiscall SchedulerPolicy_ctor(SchedulerPolicy *this)
441 TRACE("(%p)\n", this);
443 this->policy_container = MSVCRT_operator_new(sizeof(*this->policy_container));
444 /* TODO: default values can probably be affected by CurrentScheduler */
445 this->policy_container->policies[SchedulerKind] = 0;
446 this->policy_container->policies[MaxConcurrency] = -1;
447 this->policy_container->policies[MinConcurrency] = 1;
448 this->policy_container->policies[TargetOversubscriptionFactor] = 1;
449 this->policy_container->policies[LocalContextCacheSize] = 8;
450 this->policy_container->policies[ContextStackSize] = 0;
451 this->policy_container->policies[ContextPriority] = THREAD_PRIORITY_NORMAL;
452 this->policy_container->policies[SchedulingProtocol] = 0;
453 this->policy_container->policies[DynamicProgressFeedback] = 1;
454 return this;
/* ??0SchedulerPolicy@Concurrency@@QAA@IZZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@_KZZ */
/* TODO: don't leak policy_container on exception */
/* Vararg constructor: applies n (key, value) pairs on top of the defaults.
 * Min/MaxConcurrency values are collected and applied together at the end
 * so that the min <= max validation sees both final values. */
SchedulerPolicy* __cdecl SchedulerPolicy_ctor_policies(
        SchedulerPolicy *this, MSVCRT_size_t n, ...)
{
    unsigned int min_concurrency, max_concurrency;
    __ms_va_list valist;
    MSVCRT_size_t i;

    TRACE("(%p %ld)\n", this, n);

    SchedulerPolicy_ctor(this);
    min_concurrency = this->policy_container->policies[MinConcurrency];
    max_concurrency = this->policy_container->policies[MaxConcurrency];

    __ms_va_start(valist, n);
    for(i=0; i<n; i++) {
        PolicyElementKey policy = va_arg(valist, PolicyElementKey);
        unsigned int val = va_arg(valist, unsigned int);

        if(policy == MinConcurrency)
            min_concurrency = val;
        else if(policy == MaxConcurrency)
            max_concurrency = val;
        else
            SchedulerPolicy_SetPolicyValue(this, policy, val);
    }
    __ms_va_end(valist);

    SchedulerPolicy_SetConcurrencyLimits(this, min_concurrency, max_concurrency);
    return this;
}
/* ??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z */
/* ??4SchedulerPolicy@Concurrency@@QEAAAEAV01@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_op_assign, 8)
/* Assignment: copy the whole policy table (both sides already own a
 * policy_container). */
SchedulerPolicy* __thiscall SchedulerPolicy_op_assign(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    memcpy(this->policy_container->policies, rhs->policy_container->policies,
            sizeof(this->policy_container->policies));
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z */
/* ??0SchedulerPolicy@Concurrency@@QEAA@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_copy_ctor, 8)
/* Copy constructor: default-construct, then assign from rhs. */
SchedulerPolicy* __thiscall SchedulerPolicy_copy_ctor(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    SchedulerPolicy_ctor(this);
    return SchedulerPolicy_op_assign(this, rhs);
}
/* ??1SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??1SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_dtor, 4)
/* Destructor: releases the heap-allocated policy table. */
void __thiscall SchedulerPolicy_dtor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);
    MSVCRT_operator_delete(this->policy_container);
}

/* Internal ThreadScheduler destructor; warns if references remain. */
static void ThreadScheduler_dtor(ThreadScheduler *this)
{
    if(this->ref != 0) WARN("ref = %d\n", this->ref);
    SchedulerPolicy_dtor(&this->policy);
}
/* ThreadScheduler virtual methods (accessors and reference counting). */

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Id, 4)
unsigned int __thiscall ThreadScheduler_Id(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetNumberOfVirtualProcessors, 4)
unsigned int __thiscall ThreadScheduler_GetNumberOfVirtualProcessors(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->virt_proc_no;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetPolicy, 8)
/* Copy-construct the scheduler's policy into caller-provided *ret. */
SchedulerPolicy* __thiscall ThreadScheduler_GetPolicy(
        const ThreadScheduler *this, SchedulerPolicy *ret)
{
    TRACE("(%p %p)\n", this, ret);
    return SchedulerPolicy_copy_ctor(ret, &this->policy);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Reference, 4)
/* Add a reference; returns the new reference count. */
unsigned int __thiscall ThreadScheduler_Reference(ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return InterlockedIncrement(&this->ref);
}
DEFINE_THISCALL_WRAPPER(ThreadScheduler_Release, 4)
/* Drop a reference; destroys and frees the scheduler when the count
 * reaches zero.  Returns the new reference count. */
unsigned int __thiscall ThreadScheduler_Release(ThreadScheduler *this)
{
    unsigned int ret = InterlockedDecrement(&this->ref);

    TRACE("(%p)\n", this);

    if(!ret) {
        ThreadScheduler_dtor(this);
        MSVCRT_operator_delete(this);
    }
    return ret;
}
/* Unimplemented ThreadScheduler virtuals; each traces a FIXME only. */

DEFINE_THISCALL_WRAPPER(ThreadScheduler_RegisterShutdownEvent, 8)
void __thiscall ThreadScheduler_RegisterShutdownEvent(ThreadScheduler *this, HANDLE event)
{
    FIXME("(%p %p) stub\n", this, event);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Attach, 4)
void __thiscall ThreadScheduler_Attach(ThreadScheduler *this)
{
    FIXME("(%p) stub\n", this);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup_loc, 8)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup_loc(
        ThreadScheduler *this, /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup, 4)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup(ThreadScheduler *this)
{
    FIXME("(%p) stub\n", this);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask_loc, 16)
void __thiscall ThreadScheduler_ScheduleTask_loc(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data, /*location*/void *placement)
{
    FIXME("(%p %p %p %p) stub\n", this, proc, data, placement);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask, 12)
void __thiscall ThreadScheduler_ScheduleTask(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data)
{
    FIXME("(%p %p %p) stub\n", this, proc, data);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_IsAvailableLocation, 8)
MSVCRT_bool __thiscall ThreadScheduler_IsAvailableLocation(
        const ThreadScheduler *this, const /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return FALSE;
}
DEFINE_THISCALL_WRAPPER(ThreadScheduler_vector_dtor, 8)
/* Vector deleting destructor: flags bit 1 (value 2) means an array is
 * being destroyed, bit 0 (value 1) means the memory is freed as well. */
Scheduler* __thiscall ThreadScheduler_vector_dtor(ThreadScheduler *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ThreadScheduler_dtor(this+i);
        MSVCRT_operator_delete(ptr);
    } else {
        ThreadScheduler_dtor(this);
        if(flags & 1)
            MSVCRT_operator_delete(this);
    }

    return &this->scheduler;
}
/* Initialize a ThreadScheduler: refcount 1, fresh id, copied policy.
 * The virtual processor count is the policy's MaxConcurrency capped at
 * the number of processors reported by the system. */
static ThreadScheduler* ThreadScheduler_ctor(ThreadScheduler *this,
        const SchedulerPolicy *policy)
{
    SYSTEM_INFO si;

    TRACE("(%p)->()\n", this);

    this->scheduler.vtable = &MSVCRT_ThreadScheduler_vtable;
    this->ref = 1;
    this->id = InterlockedIncrement(&scheduler_id);
    SchedulerPolicy_copy_ctor(&this->policy, policy);

    GetSystemInfo(&si);
    this->virt_proc_no = SchedulerPolicy_GetPolicyValue(&this->policy, MaxConcurrency);
    if(this->virt_proc_no > si.dwNumberOfProcessors)
        this->virt_proc_no = si.dwNumberOfProcessors;

    return this;
}
/* ?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z */
/* ?Create@Scheduler@Concurrency@@SAPEAV12@AEBVSchedulerPolicy@2@@Z */
/* Factory: allocate and construct a ThreadScheduler; caller releases it
 * through the vtable's Release. */
Scheduler* __cdecl Scheduler_Create(const SchedulerPolicy *policy)
{
    ThreadScheduler *ret;

    TRACE("(%p)\n", policy);

    ret = MSVCRT_operator_new(sizeof(*ret));
    return &ThreadScheduler_ctor(ret, policy)->scheduler;
}
/* ?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ */
/* Stub: default-policy management is not implemented. */
void __cdecl Scheduler_ResetDefaultSchedulerPolicy(void)
{
    FIXME("() stub\n");
}

/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl Scheduler_SetDefaultSchedulerPolicy(const SchedulerPolicy *policy)
{
    FIXME("(%p) stub\n", policy);
}
extern const vtable_ptr MSVCRT_type_info_vtable;
/* RTTI descriptors mirroring the native class hierarchies:
 * ExternalContextBase -> ContextBase -> Context, and
 * ThreadScheduler -> SchedulerBase -> Scheduler. */
DEFINE_RTTI_DATA0(Context, 0, ".?AVContext@Concurrency@@")
DEFINE_RTTI_DATA1(ContextBase, 0, &Context_rtti_base_descriptor, ".?AVContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ExternalContextBase, 0, &ContextBase_rtti_base_descriptor,
        &Context_rtti_base_descriptor, ".?AVExternalContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA0(Scheduler, 0, ".?AVScheduler@Concurrency@@")
DEFINE_RTTI_DATA1(SchedulerBase, 0, &Scheduler_rtti_base_descriptor, ".?AVSchedulerBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ThreadScheduler, 0, &SchedulerBase_rtti_base_descriptor,
        &Scheduler_rtti_base_descriptor, ".?AVThreadScheduler@details@Concurrency@@")
/* Vtable definitions; slot order must match the native ABI layout used by
 * the call_Context_* macros above.  The dummy function wrapper is only
 * needed for compilers where __ASM_VTABLE must live inside a function. */
#ifndef __GNUC__
void __asm_dummy_vtables(void) {
#endif
    __ASM_VTABLE(ExternalContextBase,
            VTABLE_ADD_FUNC(ExternalContextBase_GetId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetVirtualProcessorId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetScheduleGroupId)
            VTABLE_ADD_FUNC(ExternalContextBase_Unblock)
            VTABLE_ADD_FUNC(ExternalContextBase_IsSynchronouslyBlocked)
            VTABLE_ADD_FUNC(ExternalContextBase_vector_dtor));
    __ASM_VTABLE(ThreadScheduler,
            VTABLE_ADD_FUNC(ThreadScheduler_vector_dtor)
            VTABLE_ADD_FUNC(ThreadScheduler_Id)
            VTABLE_ADD_FUNC(ThreadScheduler_GetNumberOfVirtualProcessors)
            VTABLE_ADD_FUNC(ThreadScheduler_GetPolicy)
            VTABLE_ADD_FUNC(ThreadScheduler_Reference)
            VTABLE_ADD_FUNC(ThreadScheduler_Release)
            VTABLE_ADD_FUNC(ThreadScheduler_RegisterShutdownEvent)
            VTABLE_ADD_FUNC(ThreadScheduler_Attach)
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup_loc)
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup)
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask_loc)
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask)
            VTABLE_ADD_FUNC(ThreadScheduler_IsAvailableLocation));
#ifndef __GNUC__
}
#endif
/* Module init: fix up the RTTI descriptors.  Only needed on 64-bit, where
 * the descriptors store image-relative offsets from "base". */
void msvcrt_init_scheduler(void *base)
{
#ifdef __x86_64__
    init_Context_rtti(base);
    init_ContextBase_rtti(base);
    init_ExternalContextBase_rtti(base);
    init_Scheduler_rtti(base);
    init_SchedulerBase_rtti(base);
    init_ThreadScheduler_rtti(base);
#endif
}
/* Process shutdown: release the context TLS slot, if it was allocated. */
void msvcrt_free_scheduler(void)
{
    if (context_tls_index != TLS_OUT_OF_INDEXES)
        TlsFree(context_tls_index);
}

/* Thread shutdown: destroy and free this thread's context, if any
 * (flags = 1 tells the virtual dtor to also free the memory). */
void msvcrt_free_scheduler_thread(void)
{
    Context *context = try_get_current_context();
    if (!context) return;
    call_Context_dtor(context, 1);
}
748 #endif /* _MSVCR_VER >= 100 */