libphobos: Fix backtraces in Fibers on AArch64.
[official-gcc.git] / libphobos / libdruntime / core / thread.d
blob: ff15d066a4968c4c8624eb60612b7b36dd30dfc3
1 /**
2 * The thread module provides support for thread creation and management.
4 * Copyright: Copyright Sean Kelly 2005 - 2012.
5 * License: Distributed under the
6 * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
7 * (See accompanying file LICENSE)
8 * Authors: Sean Kelly, Walter Bright, Alex Rønne Petersen, Martin Nowak
9 * Source: $(DRUNTIMESRC core/_thread.d)
12 /* NOTE: This file has been patched from the original DMD distribution to
13 * work with the GDC compiler.
15 module core.thread;
18 public import core.time; // for Duration
19 import core.exception : onOutOfMemoryError;
21 version (OSX)
22 version = Darwin;
23 else version (iOS)
24 version = Darwin;
25 else version (TVOS)
26 version = Darwin;
27 else version (WatchOS)
28 version = Darwin;
30 private
32 // interface to rt.tlsgc
33 import core.internal.traits : externDFunc;
35 alias rt_tlsgc_init = externDFunc!("rt.tlsgc.init", void* function() nothrow @nogc);
36 alias rt_tlsgc_destroy = externDFunc!("rt.tlsgc.destroy", void function(void*) nothrow @nogc);
38 alias ScanDg = void delegate(void* pstart, void* pend) nothrow;
39 alias rt_tlsgc_scan =
40 externDFunc!("rt.tlsgc.scan", void function(void*, scope ScanDg) nothrow);
42 alias rt_tlsgc_processGCMarks =
43 externDFunc!("rt.tlsgc.processGCMarks", void function(void*, scope IsMarkedDg) nothrow);
46 version (Solaris)
48 import core.sys.solaris.sys.priocntl;
49 import core.sys.solaris.sys.types;
52 // this should be true for most architectures
53 version (GNU_StackGrowsDown)
54 version = StackGrowsDown;
56 /**
57 * Returns the process ID of the calling process, which is guaranteed to be
58 * unique on the system. This call is always successful.
60 * Example:
61 * ---
62 * writefln("Current process id: %s", getpid());
63 * ---
65 version (Posix)
67 alias getpid = core.sys.posix.unistd.getpid;
69 else version (Windows)
71 alias getpid = core.sys.windows.windows.GetCurrentProcessId;
75 ///////////////////////////////////////////////////////////////////////////////
76 // Thread and Fiber Exceptions
77 ///////////////////////////////////////////////////////////////////////////////
/**
 * Base class for thread exceptions.
 *
 * Thrown by threading primitives (e.g. `Thread.start`, `Thread.join`)
 * when an operating-system level thread operation fails.
 */
class ThreadException : Exception
{
    /// Constructs a ThreadException with an optional chained Throwable.
    @safe pure nothrow this( string msg, string file = __FILE__, size_t line = __LINE__, Throwable next = null )
    {
        super( msg, file, line, next );
    }

    /// Convenience overload taking the chained Throwable second.
    @safe pure nothrow this( string msg, Throwable next, string file = __FILE__, size_t line = __LINE__ )
    {
        super( msg, file, line, next );
    }
}
/**
 * Base class for thread errors, used by functions inside the GC when
 * allocations are unavailable (Error, not Exception, so it is not meant
 * to be caught in ordinary code).
 */
class ThreadError : Error
{
    /// Constructs a ThreadError with an optional chained Throwable.
    @safe pure nothrow this( string msg, string file = __FILE__, size_t line = __LINE__, Throwable next = null )
    {
        super( msg, file, line, next );
    }

    /// Convenience overload taking the chained Throwable second.
    @safe pure nothrow this( string msg, Throwable next, string file = __FILE__, size_t line = __LINE__ )
    {
        super( msg, file, line, next );
    }
}
113 private
115 import core.atomic, core.memory, core.sync.mutex;
118 // exposed by compiler runtime
120 extern (C) void rt_moduleTlsCtor();
121 extern (C) void rt_moduleTlsDtor();
124 * Hook for whatever EH implementation is used to save/restore some data
125 * per stack.
127 * Params:
128 * newContext = The return value of the prior call to this function
129 * where the stack was last swapped out, or null when a fiber stack
130 * is switched in for the first time.
132 extern(C) void* _d_eh_swapContext(void* newContext) nothrow @nogc;
134 version (DigitalMars)
136 version (Windows)
137 alias swapContext = _d_eh_swapContext;
138 else
140 extern(C) void* _d_eh_swapContextDwarf(void* newContext) nothrow @nogc;
void* swapContext(void* newContext) nothrow @nogc
{
    /* Detect at runtime which EH scheme this binary uses and cache the
     * answer, so only the first call pays for probing both hooks.
     * Eventually, determine it statically.
     *   0 = not yet determined
     *   1 = classic _d_eh_swapContext
     *   2 = DWARF-based _d_eh_swapContextDwarf
     */
    static int which = 0;
    final switch (which)
    {
    case 0:
    {
        // First swap-in of a fiber stack: both hooks are probed with null.
        assert(newContext == null);
        auto p = _d_eh_swapContext(newContext);
        auto pdwarf = _d_eh_swapContextDwarf(newContext);
        if (p)
        {
            which = 1;
            return p;
        }
        else if (pdwarf)
        {
            which = 2;
            return pdwarf;
        }
        return null;
    }
    case 1:
        return _d_eh_swapContext(newContext);
    case 2:
        return _d_eh_swapContextDwarf(newContext);
    }
}
175 else
176 alias swapContext = _d_eh_swapContext;
180 ///////////////////////////////////////////////////////////////////////////////
181 // Thread Entry Point and Signal Handlers
182 ///////////////////////////////////////////////////////////////////////////////
185 version (Windows)
187 private
189 import core.stdc.stdint : uintptr_t; // for _beginthreadex decl below
190 import core.stdc.stdlib; // for malloc, atexit
191 import core.sys.windows.windows;
192 import core.sys.windows.threadaux; // for OpenThreadHandle
194 extern (Windows) alias btex_fptr = uint function(void*);
195 extern (C) uintptr_t _beginthreadex(void*, uint, btex_fptr, void*, uint, uint*) nothrow;
198 // Entry point for Windows threads
// Wraps the user-supplied function/delegate: records the new thread's stack
// bounds, initializes TLS GC data, runs module TLS ctors/dtors around the
// user code, and captures any escaping Throwable into obj.m_unhandled so
// that Thread.join can rethrow or return it.  Always reports 0 to the OS.
200 extern (Windows) uint thread_entryPoint( void* arg ) nothrow
202 Thread obj = cast(Thread) arg;
203 assert( obj );
205 assert( obj.m_curr is &obj.m_main );
206 obj.m_main.bstack = getStackBottom();
207 obj.m_main.tstack = obj.m_main.bstack;
208 obj.m_tlsgcdata = rt_tlsgc_init();
210 Thread.setThis(obj);
211 Thread.add(obj);
212 scope (exit)
214 Thread.remove(obj);
216 Thread.add(&obj.m_main);
218 // NOTE: No GC allocations may occur until the stack pointers have
219 // been set and Thread.getThis returns a valid reference to
220 // this thread object (this latter condition is not strictly
221 // necessary on Windows but it should be followed for the
222 // sake of consistency).
224 // TODO: Consider putting an auto exception object here (using
225 // alloca) for OutOfMemoryError plus something to track
226 // whether an exception is in-flight?
// Chains t onto the end of obj.m_unhandled so no Throwable is lost.
228 void append( Throwable t )
230 if ( obj.m_unhandled is null )
231 obj.m_unhandled = t;
232 else
234 Throwable last = obj.m_unhandled;
235 while ( last.next !is null )
236 last = last.next;
237 last.next = t;
// Reset the x87 FPU to a known default state for the new thread.
241 version (D_InlineAsm_X86)
243 asm nothrow @nogc { fninit; }
248 rt_moduleTlsCtor();
251 obj.run();
253 catch ( Throwable t )
255 append( t );
// TLS dtors still run even if obj.run() threw; a Throwable from the
// dtors themselves is appended as well.
257 rt_moduleTlsDtor();
259 catch ( Throwable t )
261 append( t );
263 return 0;
// Returns a real (duplicated) handle for the calling thread.  The
// pseudo-handle from GetCurrentThread() is only meaningful inside the
// calling thread, so it is duplicated into a handle other threads can use.
// NOTE(review): the caller owns the duplicated handle and should
// CloseHandle it — confirm against call sites.
HANDLE GetCurrentThreadHandle() nothrow @nogc
{
    const uint DUPLICATE_SAME_ACCESS = 0x00000002;

    HANDLE curr = GetCurrentThread();
    HANDLE proc = GetCurrentProcess();
    HANDLE hndl;

    DuplicateHandle( proc, curr, proc, &hndl, 0, TRUE, DUPLICATE_SAME_ACCESS );
    return hndl;
}
280 else version (Posix)
282 private
284 import core.stdc.errno;
285 import core.sys.posix.semaphore;
286 import core.sys.posix.stdlib; // for malloc, valloc, free, atexit
287 import core.sys.posix.pthread;
288 import core.sys.posix.signal;
289 import core.sys.posix.time;
291 version (Darwin)
293 import core.sys.darwin.mach.thread_act;
294 import core.sys.darwin.pthread : pthread_mach_thread_np;
297 version (GNU)
299 import gcc.builtins;
303 // Entry point for POSIX threads
// POSIX counterpart of the Windows entry point: unpacks the Thread (and,
// for Shared builds, the pinned library list), records stack bounds, sets
// up TLS GC data, installs a pthread cleanup handler for abnormal
// termination, runs module TLS ctors/dtors around the user code, and
// captures any escaping Throwable into obj.m_unhandled.
305 extern (C) void* thread_entryPoint( void* arg ) nothrow
307 version (Shared)
// For Shared builds, arg is a malloc'ed 2-pointer packet built by start():
// [0] the Thread object, [1] the pinned loaded-libraries handle.
309 import rt.sections;
310 Thread obj = cast(Thread)(cast(void**)arg)[0];
311 auto loadedLibraries = (cast(void**)arg)[1];
312 .free(arg);
314 else
316 Thread obj = cast(Thread)arg;
318 assert( obj );
320 // loadedLibraries need to be inherited from parent thread
321 // before initializing GC for TLS (rt_tlsgc_init)
322 version (Shared) inheritLoadedLibraries(loadedLibraries);
324 assert( obj.m_curr is &obj.m_main );
325 obj.m_main.bstack = getStackBottom();
326 obj.m_main.tstack = obj.m_main.bstack;
327 obj.m_tlsgcdata = rt_tlsgc_init();
329 atomicStore!(MemoryOrder.raw)(obj.m_isRunning, true);
330 Thread.setThis(obj); // allocates lazy TLS (see Issue 11981)
331 Thread.add(obj); // can only receive signals from here on
332 scope (exit)
334 Thread.remove(obj);
335 atomicStore!(MemoryOrder.raw)(obj.m_isRunning, false);
337 Thread.add(&obj.m_main);
// Runs if the thread is cancelled or exits abnormally (via pthread
// cleanup), where the scope(exit) above would not execute.
339 static extern (C) void thread_cleanupHandler( void* arg ) nothrow @nogc
341 Thread obj = cast(Thread) arg;
342 assert( obj );
344 // NOTE: If the thread terminated abnormally, just set it as
345 // not running and let thread_suspendAll remove it from
346 // the thread list. This is safer and is consistent
347 // with the Windows thread code.
348 atomicStore!(MemoryOrder.raw)(obj.m_isRunning,false);
351 // NOTE: Using void to skip the initialization here relies on
352 // knowledge of how pthread_cleanup is implemented. It may
353 // not be appropriate for all platforms. However, it does
354 // avoid the need to link the pthread module. If any
355 // implementation actually requires default initialization
356 // then pthread_cleanup should be restructured to maintain
357 // the current lack of a link dependency.
358 static if ( __traits( compiles, pthread_cleanup ) )
360 pthread_cleanup cleanup = void;
361 cleanup.push( &thread_cleanupHandler, cast(void*) obj );
363 else static if ( __traits( compiles, pthread_cleanup_push ) )
365 pthread_cleanup_push( &thread_cleanupHandler, cast(void*) obj );
367 else
369 static assert( false, "Platform not supported." );
372 // NOTE: No GC allocations may occur until the stack pointers have
373 // been set and Thread.getThis returns a valid reference to
374 // this thread object (this latter condition is not strictly
375 // necessary on Windows but it should be followed for the
376 // sake of consistency).
378 // TODO: Consider putting an auto exception object here (using
379 // alloca) for OutOfMemoryError plus something to track
380 // whether an exception is in-flight?
// Chains t onto the end of obj.m_unhandled so no Throwable is lost.
382 void append( Throwable t )
384 if ( obj.m_unhandled is null )
385 obj.m_unhandled = t;
386 else
388 Throwable last = obj.m_unhandled;
389 while ( last.next !is null )
390 last = last.next;
391 last.next = t;
397 rt_moduleTlsCtor();
400 obj.run();
402 catch ( Throwable t )
404 append( t );
406 rt_moduleTlsDtor();
407 version (Shared) cleanupLoadedLibraries();
409 catch ( Throwable t )
411 append( t );
414 // NOTE: Normal cleanup is handled by scope(exit).
// Pop the cleanup handler without executing it (argument 0): on the
// normal path the scope(exit) above already performs the cleanup.
416 static if ( __traits( compiles, pthread_cleanup ) )
418 cleanup.pop( 0 );
420 else static if ( __traits( compiles, pthread_cleanup_push ) )
422 pthread_cleanup_pop( 0 );
425 return null;
430 // Used to track the number of suspended threads
432 __gshared sem_t suspendCount;
// Signal handler run in each thread when thread_suspendAll sends
// suspendSignalNumber.  Saves the thread's stack top, posts suspendCount
// so the suspender can count us, then blocks in sigsuspend until
// resumeSignalNumber arrives.
435 extern (C) void thread_suspendHandler( int sig ) nothrow
438 assert( sig == suspendSignalNumber );
440 body
442 void op(void* sp) nothrow
444 // NOTE: Since registers are being pushed and popped from the
445 // stack, any other stack data used by this function should
446 // be gone before the stack cleanup code is called below.
447 Thread obj = Thread.getThis();
448 assert(obj !is null);
450 if ( !obj.m_lock )
452 obj.m_curr.tstack = getStackTop();
// Wait with everything blocked except the resume signal.
455 sigset_t sigres = void;
456 int status;
458 status = sigfillset( &sigres );
459 assert( status == 0 );
461 status = sigdelset( &sigres, resumeSignalNumber );
462 assert( status == 0 );
464 version (FreeBSD) obj.m_suspendagain = false;
465 status = sem_post( &suspendCount );
466 assert( status == 0 );
468 sigsuspend( &sigres );
470 if ( !obj.m_lock )
472 obj.m_curr.tstack = obj.m_curr.bstack;
476 // avoid deadlocks on FreeBSD, see Issue 13416
// If libthr has this thread in a critical section, don't suspend here:
// post the semaphore, flag m_suspendagain, and let the suspender retry.
477 version (FreeBSD)
479 auto obj = Thread.getThis();
480 if (THR_IN_CRITICAL(obj.m_addr))
482 obj.m_suspendagain = true;
483 if (sem_post(&suspendCount)) assert(0);
484 return;
// callWithStackShell spills registers to the stack before invoking op, so
// the saved tstack covers register contents for GC scanning.
488 callWithStackShell(&op);
// Signal handler for resumeSignalNumber.
// NOTE(review): the visible body is empty — delivery of the signal alone
// appears sufficient to wake the sigsuspend in thread_suspendHandler;
// confirm against the unmangled source.
492 extern (C) void thread_resumeHandler( int sig ) nothrow
495 assert( sig == resumeSignalNumber );
497 body
502 // HACK libthr internal (thr_private.h) macro, used to
503 // avoid deadlocks in signal handler, see Issue 13416
// Returns true if the given libthr thread is inside a critical section,
// by peeking at private libthr state through a mirrored struct layout.
504 version (FreeBSD) bool THR_IN_CRITICAL(pthread_t p) nothrow @nogc
506 import core.sys.posix.config : c_long;
507 import core.sys.posix.sys.types : lwpid_t;
509 // If the layout of the beginning of libthr's pthread struct ever
510 // changes (unlikely), this cast is undefined behavior; compare with
// thr_private.h when updating.
511 static struct pthread
513 c_long tid;
514 static struct umutex { lwpid_t owner; uint flags; uint[2] ceilings; uint[4] spare; }
515 umutex lock;
516 uint cycle;
517 int locklevel;
518 int critical_count;
519 // ... (remaining libthr fields are irrelevant here)
521 auto priv = cast(pthread*)p;
522 return priv.locklevel > 0 || priv.critical_count > 0;
526 else
528 // NOTE: This is the only place threading versions are checked. If a new
529 // version is added, the module code will need to be searched for
530 // places where version-specific code may be required. This can be
531 // easily accomlished by searching for 'Windows' or 'Posix'.
532 static assert( false, "Unknown threading implementation." );
536 ///////////////////////////////////////////////////////////////////////////////
537 // Thread
538 ///////////////////////////////////////////////////////////////////////////////
542 * This class encapsulates all threading functionality for the D
543 * programming language. As thread manipulation is a required facility
544 * for garbage collection, all user threads should derive from this
545 * class, and instances of this class should never be explicitly deleted.
546 * A new thread may be created using either derivation or composition, as
547 * in the following example.
549 class Thread
551 ///////////////////////////////////////////////////////////////////////////
552 // Initialization
553 ///////////////////////////////////////////////////////////////////////////
557 * Initializes a thread object which is associated with a static
558 * D function.
560 * Params:
561 * fn = The thread function.
562 * sz = The stack size for this thread.
564 * In:
565 * fn must not be null.
567 this( void function() fn, size_t sz = 0 ) @safe pure nothrow @nogc
570 assert( fn );
572 body
574 this(sz);
// Assignment is wrapped in a @trusted lambda so this ctor can stay @safe.
575 () @trusted { m_fn = fn; }();
576 m_call = Call.FN;
577 m_curr = &m_main;
582 * Initializes a thread object which is associated with a dynamic
583 * D function.
585 * Params:
586 * dg = The thread function.
587 * sz = The stack size for this thread.
589 * In:
590 * dg must not be null.
592 this( void delegate() dg, size_t sz = 0 ) @safe pure nothrow @nogc
595 assert( dg );
597 body
599 this(sz);
// Assignment is wrapped in a @trusted lambda so this ctor can stay @safe.
600 () @trusted { m_dg = dg; }();
601 m_call = Call.DG;
602 m_curr = &m_main;
607 * Cleans up any remaining resources used by this object.
// Releases the OS thread handle (Windows) or detaches the pthread (POSIX)
// and frees the TLS GC data.  A cleared m_addr means there is nothing to
// release (never started, or already joined).
609 ~this() nothrow @nogc
611 if ( m_addr == m_addr.init )
613 return;
616 version (Windows)
618 m_addr = m_addr.init;
619 CloseHandle( m_hndl );
620 m_hndl = m_hndl.init;
622 else version (Posix)
// join() clears m_addr after pthread_join, so detach only happens for
// threads that were never joined.
624 pthread_detach( m_addr );
625 m_addr = m_addr.init;
627 version (Darwin)
629 m_tmach = m_tmach.init;
631 rt_tlsgc_destroy( m_tlsgcdata );
632 m_tlsgcdata = null;
636 ///////////////////////////////////////////////////////////////////////////
637 // General Actions
638 ///////////////////////////////////////////////////////////////////////////
642 * Starts the thread and invokes the function or delegate passed upon
643 * construction.
645 * In:
646 * This routine may only be called once per thread instance.
648 * Throws:
649 * ThreadException if the thread fails to start.
651 final Thread start() nothrow
654 assert( !next && !prev );
656 body
658 auto wasThreaded = multiThreadedFlag;
659 multiThreadedFlag = true;
660 scope( failure )
662 if ( !wasThreaded )
663 multiThreadedFlag = false;
666 version (Windows) {} else
667 version (Posix)
669 pthread_attr_t attr;
671 if ( pthread_attr_init( &attr ) )
672 onThreadError( "Error initializing thread attributes" );
673 if ( m_sz && pthread_attr_setstacksize( &attr, m_sz ) )
674 onThreadError( "Error initializing thread stack size" );
// NOTE(review): attr is never passed to pthread_attr_destroy on any path;
// on most implementations this leaks nothing, but POSIX permits attribute
// objects to hold resources — confirm and consider destroying after create.
677 version (Windows)
679 // NOTE: If a thread is just executing DllMain()
680 // while another thread is started here, it holds an OS internal
681 // lock that serializes DllMain with CreateThread. As the code
682 // might request a synchronization on slock (e.g. in thread_findByAddr()),
683 // we cannot hold that lock while creating the thread without
684 // creating a deadlock
686 // Solution: Create the thread in suspended state and then
687 // add and resume it with slock acquired
688 assert(m_sz <= uint.max, "m_sz must be less than or equal to uint.max");
689 m_hndl = cast(HANDLE) _beginthreadex( null, cast(uint) m_sz, &thread_entryPoint, cast(void*) this, CREATE_SUSPENDED, &m_addr );
690 if ( cast(size_t) m_hndl == 0 )
691 onThreadError( "Error creating thread" );
694 slock.lock_nothrow();
695 scope(exit) slock.unlock_nothrow();
// Record this thread in the about-to-start list (under slock) so the GC
// can account for it before the entry point registers it.
697 ++nAboutToStart;
698 pAboutToStart = cast(Thread*)realloc(pAboutToStart, Thread.sizeof * nAboutToStart);
699 pAboutToStart[nAboutToStart - 1] = this;
700 version (Windows)
702 if ( ResumeThread( m_hndl ) == -1 )
703 onThreadError( "Error resuming thread" );
705 else version (Posix)
707 // NOTE: This is also set to true by thread_entryPoint, but set it
708 // here as well so the calling thread will see the isRunning
709 // state immediately.
710 atomicStore!(MemoryOrder.raw)(m_isRunning, true);
711 scope( failure ) atomicStore!(MemoryOrder.raw)(m_isRunning, false);
713 version (Shared)
// Shared builds pass a malloc'ed 2-pointer packet (this + pinned libs);
// thread_entryPoint frees it.  On create failure, unpin and free here.
715 import rt.sections;
716 auto libs = pinLoadedLibraries();
717 auto ps = cast(void**).malloc(2 * size_t.sizeof);
718 if (ps is null) onOutOfMemoryError();
719 ps[0] = cast(void*)this;
720 ps[1] = cast(void*)libs;
721 if ( pthread_create( &m_addr, &attr, &thread_entryPoint, ps ) != 0 )
723 unpinLoadedLibraries(libs);
724 .free(ps);
725 onThreadError( "Error creating thread" );
728 else
730 if ( pthread_create( &m_addr, &attr, &thread_entryPoint, cast(void*) this ) != 0 )
731 onThreadError( "Error creating thread" );
734 version (Darwin)
736 m_tmach = pthread_mach_thread_np( m_addr );
737 if ( m_tmach == m_tmach.init )
738 onThreadError( "Error creating thread" );
741 return this;
746 * Waits for this thread to complete. If the thread terminated as the
747 * result of an unhandled exception, this exception will be rethrown.
749 * Params:
750 * rethrow = Rethrow any unhandled exception which may have caused this
751 * thread to terminate.
753 * Throws:
754 * ThreadException if the operation fails.
755 * Any exception not handled by the joined thread.
757 * Returns:
758 * Any exception not handled by this thread if rethrow = false, null
759 * otherwise.
761 final Throwable join( bool rethrow = true )
763 version (Windows)
765 if ( WaitForSingleObject( m_hndl, INFINITE ) != WAIT_OBJECT_0 )
766 throw new ThreadException( "Unable to join thread" );
767 // NOTE: m_addr must be cleared before m_hndl is closed to avoid
768 // a race condition with isRunning. The operation is done
769 // with atomicStore to prevent compiler reordering.
770 atomicStore!(MemoryOrder.raw)(*cast(shared)&m_addr, m_addr.init);
771 CloseHandle( m_hndl );
772 m_hndl = m_hndl.init;
774 else version (Posix)
776 if ( pthread_join( m_addr, null ) != 0 )
777 throw new ThreadException( "Unable to join thread" );
778 // NOTE: pthread_join acts as a substitute for pthread_detach,
779 // which is normally called by the dtor. Setting m_addr
780 // to zero ensures that pthread_detach will not be called
781 // on object destruction.
782 m_addr = m_addr.init;
// An exception that escaped the joined thread is rethrown here (default)
// or returned to the caller when rethrow is false.
784 if ( m_unhandled )
786 if ( rethrow )
787 throw m_unhandled;
788 return m_unhandled;
790 return null;
794 ///////////////////////////////////////////////////////////////////////////
795 // General Properties
796 ///////////////////////////////////////////////////////////////////////////
800 * Gets the OS identifier for this thread.
802 * Returns:
803 * If the thread hasn't been started yet, returns $(LREF ThreadID)$(D.init).
804 * Otherwise, returns the result of $(D GetCurrentThreadId) on Windows,
805 * and $(D pthread_self) on POSIX.
807 * The value is unique for the current process.
/**
 * Gets the OS identifier for this thread (ThreadID.init if not started).
 */
final @property ThreadID id() @safe @nogc
{
    synchronized( this )
    {
        return m_addr;
    }
}
819 * Gets the user-readable label for this thread.
821 * Returns:
822 * The name of this thread.
/**
 * Gets the user-readable label for this thread.
 */
final @property string name() @safe @nogc
{
    synchronized( this )
    {
        return m_name;
    }
}
834 * Sets the user-readable label for this thread.
836 * Params:
837 * val = The new name of this thread.
/**
 * Sets the user-readable label for this thread.
 *
 * Params:
 *  val = The new name of this thread.
 */
final @property void name( string val ) @safe @nogc
{
    synchronized( this )
    {
        m_name = val;
    }
}
849 * Gets the daemon status for this thread. While the runtime will wait for
850 * all normal threads to complete before tearing down the process, daemon
851 * threads are effectively ignored and thus will not prevent the process
852 * from terminating. In effect, daemon threads will be terminated
853 * automatically by the OS when the process exits.
855 * Returns:
856 * true if this is a daemon thread.
/**
 * Gets the daemon status for this thread.  Daemon threads do not keep the
 * process alive: the runtime tears down the process without waiting for
 * them.
 */
final @property bool isDaemon() @safe @nogc
{
    synchronized( this )
    {
        return m_isDaemon;
    }
}
868 * Sets the daemon status for this thread. While the runtime will wait for
869 * all normal threads to complete before tearing down the process, daemon
870 * threads are effectively ignored and thus will not prevent the process
871 * from terminating. In effect, daemon threads will be terminated
872 * automatically by the OS when the process exits.
874 * Params:
875 * val = The new daemon status for this thread.
/**
 * Sets the daemon status for this thread.  Daemon threads do not keep the
 * process alive: the runtime tears down the process without waiting for
 * them.
 *
 * Params:
 *  val = The new daemon status for this thread.
 */
final @property void isDaemon( bool val ) @safe @nogc
{
    synchronized( this )
    {
        m_isDaemon = val;
    }
}
887 * Tests whether this thread is running.
889 * Returns:
890 * true if the thread is running, false if not.
/**
 * Tests whether this thread is running.
 *
 * Returns:
 *  true if the thread is running, false if not.
 */
final @property bool isRunning() nothrow @nogc
{
    // Never started, or already joined/destroyed.
    if ( m_addr == m_addr.init )
    {
        return false;
    }

    version (Windows)
    {
        // STILL_ACTIVE as an exit code means the thread has not finished.
        uint ecode = 0;
        GetExitCodeThread( m_hndl, &ecode );
        return ecode == STILL_ACTIVE;
    }
    else version (Posix)
    {
        // Maintained by thread_entryPoint / cleanup handler.
        return atomicLoad(m_isRunning);
    }
}
912 ///////////////////////////////////////////////////////////////////////////
913 // Thread Priority Actions
914 ///////////////////////////////////////////////////////////////////////////
916 version (Windows)
// On Windows the priority bounds are fixed constants from the Win32 API.
918 @property static int PRIORITY_MIN() @nogc nothrow pure @safe
920 return THREAD_PRIORITY_IDLE;
923 @property static const(int) PRIORITY_MAX() @nogc nothrow pure @safe
925 return THREAD_PRIORITY_TIME_CRITICAL;
928 @property static int PRIORITY_DEFAULT() @nogc nothrow pure @safe
930 return THREAD_PRIORITY_NORMAL;
933 else
// On non-Windows systems the bounds are queried at runtime and cached.
// int.min is the "not yet loaded" sentinel checked by loadGlobal.
935 private struct Priority
937 int PRIORITY_MIN = int.min;
938 int PRIORITY_DEFAULT = int.min;
939 int PRIORITY_MAX = int.min;
943 Lazily loads one of the members stored in a hidden global variable of
944 type `Priority`. Upon the first access of either member, the entire
945 `Priority` structure is initialized. Multiple initializations from
946 different threads calling this function are tolerated.
948 `which` must be one of `PRIORITY_MIN`, `PRIORITY_DEFAULT`,
949 `PRIORITY_MAX`.
951 private static int loadGlobal(string which)()
953 static shared Priority cache;
954 auto local = atomicLoad(mixin("cache." ~ which));
// int.min is the sentinel for "not yet loaded" (see struct Priority).
955 if (local != local.min) return local;
956 // There will be benign races: concurrent first callers may each run
// loadPriorities, but they all store the same values.
957 cache = loadPriorities;
958 return atomicLoad(mixin("cache." ~ which));
962 Loads all priorities and returns them as a `Priority` structure. This
963 function is thread-neutral.
965 private static Priority loadPriorities() @nogc nothrow @trusted
967 Priority result;
968 version (Solaris)
// Solaris: query the scheduling class via priocntl(2); see comments in
// the priority setter for why priocntl is used over pthread APIs here.
970 pcparms_t pcParms;
971 pcinfo_t pcInfo;
973 pcParms.pc_cid = PC_CLNULL;
974 if (priocntl(idtype_t.P_PID, P_MYID, PC_GETPARMS, &pcParms) == -1)
975 assert( 0, "Unable to get scheduling class" );
977 pcInfo.pc_cid = pcParms.pc_cid;
978 // PC_GETCLINFO ignores the first two args, use dummy values
979 if (priocntl(idtype_t.P_PID, 0, PC_GETCLINFO, &pcInfo) == -1)
980 assert( 0, "Unable to get scheduling class info" );
982 pri_t* clparms = cast(pri_t*)&pcParms.pc_clparms;
983 pri_t* clinfo = cast(pri_t*)&pcInfo.pc_clinfo;
985 result.PRIORITY_MAX = clparms[0];
987 if (pcInfo.pc_clname == "RT")
989 m_isRTClass = true;
991 // For RT class, just assume it can't be changed
992 result.PRIORITY_MIN = clparms[0];
993 result.PRIORITY_DEFAULT = clparms[0];
995 else
997 m_isRTClass = false;
999 // For all other scheduling classes, there are
1000 // two key values -- uprilim and maxupri.
1001 // maxupri is the maximum possible priority defined
1002 // for the scheduling class, and valid priorities
1003 // range are in [-maxupri, maxupri].
1005 // However, uprilim is an upper limit that the
1006 // current thread can set for the current scheduling
1007 // class, which can be less than maxupri. As such,
1008 // use this value for priorityMax since this is
1009 // the effective maximum.
1011 // maxupri
1012 result.PRIORITY_MIN = -clinfo[0];
1013 // by definition
1014 result.PRIORITY_DEFAULT = 0;
1017 else version (Posix)
// Generic POSIX: bounds come from sched_get_priority_{min,max} for the
// current policy; the default is this thread's current priority.
1019 int policy;
1020 sched_param param;
1021 pthread_getschedparam( pthread_self(), &policy, &param ) == 0
1022 || assert(0, "Internal error in pthread_getschedparam");
1024 result.PRIORITY_MIN = sched_get_priority_min( policy );
1025 result.PRIORITY_MIN != -1
1026 || assert(0, "Internal error in sched_get_priority_min");
1027 result.PRIORITY_DEFAULT = param.sched_priority;
1028 result.PRIORITY_MAX = sched_get_priority_max( policy );
1029 result.PRIORITY_MAX != -1 ||
1030 assert(0, "Internal error in sched_get_priority_max");
1032 else
1034 static assert(0, "Your code here.");
1036 return result;
1040 * The minimum scheduling priority that may be set for a thread. On
1041 * systems where multiple scheduling policies are defined, this value
1042 * represents the minimum valid priority for the scheduling policy of
1043 * the process.
1045 @property static int PRIORITY_MIN() @nogc nothrow pure @trusted
// The cast launders loadGlobal (which caches in a global) into a pure
// @safe signature for these properties.
1047 return (cast(int function() @nogc nothrow pure @safe)
1048 &loadGlobal!"PRIORITY_MIN")();
1052 * The maximum scheduling priority that may be set for a thread. On
1053 * systems where multiple scheduling policies are defined, this value
1054 * represents the maximum valid priority for the scheduling policy of
1055 * the process.
1057 @property static const(int) PRIORITY_MAX() @nogc nothrow pure @trusted
1059 return (cast(int function() @nogc nothrow pure @safe)
1060 &loadGlobal!"PRIORITY_MAX")();
1064 * The default scheduling priority that is set for a thread. On
1065 * systems where multiple scheduling policies are defined, this value
1066 * represents the default priority for the scheduling policy of
1067 * the process.
1069 @property static int PRIORITY_DEFAULT() @nogc nothrow pure @trusted
1071 return (cast(int function() @nogc nothrow pure @safe)
1072 &loadGlobal!"PRIORITY_DEFAULT")();
1076 version (NetBSD)
1078 // NetBSD does not support priority for the default policy,
1079 // and it is not possible to change the policy without root access;
// the requested priority is stored here instead (int.max = unset).
1080 int fakePriority = int.max;
1084 * Gets the scheduling priority for the associated thread.
1086 * Note: Getting the priority of a thread that already terminated
1087 * might return the default priority.
1089 * Returns:
1090 * The scheduling priority of this thread.
1092 final @property int priority()
1094 version (Windows)
1096 return GetThreadPriority( m_hndl );
1098 else version (NetBSD)
// NetBSD cannot query real priorities; report the stored fake value,
// or the default if none was ever set (see fakePriority above).
1100 return fakePriority==int.max? PRIORITY_DEFAULT : fakePriority;
1102 else version (Posix)
1104 int policy;
1105 sched_param param;
1107 if (auto err = pthread_getschedparam(m_addr, &policy, &param))
1109 // ignore error if thread is not running => Bugzilla 8960
1110 if (!atomicLoad(m_isRunning)) return PRIORITY_DEFAULT;
1111 throw new ThreadException("Unable to get thread priority");
1113 return param.sched_priority;
1119 * Sets the scheduling priority for the associated thread.
1121 * Note: Setting the priority of a thread that already terminated
1122 * might have no effect.
1124 * Params:
1125 * val = The new scheduling priority of this thread.
1127 final @property void priority( int val )
1130 assert(val >= PRIORITY_MIN);
1131 assert(val <= PRIORITY_MAX);
1133 body
1135 version (Windows)
1137 if ( !SetThreadPriority( m_hndl, val ) )
1138 throw new ThreadException( "Unable to set thread priority" );
1140 else version (Solaris)
1142 // the pthread_setschedprio(3c) and pthread_setschedparam functions
1143 // are broken for the default (TS / time sharing) scheduling class.
1144 // instead, we use priocntl(2) which gives us the desired behavior.
1146 // We hardcode the min and max priorities to the current value
1147 // so this is a no-op for RT threads.
1148 if (m_isRTClass)
1149 return;
1151 pcparms_t pcparm;
1153 pcparm.pc_cid = PC_CLNULL;
1154 if (priocntl(idtype_t.P_LWPID, P_MYID, PC_GETPARMS, &pcparm) == -1)
1155 throw new ThreadException( "Unable to get scheduling class" );
1157 pri_t* clparms = cast(pri_t*)&pcparm.pc_clparms;
1159 // clparms is filled in by the PC_GETPARMS call, only necessary
1160 // to adjust the element that contains the thread priority
1161 clparms[1] = cast(pri_t) val;
1163 if (priocntl(idtype_t.P_LWPID, P_MYID, PC_SETPARMS, &pcparm) == -1)
1164 throw new ThreadException( "Unable to set scheduling class" );
1166 else version (NetBSD)
// NetBSD cannot set real priorities; remember the value so the getter
// can report it back (see fakePriority).
1168 fakePriority = val;
1170 else version (Posix)
1172 static if (__traits(compiles, pthread_setschedprio))
1174 if (auto err = pthread_setschedprio(m_addr, val))
1176 // ignore error if thread is not running => Bugzilla 8960
1177 if (!atomicLoad(m_isRunning)) return;
1178 throw new ThreadException("Unable to set thread priority");
1181 else
1183 // NOTE: pthread_setschedprio is not implemented on Darwin or FreeBSD, so use
1184 // the more complicated get/set sequence below.
1185 int policy;
1186 sched_param param;
1188 if (auto err = pthread_getschedparam(m_addr, &policy, &param))
1190 // ignore error if thread is not running => Bugzilla 8960
1191 if (!atomicLoad(m_isRunning)) return;
1192 throw new ThreadException("Unable to set thread priority");
1194 param.sched_priority = val;
1195 if (auto err = pthread_setschedparam(m_addr, policy, &param))
1197 // ignore error if thread is not running => Bugzilla 8960
1198 if (!atomicLoad(m_isRunning)) return;
1199 throw new ThreadException("Unable to set thread priority");
// Round-trips the calling thread's priority through MIN and MAX,
// restoring the original value on exit.
1206 unittest
1208 auto thr = Thread.getThis();
1209 immutable prio = thr.priority;
1210 scope (exit) thr.priority = prio;
1212 assert(prio == PRIORITY_DEFAULT);
1213 assert(prio >= PRIORITY_MIN && prio <= PRIORITY_MAX);
1214 thr.priority = PRIORITY_MIN;
1215 assert(thr.priority == PRIORITY_MIN);
1216 thr.priority = PRIORITY_MAX;
1217 assert(thr.priority == PRIORITY_MAX);
// Priority get/set on a (likely) terminated thread must not error.
1220 unittest // Bugzilla 8960
1222 import core.sync.semaphore;
1224 auto thr = new Thread({});
1225 thr.start();
1226 Thread.sleep(1.msecs); // wait a little so the thread likely has finished
1227 thr.priority = PRIORITY_MAX; // setting priority doesn't cause error
1228 auto prio = thr.priority; // getting priority doesn't cause error
1229 assert(prio >= PRIORITY_MIN && prio <= PRIORITY_MAX);
1232 ///////////////////////////////////////////////////////////////////////////
1233 // Actions on Calling Thread
1234 ///////////////////////////////////////////////////////////////////////////
1238 * Suspends the calling thread for at least the supplied period. This may
1239 * result in multiple OS calls if period is greater than the maximum sleep
1240 * duration supported by the operating system.
1242 * Params:
1243 * val = The minimum duration the calling thread should be suspended.
1245 * In:
1246 * period must be non-negative.
1248 * Example:
1249 * ------------------------------------------------------------------------
1251 * Thread.sleep( dur!("msecs")( 50 ) ); // sleep for 50 milliseconds
1252 * Thread.sleep( dur!("seconds")( 5 ) ); // sleep for 5 seconds
1254 * ------------------------------------------------------------------------
1256 static void sleep( Duration val ) @nogc nothrow
1259 assert( !val.isNegative );
1261 body
1263 version (Windows)
// Windows Sleep takes 32-bit milliseconds, so longer durations are
// satisfied with multiple calls.
1265 auto maxSleepMillis = dur!("msecs")( uint.max - 1 );
1267 // avoid a non-zero time being rounded down to 0
1268 if ( val > dur!"msecs"( 0 ) && val < dur!"msecs"( 1 ) )
1269 val = dur!"msecs"( 1 );
1271 // NOTE: In instances where all other threads in the process have a
1272 // lower priority than the current thread, the current thread
1273 // will not yield with a sleep time of zero. However, unlike
1274 // yield(), the user is not asking for a yield to occur but
1275 // only for execution to suspend for the requested interval.
1276 // Therefore, expected performance may not be met if a yield
1277 // is forced upon the user.
1278 while ( val > maxSleepMillis )
1280 Sleep( cast(uint)
1281 maxSleepMillis.total!"msecs" );
1282 val -= maxSleepMillis;
1284 Sleep( cast(uint) val.total!"msecs" );
1286 else version (Posix)
// POSIX nanosleep can be interrupted by signals (EINTR); resume with
// the remaining time reported in tout until the full period elapses.
1288 timespec tin = void;
1289 timespec tout = void;
1291 val.split!("seconds", "nsecs")(tin.tv_sec, tin.tv_nsec);
1292 if ( val.total!"seconds" > tin.tv_sec.max )
1293 tin.tv_sec = tin.tv_sec.max;
1294 while ( true )
1296 if ( !nanosleep( &tin, &tout ) )
1297 return;
1298 if ( errno != EINTR )
1299 assert(0, "Unable to sleep for the specified duration");
1300 tin = tout;
1307 * Forces a context switch to occur away from the calling thread.
1309 static void yield() @nogc nothrow
1311 version (Windows)
1312 SwitchToThread();
1313 else version (Posix)
1314 sched_yield();
1318 ///////////////////////////////////////////////////////////////////////////
1319 // Thread Accessors
1320 ///////////////////////////////////////////////////////////////////////////
1323 * Provides a reference to the calling thread.
1325 * Returns:
1326 * The thread object representing the calling thread. The result of
1327 * deleting this object is undefined. If the current thread is not
1328 * attached to the runtime, a null reference is returned.
1330 static Thread getThis() @safe nothrow @nogc
1332 // NOTE: This function may not be called until thread_init has
1333 // completed. See thread_suspendAll for more information
1334 // on why this might occur.
1335 return sm_this;
1340 * Provides a list of all threads currently being tracked by the system.
1341 * Note that threads in the returned array might no longer run (see
1342 * $(D Thread.)$(LREF isRunning)).
1344 * Returns:
1345 * An array containing references to all threads currently being
1346 * tracked by the system. The result of deleting any contained
1347 * objects is undefined.
1349 static Thread[] getAll()
1351 static void resize(ref Thread[] buf, size_t nlen)
1353 buf.length = nlen;
1355 return getAllImpl!resize();
1360 * Operates on all threads currently being tracked by the system. The
1361 * result of deleting any Thread object is undefined.
1362 * Note that threads passed to the callback might no longer run (see
1363 * $(D Thread.)$(LREF isRunning)).
1365 * Params:
1366 * dg = The supplied code as a delegate.
1368 * Returns:
1369 * Zero if all elemented are visited, nonzero if not.
1371 static int opApply(scope int delegate(ref Thread) dg)
1373 import core.stdc.stdlib : free, realloc;
1375 static void resize(ref Thread[] buf, size_t nlen)
1377 buf = (cast(Thread*)realloc(buf.ptr, nlen * Thread.sizeof))[0 .. nlen];
1379 auto buf = getAllImpl!resize;
1380 scope(exit) if (buf.ptr) free(buf.ptr);
1382 foreach (t; buf)
1384 if (auto res = dg(t))
1385 return res;
1387 return 0;
1390 unittest
1392 auto t1 = new Thread({
1393 foreach (_; 0 .. 20)
1394 Thread.getAll;
1395 }).start;
1396 auto t2 = new Thread({
1397 foreach (_; 0 .. 20)
1398 GC.collect;
1399 }).start;
1400 t1.join();
1401 t2.join();
1404 private static Thread[] getAllImpl(alias resize)()
1406 import core.atomic;
1408 Thread[] buf;
1409 while (true)
1411 immutable len = atomicLoad!(MemoryOrder.raw)(*cast(shared)&sm_tlen);
1412 resize(buf, len);
1413 assert(buf.length == len);
1414 synchronized (slock)
1416 if (len == sm_tlen)
1418 size_t pos;
1419 for (Thread t = sm_tbeg; t; t = t.next)
1420 buf[pos++] = t;
1421 return buf;
1427 ///////////////////////////////////////////////////////////////////////////
1428 // Stuff That Should Go Away
1429 ///////////////////////////////////////////////////////////////////////////
1432 private:
1434 // Initializes a thread object which has no associated executable function.
1435 // This is used for the main thread initialized in thread_init().
1437 this(size_t sz = 0) @safe pure nothrow @nogc
1439 if (sz)
1441 version (Posix)
1443 // stack size must be a multiple of PAGESIZE
1444 sz += PAGESIZE - 1;
1445 sz -= sz % PAGESIZE;
1446 // and at least PTHREAD_STACK_MIN
1447 if (PTHREAD_STACK_MIN > sz)
1448 sz = PTHREAD_STACK_MIN;
1450 m_sz = sz;
1452 m_call = Call.NO;
1453 m_curr = &m_main;
1458 // Thread entry point. Invokes the function or delegate passed on
1459 // construction (if any).
1461 final void run()
1463 switch ( m_call )
1465 case Call.FN:
1466 m_fn();
1467 break;
1468 case Call.DG:
1469 m_dg();
1470 break;
1471 default:
1472 break;
1477 private:
1479 // The type of routine passed on thread construction.
1481 enum Call
1490 // Standard types
1492 version (Windows)
1494 alias TLSKey = uint;
1496 else version (Posix)
1498 alias TLSKey = pthread_key_t;
1503 // Local storage
1505 static Thread sm_this;
1509 // Main process thread
1511 __gshared Thread sm_main;
1513 version (FreeBSD)
1515 // set when suspend failed and should be retried, see Issue 13416
1516 shared bool m_suspendagain;
1521 // Standard thread data
1523 version (Windows)
1525 HANDLE m_hndl;
1527 else version (Darwin)
1529 mach_port_t m_tmach;
1531 ThreadID m_addr;
1532 Call m_call;
1533 string m_name;
1534 union
1536 void function() m_fn;
1537 void delegate() m_dg;
1539 size_t m_sz;
1540 version (Posix)
1542 shared bool m_isRunning;
1544 bool m_isDaemon;
1545 bool m_isInCriticalRegion;
1546 Throwable m_unhandled;
1548 version (Solaris)
1550 __gshared immutable bool m_isRTClass;
1553 private:
1554 ///////////////////////////////////////////////////////////////////////////
1555 // Storage of Active Thread
1556 ///////////////////////////////////////////////////////////////////////////
1560 // Sets a thread-local reference to the current thread object.
1562 static void setThis( Thread t ) nothrow @nogc
1564 sm_this = t;
1568 private:
1569 ///////////////////////////////////////////////////////////////////////////
1570 // Thread Context and GC Scanning Support
1571 ///////////////////////////////////////////////////////////////////////////
1574 final void pushContext( Context* c ) nothrow @nogc
1577 assert( !c.within );
1579 body
1581 m_curr.ehContext = swapContext(c.ehContext);
1582 c.within = m_curr;
1583 m_curr = c;
1587 final void popContext() nothrow @nogc
1590 assert( m_curr && m_curr.within );
1592 body
1594 Context* c = m_curr;
1595 m_curr = c.within;
1596 c.ehContext = swapContext(m_curr.ehContext);
1597 c.within = null;
1601 final Context* topContext() nothrow @nogc
1604 assert( m_curr );
1606 body
1608 return m_curr;
1612 static struct Context
1614 void* bstack,
1615 tstack;
1617 /// Slot for the EH implementation to keep some state for each stack
1618 /// (will be necessary for exception chaining, etc.). Opaque as far as
1619 /// we are concerned here.
1620 void* ehContext;
1622 Context* within;
1623 Context* next,
1624 prev;
1628 Context m_main;
1629 Context* m_curr;
1630 bool m_lock;
1631 void* m_tlsgcdata;
1633 version (Windows)
1635 version (X86)
1637 uint[8] m_reg; // edi,esi,ebp,esp,ebx,edx,ecx,eax
1639 else version (X86_64)
1641 ulong[16] m_reg; // rdi,rsi,rbp,rsp,rbx,rdx,rcx,rax
1642 // r8,r9,r10,r11,r12,r13,r14,r15
1644 else
1646 static assert(false, "Architecture not supported." );
1649 else version (Darwin)
1651 version (X86)
1653 uint[8] m_reg; // edi,esi,ebp,esp,ebx,edx,ecx,eax
1655 else version (X86_64)
1657 ulong[16] m_reg; // rdi,rsi,rbp,rsp,rbx,rdx,rcx,rax
1658 // r8,r9,r10,r11,r12,r13,r14,r15
1660 else
1662 static assert(false, "Architecture not supported." );
1667 private:
1668 ///////////////////////////////////////////////////////////////////////////
1669 // GC Scanning Support
1670 ///////////////////////////////////////////////////////////////////////////
1673 // NOTE: The GC scanning process works like so:
1675 // 1. Suspend all threads.
1676 // 2. Scan the stacks of all suspended threads for roots.
1677 // 3. Resume all threads.
1679 // Step 1 and 3 require a list of all threads in the system, while
1680 // step 2 requires a list of all thread stacks (each represented by
1681 // a Context struct). Traditionally, there was one stack per thread
1682 // and the Context structs were not necessary. However, Fibers have
1683 // changed things so that each thread has its own 'main' stack plus
1684 // an arbitrary number of nested stacks (normally referenced via
1685 // m_curr). Also, there may be 'free-floating' stacks in the system,
1686 // which are Fibers that are not currently executing on any specific
1687 // thread but are still being processed and still contain valid
1688 // roots.
1690 // To support all of this, the Context struct has been created to
1691 // represent a stack range, and a global list of Context structs has
1692 // been added to enable scanning of these stack ranges. The lifetime
1693 // (and presence in the Context list) of a thread's 'main' stack will
1694 // be equivalent to the thread's lifetime. So the Ccontext will be
1695 // added to the list on thread entry, and removed from the list on
1696 // thread exit (which is essentially the same as the presence of a
1697 // Thread object in its own global list). The lifetime of a Fiber's
1698 // context, however, will be tied to the lifetime of the Fiber object
1699 // itself, and Fibers are expected to add/remove their Context struct
1700 // on construction/deletion.
1704 // All use of the global thread lists/array should synchronize on this lock.
1706 // Careful as the GC acquires this lock after the GC lock to suspend all
1707 // threads any GC usage with slock held can result in a deadlock through
1708 // lock order inversion.
1709 @property static Mutex slock() nothrow @nogc
1711 return cast(Mutex)_locks[0].ptr;
1714 @property static Mutex criticalRegionLock() nothrow @nogc
1716 return cast(Mutex)_locks[1].ptr;
1719 __gshared align(Mutex.alignof) void[__traits(classInstanceSize, Mutex)][2] _locks;
1721 static void initLocks()
1723 foreach (ref lock; _locks)
1725 lock[] = typeid(Mutex).initializer[];
1726 (cast(Mutex)lock.ptr).__ctor();
1730 static void termLocks()
1732 foreach (ref lock; _locks)
1733 (cast(Mutex)lock.ptr).__dtor();
1736 __gshared Context* sm_cbeg;
1738 __gshared Thread sm_tbeg;
1739 __gshared size_t sm_tlen;
1741 // can't use rt.util.array in public code
1742 __gshared Thread* pAboutToStart;
1743 __gshared size_t nAboutToStart;
1746 // Used for ordering threads in the global thread list.
1748 Thread prev;
1749 Thread next;
1752 ///////////////////////////////////////////////////////////////////////////
1753 // Global Context List Operations
1754 ///////////////////////////////////////////////////////////////////////////
1758 // Add a context to the global context list.
1760 static void add( Context* c ) nothrow @nogc
1763 assert( c );
1764 assert( !c.next && !c.prev );
1766 body
1768 slock.lock_nothrow();
1769 scope(exit) slock.unlock_nothrow();
1770 assert(!suspendDepth); // must be 0 b/c it's only set with slock held
1772 if (sm_cbeg)
1774 c.next = sm_cbeg;
1775 sm_cbeg.prev = c;
1777 sm_cbeg = c;
1782 // Remove a context from the global context list.
1784 // This assumes slock being acquired. This isn't done here to
1785 // avoid double locking when called from remove(Thread)
1786 static void remove( Context* c ) nothrow @nogc
1789 assert( c );
1790 assert( c.next || c.prev );
1792 body
1794 if ( c.prev )
1795 c.prev.next = c.next;
1796 if ( c.next )
1797 c.next.prev = c.prev;
1798 if ( sm_cbeg == c )
1799 sm_cbeg = c.next;
1800 // NOTE: Don't null out c.next or c.prev because opApply currently
1801 // follows c.next after removing a node. This could be easily
1802 // addressed by simply returning the next node from this
1803 // function, however, a context should never be re-added to the
1804 // list anyway and having next and prev be non-null is a good way
1805 // to ensure that.
1809 ///////////////////////////////////////////////////////////////////////////
1810 // Global Thread List Operations
1811 ///////////////////////////////////////////////////////////////////////////
1815 // Add a thread to the global thread list.
1817 static void add( Thread t, bool rmAboutToStart = true ) nothrow @nogc
1820 assert( t );
1821 assert( !t.next && !t.prev );
1823 body
1825 slock.lock_nothrow();
1826 scope(exit) slock.unlock_nothrow();
1827 assert(t.isRunning); // check this with slock to ensure pthread_create already returned
1828 assert(!suspendDepth); // must be 0 b/c it's only set with slock held
1830 if (rmAboutToStart)
1832 size_t idx = -1;
1833 foreach (i, thr; pAboutToStart[0 .. nAboutToStart])
1835 if (thr is t)
1837 idx = i;
1838 break;
1841 assert(idx != -1);
1842 import core.stdc.string : memmove;
1843 memmove(pAboutToStart + idx, pAboutToStart + idx + 1, Thread.sizeof * (nAboutToStart - idx - 1));
1844 pAboutToStart =
1845 cast(Thread*)realloc(pAboutToStart, Thread.sizeof * --nAboutToStart);
1848 if (sm_tbeg)
1850 t.next = sm_tbeg;
1851 sm_tbeg.prev = t;
1853 sm_tbeg = t;
1854 ++sm_tlen;
1859 // Remove a thread from the global thread list.
1861 static void remove( Thread t ) nothrow @nogc
1864 assert( t );
1866 body
1868 // Thread was already removed earlier, might happen b/c of thread_detachInstance
1869 if (!t.next && !t.prev)
1870 return;
1871 slock.lock_nothrow();
1873 // NOTE: When a thread is removed from the global thread list its
1874 // main context is invalid and should be removed as well.
1875 // It is possible that t.m_curr could reference more
1876 // than just the main context if the thread exited abnormally
1877 // (if it was terminated), but we must assume that the user
1878 // retains a reference to them and that they may be re-used
1879 // elsewhere. Therefore, it is the responsibility of any
1880 // object that creates contexts to clean them up properly
1881 // when it is done with them.
1882 remove( &t.m_main );
1884 if ( t.prev )
1885 t.prev.next = t.next;
1886 if ( t.next )
1887 t.next.prev = t.prev;
1888 if ( sm_tbeg is t )
1889 sm_tbeg = t.next;
1890 t.prev = t.next = null;
1891 --sm_tlen;
1893 // NOTE: Don't null out t.next or t.prev because opApply currently
1894 // follows t.next after removing a node. This could be easily
1895 // addressed by simply returning the next node from this
1896 // function, however, a thread should never be re-added to the
1897 // list anyway and having next and prev be non-null is a good way
1898 // to ensure that.
1899 slock.unlock_nothrow();
1904 unittest
1906 class DerivedThread : Thread
1908 this()
1910 super(&run);
1913 private:
1914 void run()
1916 // Derived thread running.
1920 void threadFunc()
1922 // Composed thread running.
1925 // create and start instances of each type
1926 auto derived = new DerivedThread().start();
1927 auto composed = new Thread(&threadFunc).start();
1928 new Thread({
1929 // Codes to run in the newly created thread.
1930 }).start();
1933 unittest
1935 int x = 0;
1937 new Thread(
1939 x++;
1940 }).start().join();
1941 assert( x == 1 );
1945 unittest
1947 enum MSG = "Test message.";
1948 string caughtMsg;
1952 new Thread(
1954 throw new Exception( MSG );
1955 }).start().join();
1956 assert( false, "Expected rethrown exception." );
1958 catch ( Throwable t )
1960 assert( t.msg == MSG );
1965 ///////////////////////////////////////////////////////////////////////////////
1966 // GC Support Routines
1967 ///////////////////////////////////////////////////////////////////////////////
1969 version (CoreDdoc)
1972 * Instruct the thread module, when initialized, to use a different set of
1973 * signals besides SIGUSR1 and SIGUSR2 for suspension and resumption of threads.
1974 * This function should be called at most once, prior to thread_init().
1975 * This function is Posix-only.
1977 extern (C) void thread_setGCSignals(int suspendSignalNo, int resumeSignalNo) nothrow @nogc
1981 else version (Posix)
1983 extern (C) void thread_setGCSignals(int suspendSignalNo, int resumeSignalNo) nothrow @nogc
1986 assert(suspendSignalNumber == 0);
1987 assert(resumeSignalNumber == 0);
1988 assert(suspendSignalNo != 0);
1989 assert(resumeSignalNo != 0);
1993 assert(suspendSignalNumber != 0);
1994 assert(resumeSignalNumber != 0);
1996 body
1998 suspendSignalNumber = suspendSignalNo;
1999 resumeSignalNumber = resumeSignalNo;
2003 version (Posix)
2005 __gshared int suspendSignalNumber;
2006 __gshared int resumeSignalNumber;
2010 * Initializes the thread module. This function must be called by the
2011 * garbage collector on startup and before any other thread routines
2012 * are called.
2014 extern (C) void thread_init()
2016 // NOTE: If thread_init itself performs any allocations then the thread
2017 // routines reserved for garbage collector use may be called while
2018 // thread_init is being processed. However, since no memory should
2019 // exist to be scanned at this point, it is sufficient for these
2020 // functions to detect the condition and return immediately.
2022 Thread.initLocks();
2023 // The Android VM runtime intercepts SIGUSR1 and apparently doesn't allow
2024 // its signal handler to run, so swap the two signals on Android, since
2025 // thread_resumeHandler does nothing.
2026 version (Android) thread_setGCSignals(SIGUSR2, SIGUSR1);
2028 version (Darwin)
2031 else version (Posix)
2033 if ( suspendSignalNumber == 0 )
2035 suspendSignalNumber = SIGUSR1;
2038 if ( resumeSignalNumber == 0 )
2040 resumeSignalNumber = SIGUSR2;
2043 int status;
2044 sigaction_t sigusr1 = void;
2045 sigaction_t sigusr2 = void;
2047 // This is a quick way to zero-initialize the structs without using
2048 // memset or creating a link dependency on their static initializer.
2049 (cast(byte*) &sigusr1)[0 .. sigaction_t.sizeof] = 0;
2050 (cast(byte*) &sigusr2)[0 .. sigaction_t.sizeof] = 0;
2052 // NOTE: SA_RESTART indicates that system calls should restart if they
2053 // are interrupted by a signal, but this is not available on all
2054 // Posix systems, even those that support multithreading.
2055 static if ( __traits( compiles, SA_RESTART ) )
2056 sigusr1.sa_flags = SA_RESTART;
2057 else
2058 sigusr1.sa_flags = 0;
2059 sigusr1.sa_handler = &thread_suspendHandler;
2060 // NOTE: We want to ignore all signals while in this handler, so fill
2061 // sa_mask to indicate this.
2062 status = sigfillset( &sigusr1.sa_mask );
2063 assert( status == 0 );
2065 // NOTE: Since resumeSignalNumber should only be issued for threads within the
2066 // suspend handler, we don't want this signal to trigger a
2067 // restart.
2068 sigusr2.sa_flags = 0;
2069 sigusr2.sa_handler = &thread_resumeHandler;
2070 // NOTE: We want to ignore all signals while in this handler, so fill
2071 // sa_mask to indicate this.
2072 status = sigfillset( &sigusr2.sa_mask );
2073 assert( status == 0 );
2075 status = sigaction( suspendSignalNumber, &sigusr1, null );
2076 assert( status == 0 );
2078 status = sigaction( resumeSignalNumber, &sigusr2, null );
2079 assert( status == 0 );
2081 status = sem_init( &suspendCount, 0, 0 );
2082 assert( status == 0 );
2084 Thread.sm_main = thread_attachThis();
2089 * Terminates the thread module. No other thread routine may be called
2090 * afterwards.
2092 extern (C) void thread_term()
2094 assert(Thread.sm_tbeg && Thread.sm_tlen == 1);
2095 assert(!Thread.nAboutToStart);
2096 if (Thread.pAboutToStart) // in case realloc(p, 0) doesn't return null
2098 free(Thread.pAboutToStart);
2099 Thread.pAboutToStart = null;
2101 Thread.termLocks();
2108 extern (C) bool thread_isMainThread() nothrow @nogc
2110 return Thread.getThis() is Thread.sm_main;
2115 * Registers the calling thread for use with the D Runtime. If this routine
2116 * is called for a thread which is already registered, no action is performed.
2118 * NOTE: This routine does not run thread-local static constructors when called.
2119 * If full functionality as a D thread is desired, the following function
2120 * must be called after thread_attachThis:
2122 * extern (C) void rt_moduleTlsCtor();
2124 extern (C) Thread thread_attachThis()
2126 GC.disable(); scope(exit) GC.enable();
2128 if (auto t = Thread.getThis())
2129 return t;
2131 Thread thisThread = new Thread();
2132 Thread.Context* thisContext = &thisThread.m_main;
2133 assert( thisContext == thisThread.m_curr );
2135 version (Windows)
2137 thisThread.m_addr = GetCurrentThreadId();
2138 thisThread.m_hndl = GetCurrentThreadHandle();
2139 thisContext.bstack = getStackBottom();
2140 thisContext.tstack = thisContext.bstack;
2142 else version (Posix)
2144 thisThread.m_addr = pthread_self();
2145 thisContext.bstack = getStackBottom();
2146 thisContext.tstack = thisContext.bstack;
2148 atomicStore!(MemoryOrder.raw)(thisThread.m_isRunning, true);
2150 thisThread.m_isDaemon = true;
2151 thisThread.m_tlsgcdata = rt_tlsgc_init();
2152 Thread.setThis( thisThread );
2154 version (Darwin)
2156 thisThread.m_tmach = pthread_mach_thread_np( thisThread.m_addr );
2157 assert( thisThread.m_tmach != thisThread.m_tmach.init );
2160 Thread.add( thisThread, false );
2161 Thread.add( thisContext );
2162 if ( Thread.sm_main !is null )
2163 multiThreadedFlag = true;
2164 return thisThread;
2168 version (Windows)
2170 // NOTE: These calls are not safe on Posix systems that use signals to
2171 // perform garbage collection. The suspendHandler uses getThis()
2172 // to get the thread handle so getThis() must be a simple call.
2173 // Mutexes can't safely be acquired inside signal handlers, and
2174 // even if they could, the mutex needed (Thread.slock) is held by
2175 // thread_suspendAll(). So in short, these routines will remain
2176 // Windows-specific. If they are truly needed elsewhere, the
2177 // suspendHandler will need a way to call a version of getThis()
2178 // that only does the TLS lookup without the fancy fallback stuff.
2180 /// ditto
2181 extern (C) Thread thread_attachByAddr( ThreadID addr )
2183 return thread_attachByAddrB( addr, getThreadStackBottom( addr ) );
2187 /// ditto
2188 extern (C) Thread thread_attachByAddrB( ThreadID addr, void* bstack )
2190 GC.disable(); scope(exit) GC.enable();
2192 if (auto t = thread_findByAddr(addr))
2193 return t;
2195 Thread thisThread = new Thread();
2196 Thread.Context* thisContext = &thisThread.m_main;
2197 assert( thisContext == thisThread.m_curr );
2199 thisThread.m_addr = addr;
2200 thisContext.bstack = bstack;
2201 thisContext.tstack = thisContext.bstack;
2203 thisThread.m_isDaemon = true;
2205 if ( addr == GetCurrentThreadId() )
2207 thisThread.m_hndl = GetCurrentThreadHandle();
2208 thisThread.m_tlsgcdata = rt_tlsgc_init();
2209 Thread.setThis( thisThread );
2211 else
2213 thisThread.m_hndl = OpenThreadHandle( addr );
2214 impersonate_thread(addr,
2216 thisThread.m_tlsgcdata = rt_tlsgc_init();
2217 Thread.setThis( thisThread );
2221 Thread.add( thisThread, false );
2222 Thread.add( thisContext );
2223 if ( Thread.sm_main !is null )
2224 multiThreadedFlag = true;
2225 return thisThread;
2231 * Deregisters the calling thread from use with the runtime. If this routine
2232 * is called for a thread which is not registered, the result is undefined.
2234 * NOTE: This routine does not run thread-local static destructors when called.
2235 * If full functionality as a D thread is desired, the following function
2236 * must be called after thread_detachThis, particularly if the thread is
2237 * being detached at some indeterminate time before program termination:
2239 * $(D extern(C) void rt_moduleTlsDtor();)
2241 extern (C) void thread_detachThis() nothrow @nogc
2243 if (auto t = Thread.getThis())
2244 Thread.remove(t);
2249 * Deregisters the given thread from use with the runtime. If this routine
2250 * is called for a thread which is not registered, the result is undefined.
2252 * NOTE: This routine does not run thread-local static destructors when called.
2253 * If full functionality as a D thread is desired, the following function
2254 * must be called by the detached thread, particularly if the thread is
2255 * being detached at some indeterminate time before program termination:
2257 * $(D extern(C) void rt_moduleTlsDtor();)
2259 extern (C) void thread_detachByAddr( ThreadID addr )
2261 if ( auto t = thread_findByAddr( addr ) )
2262 Thread.remove( t );
2266 /// ditto
2267 extern (C) void thread_detachInstance( Thread t ) nothrow @nogc
2269 Thread.remove( t );
2273 unittest
2275 import core.sync.semaphore;
2276 auto sem = new Semaphore();
2278 auto t = new Thread(
2280 sem.notify();
2281 Thread.sleep(100.msecs);
2282 }).start();
2284 sem.wait(); // thread cannot be detached while being started
2285 thread_detachInstance(t);
2286 foreach (t2; Thread)
2287 assert(t !is t2);
2288 t.join();
2293 * Search the list of all threads for a thread with the given thread identifier.
2295 * Params:
2296 * addr = The thread identifier to search for.
2297 * Returns:
2298 * The thread object associated with the thread identifier, null if not found.
2300 static Thread thread_findByAddr( ThreadID addr )
2302 Thread.slock.lock_nothrow();
2303 scope(exit) Thread.slock.unlock_nothrow();
2305 // also return just spawned thread so that
2306 // DLL_THREAD_ATTACH knows it's a D thread
2307 foreach (t; Thread.pAboutToStart[0 .. Thread.nAboutToStart])
2308 if (t.m_addr == addr)
2309 return t;
2311 foreach (t; Thread)
2312 if (t.m_addr == addr)
2313 return t;
2315 return null;
2320 * Sets the current thread to a specific reference. Only to be used
2321 * when dealing with externally-created threads (in e.g. C code).
2322 * The primary use of this function is when Thread.getThis() must
2323 * return a sensible value in, for example, TLS destructors. In
2324 * other words, don't touch this unless you know what you're doing.
2326 * Params:
2327 * t = A reference to the current thread. May be null.
2329 extern (C) void thread_setThis(Thread t) nothrow @nogc
2331 Thread.setThis(t);
2336 * Joins all non-daemon threads that are currently running. This is done by
2337 * performing successive scans through the thread list until a scan consists
2338 * of only daemon threads.
2340 extern (C) void thread_joinAll()
2342 Lagain:
2343 Thread.slock.lock_nothrow();
2344 // wait for just spawned threads
2345 if (Thread.nAboutToStart)
2347 Thread.slock.unlock_nothrow();
2348 Thread.yield();
2349 goto Lagain;
2352 // join all non-daemon threads, the main thread is also a daemon
2353 auto t = Thread.sm_tbeg;
2354 while (t)
2356 if (!t.isRunning)
2358 auto tn = t.next;
2359 Thread.remove(t);
2360 t = tn;
2362 else if (t.isDaemon)
2364 t = t.next;
2366 else
2368 Thread.slock.unlock_nothrow();
2369 t.join(); // might rethrow
2370 goto Lagain; // must restart iteration b/c of unlock
2373 Thread.slock.unlock_nothrow();
2378 * Performs intermediate shutdown of the thread module.
2380 shared static ~this()
2382 // NOTE: The functionality related to garbage collection must be minimally
2383 // operable after this dtor completes. Therefore, only minimal
2384 // cleanup may occur.
2385 auto t = Thread.sm_tbeg;
2386 while (t)
2388 auto tn = t.next;
2389 if (!t.isRunning)
2390 Thread.remove(t);
2391 t = tn;
2396 // Used for needLock below.
2397 private __gshared bool multiThreadedFlag = false;
2399 version (PPC64) version = ExternStackShell;
2401 version (ExternStackShell)
2403 extern(D) public void callWithStackShell(scope void delegate(void* sp) nothrow fn) nothrow;
2405 else
2407 // Calls the given delegate, passing the current thread's stack pointer to it.
2408 private void callWithStackShell(scope void delegate(void* sp) nothrow fn) nothrow
2411 assert(fn);
2413 body
2415 // The purpose of the 'shell' is to ensure all the registers get
2416 // put on the stack so they'll be scanned. We only need to push
2417 // the callee-save registers.
2418 void *sp = void;
2420 version (GNU)
2422 __builtin_unwind_init();
2423 sp = &sp;
2425 else version (AsmX86_Posix)
2427 size_t[3] regs = void;
2428 asm pure nothrow @nogc
2430 mov [regs + 0 * 4], EBX;
2431 mov [regs + 1 * 4], ESI;
2432 mov [regs + 2 * 4], EDI;
2434 mov sp[EBP], ESP;
2437 else version (AsmX86_Windows)
2439 size_t[3] regs = void;
2440 asm pure nothrow @nogc
2442 mov [regs + 0 * 4], EBX;
2443 mov [regs + 1 * 4], ESI;
2444 mov [regs + 2 * 4], EDI;
2446 mov sp[EBP], ESP;
2449 else version (AsmX86_64_Posix)
2451 size_t[5] regs = void;
2452 asm pure nothrow @nogc
2454 mov [regs + 0 * 8], RBX;
2455 mov [regs + 1 * 8], R12;
2456 mov [regs + 2 * 8], R13;
2457 mov [regs + 3 * 8], R14;
2458 mov [regs + 4 * 8], R15;
2460 mov sp[RBP], RSP;
2463 else version (AsmX86_64_Windows)
2465 size_t[7] regs = void;
2466 asm pure nothrow @nogc
2468 mov [regs + 0 * 8], RBX;
2469 mov [regs + 1 * 8], RSI;
2470 mov [regs + 2 * 8], RDI;
2471 mov [regs + 3 * 8], R12;
2472 mov [regs + 4 * 8], R13;
2473 mov [regs + 5 * 8], R14;
2474 mov [regs + 6 * 8], R15;
2476 mov sp[RBP], RSP;
2479 else
2481 static assert(false, "Architecture not supported.");
2484 fn(sp);
2488 // Used for suspendAll/resumeAll below.
2489 private __gshared uint suspendDepth = 0;
2492 * Suspend the specified thread and load stack and register information for
2493 * use by thread_scanAll. If the supplied thread is the calling thread,
2494 * stack and register information will be loaded but the thread will not
2495 * be suspended. If the suspend operation fails and the thread is not
2496 * running then it will be removed from the global thread list, otherwise
2497 * an exception will be thrown.
2499 * Params:
2500 * t = The thread to suspend.
2502 * Throws:
2503 * ThreadError if the suspend operation fails for a running thread.
2504 * Returns:
2505 * Whether the thread is now suspended (true) or terminated (false).
2507 private bool suspend( Thread t ) nothrow
2509 Duration waittime = dur!"usecs"(10);
2510 Lagain:
2511 if (!t.isRunning)
2513 Thread.remove(t);
2514 return false;
2516 else if (t.m_isInCriticalRegion)
2518 Thread.criticalRegionLock.unlock_nothrow();
2519 Thread.sleep(waittime);
2520 if (waittime < dur!"msecs"(10)) waittime *= 2;
2521 Thread.criticalRegionLock.lock_nothrow();
2522 goto Lagain;
2525 version (Windows)
2527 if ( t.m_addr != GetCurrentThreadId() && SuspendThread( t.m_hndl ) == 0xFFFFFFFF )
2529 if ( !t.isRunning )
2531 Thread.remove( t );
2532 return false;
2534 onThreadError( "Unable to suspend thread" );
2537 CONTEXT context = void;
2538 context.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;
2540 if ( !GetThreadContext( t.m_hndl, &context ) )
2541 onThreadError( "Unable to load thread context" );
2542 version (X86)
2544 if ( !t.m_lock )
2545 t.m_curr.tstack = cast(void*) context.Esp;
2546 // eax,ebx,ecx,edx,edi,esi,ebp,esp
2547 t.m_reg[0] = context.Eax;
2548 t.m_reg[1] = context.Ebx;
2549 t.m_reg[2] = context.Ecx;
2550 t.m_reg[3] = context.Edx;
2551 t.m_reg[4] = context.Edi;
2552 t.m_reg[5] = context.Esi;
2553 t.m_reg[6] = context.Ebp;
2554 t.m_reg[7] = context.Esp;
2556 else version (X86_64)
2558 if ( !t.m_lock )
2559 t.m_curr.tstack = cast(void*) context.Rsp;
2560 // rax,rbx,rcx,rdx,rdi,rsi,rbp,rsp
2561 t.m_reg[0] = context.Rax;
2562 t.m_reg[1] = context.Rbx;
2563 t.m_reg[2] = context.Rcx;
2564 t.m_reg[3] = context.Rdx;
2565 t.m_reg[4] = context.Rdi;
2566 t.m_reg[5] = context.Rsi;
2567 t.m_reg[6] = context.Rbp;
2568 t.m_reg[7] = context.Rsp;
2569 // r8,r9,r10,r11,r12,r13,r14,r15
2570 t.m_reg[8] = context.R8;
2571 t.m_reg[9] = context.R9;
2572 t.m_reg[10] = context.R10;
2573 t.m_reg[11] = context.R11;
2574 t.m_reg[12] = context.R12;
2575 t.m_reg[13] = context.R13;
2576 t.m_reg[14] = context.R14;
2577 t.m_reg[15] = context.R15;
2579 else
2581 static assert(false, "Architecture not supported." );
2584 else version (Darwin)
2586 if ( t.m_addr != pthread_self() && thread_suspend( t.m_tmach ) != KERN_SUCCESS )
2588 if ( !t.isRunning )
2590 Thread.remove( t );
2591 return false;
2593 onThreadError( "Unable to suspend thread" );
2596 version (X86)
2598 x86_thread_state32_t state = void;
2599 mach_msg_type_number_t count = x86_THREAD_STATE32_COUNT;
2601 if ( thread_get_state( t.m_tmach, x86_THREAD_STATE32, &state, &count ) != KERN_SUCCESS )
2602 onThreadError( "Unable to load thread state" );
2603 if ( !t.m_lock )
2604 t.m_curr.tstack = cast(void*) state.esp;
2605 // eax,ebx,ecx,edx,edi,esi,ebp,esp
2606 t.m_reg[0] = state.eax;
2607 t.m_reg[1] = state.ebx;
2608 t.m_reg[2] = state.ecx;
2609 t.m_reg[3] = state.edx;
2610 t.m_reg[4] = state.edi;
2611 t.m_reg[5] = state.esi;
2612 t.m_reg[6] = state.ebp;
2613 t.m_reg[7] = state.esp;
2615 else version (X86_64)
2617 x86_thread_state64_t state = void;
2618 mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
2620 if ( thread_get_state( t.m_tmach, x86_THREAD_STATE64, &state, &count ) != KERN_SUCCESS )
2621 onThreadError( "Unable to load thread state" );
2622 if ( !t.m_lock )
2623 t.m_curr.tstack = cast(void*) state.rsp;
2624 // rax,rbx,rcx,rdx,rdi,rsi,rbp,rsp
2625 t.m_reg[0] = state.rax;
2626 t.m_reg[1] = state.rbx;
2627 t.m_reg[2] = state.rcx;
2628 t.m_reg[3] = state.rdx;
2629 t.m_reg[4] = state.rdi;
2630 t.m_reg[5] = state.rsi;
2631 t.m_reg[6] = state.rbp;
2632 t.m_reg[7] = state.rsp;
2633 // r8,r9,r10,r11,r12,r13,r14,r15
2634 t.m_reg[8] = state.r8;
2635 t.m_reg[9] = state.r9;
2636 t.m_reg[10] = state.r10;
2637 t.m_reg[11] = state.r11;
2638 t.m_reg[12] = state.r12;
2639 t.m_reg[13] = state.r13;
2640 t.m_reg[14] = state.r14;
2641 t.m_reg[15] = state.r15;
2643 else
2645 static assert(false, "Architecture not supported." );
2648 else version (Posix)
2650 if ( t.m_addr != pthread_self() )
2652 if ( pthread_kill( t.m_addr, suspendSignalNumber ) != 0 )
2654 if ( !t.isRunning )
2656 Thread.remove( t );
2657 return false;
2659 onThreadError( "Unable to suspend thread" );
2662 else if ( !t.m_lock )
2664 t.m_curr.tstack = getStackTop();
2667 return true;
2671 * Suspend all threads but the calling thread for "stop the world" garbage
2672 * collection runs. This function may be called multiple times, and must
2673 * be followed by a matching number of calls to thread_resumeAll before
2674 * processing is resumed.
2676 * Throws:
2677 * ThreadError if the suspend operation fails for a running thread.
extern (C) void thread_suspendAll() nothrow
{
    // NOTE: We've got an odd chicken & egg problem here, because while the GC
    //       is required to call thread_init before calling any other thread
    //       routines, thread_init may allocate memory which could in turn
    //       trigger a collection.  Thus, thread_suspendAll, thread_scanAll,
    //       and thread_resumeAll must be callable before thread_init
    //       completes, with the assumption that no other GC memory has yet
    //       been allocated by the system, and thus there is no risk of losing
    //       data if the global thread list is empty.  The check of
    //       Thread.sm_tbeg below is done to ensure thread_init has completed,
    //       and therefore that calling Thread.getThis will not result in an
    //       error.  For the short time when Thread.sm_tbeg is null, there is
    //       no reason not to simply call the multithreaded code below, with
    //       the expectation that the foreach loop will never be entered.
    if ( !multiThreadedFlag && Thread.sm_tbeg )
    {
        if ( ++suspendDepth == 1 )
            suspend( Thread.getThis() );

        return;
    }

    // NOTE: slock is deliberately NOT released here; the matching
    //       unlock happens in thread_resumeAll (scope(exit) there), so
    //       the world stays locked for the duration of the collection.
    Thread.slock.lock_nothrow();
    {
        // Nested suspend: threads are already stopped, just track depth.
        if ( ++suspendDepth > 1 )
            return;

        Thread.criticalRegionLock.lock_nothrow();
        scope (exit) Thread.criticalRegionLock.unlock_nothrow();
        size_t cnt;
        auto t = Thread.sm_tbeg;
        while (t)
        {
            auto tn = t.next;
            if (suspend(t))
                ++cnt;
            t = tn;
        }

        version (Darwin)
        {
            // Mach thread_suspend (used by suspend()) is synchronous,
            // so no semaphore hand-shake is required.
        }
        else version (Posix)
        {
            // subtract own thread
            assert(cnt >= 1);
            --cnt;
        Lagain:
            // wait for semaphore notifications: each suspended thread's
            // signal handler posts suspendCount exactly once.
            for (; cnt; --cnt)
            {
                while (sem_wait(&suspendCount) != 0)
                {
                    if (errno != EINTR)
                        onThreadError("Unable to wait for semaphore");
                    errno = 0;
                }
            }
            version (FreeBSD)
            {
                // avoid deadlocks, see Issue 13416
                t = Thread.sm_tbeg;
                while (t)
                {
                    auto tn = t.next;
                    if (t.m_suspendagain && suspend(t))
                        ++cnt;
                    t = tn;
                }
                if (cnt)
                    goto Lagain;
            }
        }
    }
}
2756 * Resume the specified thread and unload stack and register information.
2757 * If the supplied thread is the calling thread, stack and register
2758 * information will be unloaded but the thread will not be resumed. If
2759 * the resume operation fails and the thread is not running then it will
2760 * be removed from the global thread list, otherwise an exception will be
2761 * thrown.
2763 * Params:
2764 * t = The thread to resume.
2766 * Throws:
2767 * ThreadError if the resume fails for a running thread.
private void resume( Thread t ) nothrow
{
    version (Windows)
    {
        // 0xFFFFFFFF is the documented failure value of ResumeThread.
        if ( t.m_addr != GetCurrentThreadId() && ResumeThread( t.m_hndl ) == 0xFFFFFFFF )
        {
            if ( !t.isRunning )
            {
                // Thread exited while suspended: drop it instead of raising.
                Thread.remove( t );
                return;
            }
            onThreadError( "Unable to resume thread" );
        }

        // Unload the stack/register snapshot captured by suspend().
        if ( !t.m_lock )
            t.m_curr.tstack = t.m_curr.bstack;
        t.m_reg[0 .. $] = 0;
    }
    else version (Darwin)
    {
        if ( t.m_addr != pthread_self() && thread_resume( t.m_tmach ) != KERN_SUCCESS )
        {
            if ( !t.isRunning )
            {
                Thread.remove( t );
                return;
            }
            onThreadError( "Unable to resume thread" );
        }

        // Unload the stack/register snapshot captured by suspend().
        if ( !t.m_lock )
            t.m_curr.tstack = t.m_curr.bstack;
        t.m_reg[0 .. $] = 0;
    }
    else version (Posix)
    {
        if ( t.m_addr != pthread_self() )
        {
            // The resume signal releases the target's suspend handler.
            if ( pthread_kill( t.m_addr, resumeSignalNumber ) != 0 )
            {
                if ( !t.isRunning )
                {
                    Thread.remove( t );
                    return;
                }
                onThreadError( "Unable to resume thread" );
            }
        }
        else if ( !t.m_lock )
        {
            // Calling thread: just unload its own saved stack pointer.
            t.m_curr.tstack = t.m_curr.bstack;
        }
    }
}
2825 * Resume all threads but the calling thread for "stop the world" garbage
2826 * collection runs. This function must be called once for each preceding
2827 * call to thread_suspendAll before the threads are actually resumed.
2829 * In:
2830 * This routine must be preceded by a call to thread_suspendAll.
2832 * Throws:
2833 * ThreadError if the resume operation fails for a running thread.
extern (C) void thread_resumeAll() nothrow
in
{
    assert( suspendDepth > 0 );
}
body
{
    // NOTE: See thread_suspendAll for the logic behind this.
    if ( !multiThreadedFlag && Thread.sm_tbeg )
    {
        if ( --suspendDepth == 0 )
            resume( Thread.getThis() );
        return;
    }

    // Releases the lock taken by the matching thread_suspendAll call.
    scope(exit) Thread.slock.unlock_nothrow();
    {
        // Only the outermost resume actually restarts the threads.
        if ( --suspendDepth > 0 )
            return;

        for ( Thread t = Thread.sm_tbeg; t; t = t.next )
        {
            // NOTE: We do not need to care about critical regions at all
            //       here. thread_suspendAll takes care of everything.
            resume( t );
        }
    }
}
2865 * Indicates the kind of scan being performed by $(D thread_scanAllType).
/// Indicates the kind of scan being performed by $(D thread_scanAllType).
enum ScanType
{
    stack, /// The stack and/or registers are being scanned.
    tls, /// TLS data is being scanned.
}

alias ScanAllThreadsFn = void delegate(void*, void*) nothrow; /// The scanning function.
alias ScanAllThreadsTypeFn = void delegate(ScanType, void*, void*) nothrow; /// ditto
2877 * The main entry point for garbage collection. The supplied delegate
2878 * will be passed ranges representing both stack and register values.
2880 * Params:
2881 * scan = The scanner function. It should scan from p1 through p2 - 1.
2883 * In:
2884 * This routine must be preceded by a call to thread_suspendAll.
extern (C) void thread_scanAllType( scope ScanAllThreadsTypeFn scan ) nothrow
in
{
    assert( suspendDepth > 0 );
}
body
{
    // Flush callee-saved registers onto the stack first so the actual
    // scan sees them; sp is the resulting (current) stack top.
    callWithStackShell(sp => scanAllTypeImpl(scan, sp));
}
// Worker for thread_scanAllType: reports every suspended thread's stack
// (and, on Windows, register snapshot) plus TLS ranges to `scan`.
private void scanAllTypeImpl( scope ScanAllThreadsTypeFn scan, void* curStackTop ) nothrow
{
    Thread thisThread = null;
    void* oldStackTop = null;

    // Temporarily record the calling thread's own stack top so its
    // active stack is scanned with an accurate upper bound.
    if ( Thread.sm_tbeg )
    {
        thisThread = Thread.getThis();
        if ( !thisThread.m_lock )
        {
            oldStackTop = thisThread.m_curr.tstack;
            thisThread.m_curr.tstack = curStackTop;
        }
    }

    scope( exit )
    {
        // Restore the previous value once scanning is complete.
        if ( Thread.sm_tbeg )
        {
            if ( !thisThread.m_lock )
            {
                thisThread.m_curr.tstack = oldStackTop;
            }
        }
    }

    // NOTE: Synchronizing on Thread.slock is not needed because this
    //       function may only be called after all other threads have
    //       been suspended from within the same lock.
    if (Thread.nAboutToStart)
        scan(ScanType.stack, Thread.pAboutToStart, Thread.pAboutToStart + Thread.nAboutToStart);

    for ( Thread.Context* c = Thread.sm_cbeg; c; c = c.next )
    {
        version (StackGrowsDown)
        {
            // NOTE: We can't index past the bottom of the stack
            //       so don't do the "+1" for StackGrowsDown.
            if ( c.tstack && c.tstack < c.bstack )
                scan( ScanType.stack, c.tstack, c.bstack );
        }
        else
        {
            if ( c.bstack && c.bstack < c.tstack )
                scan( ScanType.stack, c.bstack, c.tstack + 1 );
        }
    }

    for ( Thread t = Thread.sm_tbeg; t; t = t.next )
    {
        version (Windows)
        {
            // Ideally, we'd pass ScanType.regs or something like that, but this
            // would make portability annoying because it only makes sense on Windows.
            scan( ScanType.stack, t.m_reg.ptr, t.m_reg.ptr + t.m_reg.length );
        }

        // m_tlsgcdata can be null if a collection was triggered between
        // adding a thread and rt_tlsgc_init (see thread_processGCMarks).
        if (t.m_tlsgcdata !is null)
            rt_tlsgc_scan(t.m_tlsgcdata, (p1, p2) => scan(ScanType.tls, p1, p2));
    }
}
2960 * The main entry point for garbage collection. The supplied delegate
2961 * will be passed ranges representing both stack and register values.
2963 * Params:
2964 * scan = The scanner function. It should scan from p1 through p2 - 1.
2966 * In:
2967 * This routine must be preceded by a call to thread_suspendAll.
extern (C) void thread_scanAll( scope ScanAllThreadsFn scan ) nothrow
{
    // Untyped variant: forward every range, discarding the ScanType tag.
    thread_scanAllType((type, p1, p2) => scan(p1, p2));
}
2976 * Signals that the code following this call is a critical region. Any code in
2977 * this region must finish running before the calling thread can be suspended
2978 * by a call to thread_suspendAll.
2980 * This function is, in particular, meant to help maintain garbage collector
2981 * invariants when a lock is not used.
2983 * A critical region is exited with thread_exitCriticalRegion.
2985 * $(RED Warning):
2986 * Using critical regions is extremely error-prone. For instance, using locks
2987 * inside a critical region can easily result in a deadlock when another thread
2988 * holding the lock already got suspended.
2990 * The term and concept of a 'critical region' comes from
2991 * $(LINK2 https://github.com/mono/mono/blob/521f4a198e442573c400835ef19bbb36b60b0ebb/mono/metadata/sgen-gc.h#L925 Mono's SGen garbage collector).
2993 * In:
2994 * The calling thread must be attached to the runtime.
extern (C) void thread_enterCriticalRegion() @nogc
in
{
    assert(Thread.getThis());
}
body
{
    // The flag is read under the same lock by suspend logic, so taking
    // criticalRegionLock here makes entry visible to thread_suspendAll.
    synchronized (Thread.criticalRegionLock)
        Thread.getThis().m_isInCriticalRegion = true;
}
3009 * Signals that the calling thread is no longer in a critical region. Following
3010 * a call to this function, the thread can once again be suspended.
3012 * In:
3013 * The calling thread must be attached to the runtime.
extern (C) void thread_exitCriticalRegion() @nogc
in
{
    assert(Thread.getThis());
}
body
{
    // Clear the flag under criticalRegionLock; after this the thread
    // may be suspended again by thread_suspendAll.
    synchronized (Thread.criticalRegionLock)
        Thread.getThis().m_isInCriticalRegion = false;
}
3028 * Returns true if the current thread is in a critical region; otherwise, false.
3030 * In:
3031 * The calling thread must be attached to the runtime.
extern (C) bool thread_inCriticalRegion() @nogc
in
{
    assert(Thread.getThis());
}
body
{
    // Read under the same lock used by enter/exit for a consistent view.
    synchronized (Thread.criticalRegionLock)
        return Thread.getThis().m_isInCriticalRegion;
}
3046 * A callback for thread errors in D during collections. Since an allocation is not possible
3047 * a preallocated ThreadError will be used as the Error instance
3049 * Throws:
3050 * ThreadError.
private void onThreadError(string msg = null, Throwable next = null) nothrow
{
    // A single preallocated instance is reused because this may run during
    // a collection, when allocating a fresh Error is not possible.
    __gshared ThreadError error = new ThreadError(null);
    error.msg = msg;
    error.next = next;
    import core.exception : SuppressTraceInfo;
    // Suppress trace generation — it could allocate as well.
    error.info = SuppressTraceInfo.instance;
    throw error;
}
// Basic single-threaded check of the critical-region enter/exit/query API.
unittest
{
    assert(!thread_inCriticalRegion());

    {
        thread_enterCriticalRegion();

        scope (exit)
            thread_exitCriticalRegion();

        assert(thread_inCriticalRegion());
    }

    assert(!thread_inCriticalRegion());
}
// Verifies that one thread can observe another thread's critical-region
// flag (under criticalRegionLock) while the two synchronize via semaphores.
unittest
{
    // NOTE: This entire test is based on the assumption that no
    //       memory is allocated after the child thread is
    //       started. If an allocation happens, a collection could
    //       trigger, which would cause the synchronization below
    //       to cause a deadlock.
    // NOTE: DO NOT USE LOCKS IN CRITICAL REGIONS IN NORMAL CODE.

    import core.sync.semaphore;

    auto sema = new Semaphore(),
         semb = new Semaphore();

    auto thr = new Thread(
    {
        thread_enterCriticalRegion();
        assert(thread_inCriticalRegion());
        sema.notify();

        semb.wait();
        assert(thread_inCriticalRegion());

        thread_exitCriticalRegion();
        assert(!thread_inCriticalRegion());
        sema.notify();

        semb.wait();
        assert(!thread_inCriticalRegion());
    });

    thr.start();

    sema.wait();
    synchronized (Thread.criticalRegionLock)
        assert(thr.m_isInCriticalRegion);
    semb.notify();

    sema.wait();
    synchronized (Thread.criticalRegionLock)
        assert(!thr.m_isInCriticalRegion);
    semb.notify();

    thr.join();
}
// Verifies that thread_suspendAll blocks until the target thread has left
// its critical region before actually suspending it.
unittest
{
    import core.sync.semaphore;

    shared bool inCriticalRegion;
    auto sema = new Semaphore(),
         semb = new Semaphore();

    auto thr = new Thread(
    {
        thread_enterCriticalRegion();
        inCriticalRegion = true;
        sema.notify();
        semb.wait();

        // Give the main thread time to reach thread_suspendAll while we
        // are still inside the critical region.
        Thread.sleep(dur!"msecs"(1));
        inCriticalRegion = false;
        thread_exitCriticalRegion();
    });
    thr.start();

    sema.wait();
    assert(inCriticalRegion);
    semb.notify();

    // Must not return until the child has exited its critical region.
    thread_suspendAll();
    assert(!inCriticalRegion);
    thread_resumeAll();
}
3156 * Indicates whether an address has been marked by the GC.
/// Indicates whether an address has been marked by the GC.
enum IsMarked : int
{
    no, /// Address is not marked.
    yes, /// Address is marked.
    unknown, /// Address is not managed by the GC.
}

alias IsMarkedDg = int delegate( void* addr ) nothrow; /// The isMarked callback function.
3168 * This routine allows the runtime to process any special per-thread handling
3169 * for the GC. This is needed for taking into account any memory that is
3170 * referenced by non-scanned pointers but is about to be freed. That currently
3171 * means the array append cache.
3173 * Params:
3174 * isMarked = The function used to check if $(D addr) is marked.
3176 * In:
3177 * This routine must be called just prior to resuming all threads.
extern(C) void thread_processGCMarks( scope IsMarkedDg isMarked ) nothrow
{
    for ( Thread t = Thread.sm_tbeg; t; t = t.next )
    {
        /* Can be null if collection was triggered between adding a
         * thread and calling rt_tlsgc_init.
         */
        if (t.m_tlsgcdata !is null)
            rt_tlsgc_processGCMarks(t.m_tlsgcdata, isMarked);
    }
}
// Platform-specific C prototypes used by getStackBottom below to query the
// current thread's stack placement.
extern (C) @nogc nothrow
{
    version (CRuntime_Glibc) int pthread_getattr_np(pthread_t thread, pthread_attr_t* attr);
    version (FreeBSD) int pthread_attr_get_np(pthread_t thread, pthread_attr_t* attr);
    version (NetBSD) int pthread_attr_get_np(pthread_t thread, pthread_attr_t* attr);
    version (Solaris) int thr_stksegment(stack_t* stk);
    version (CRuntime_Bionic) int pthread_getattr_np(pthread_t thid, pthread_attr_t* attr);
}
// Returns the current stack pointer (DMD-style inline asm) or the current
// frame address (GDC) as an approximation of the active stack's top.
private void* getStackTop() nothrow @nogc
{
    version (D_InlineAsm_X86)
        asm pure nothrow @nogc { naked; mov EAX, ESP; ret; }
    else version (D_InlineAsm_X86_64)
        asm pure nothrow @nogc { naked; mov RAX, RSP; ret; }
    else version (GNU)
        return __builtin_frame_address(0);
    else
        static assert(false, "Architecture not supported.");
}
// Returns the base (highest address on stack-grows-down targets) of the
// calling thread's stack, using whichever mechanism the platform provides.
private void* getStackBottom() nothrow @nogc
{
    version (Windows)
    {
        // Read the stack base from the TEB (FS:[4] on x86, GS:[8] on x64).
        version (D_InlineAsm_X86)
            asm pure nothrow @nogc { naked; mov EAX, FS:4; ret; }
        else version (D_InlineAsm_X86_64)
            asm pure nothrow @nogc
            { naked;
              mov RAX, 8;
              mov RAX, GS:[RAX];
              ret;
            }
        else version (GNU_InlineAsm)
        {
            void *bottom;

            version (X86)
                asm pure nothrow @nogc { "movl %%fs:4, %0;" : "=r" bottom; }
            else version (X86_64)
                asm pure nothrow @nogc { "movq %%gs:8, %0;" : "=r" bottom; }
            else
                static assert(false, "Platform not supported.");

            return bottom;
        }
        else
            static assert(false, "Architecture not supported.");
    }
    else version (Darwin)
    {
        import core.sys.darwin.pthread;
        return pthread_get_stackaddr_np(pthread_self());
    }
    else version (CRuntime_Glibc)
    {
        pthread_attr_t attr;
        void* addr; size_t size;

        pthread_getattr_np(pthread_self(), &attr);
        pthread_attr_getstack(&attr, &addr, &size);
        pthread_attr_destroy(&attr);
        // addr is the lowest stack address; base is addr + size.
        return addr + size;
    }
    else version (FreeBSD)
    {
        pthread_attr_t attr;
        void* addr; size_t size;

        pthread_attr_init(&attr);
        pthread_attr_get_np(pthread_self(), &attr);
        pthread_attr_getstack(&attr, &addr, &size);
        pthread_attr_destroy(&attr);
        return addr + size;
    }
    else version (NetBSD)
    {
        pthread_attr_t attr;
        void* addr; size_t size;

        pthread_attr_init(&attr);
        pthread_attr_get_np(pthread_self(), &attr);
        pthread_attr_getstack(&attr, &addr, &size);
        pthread_attr_destroy(&attr);
        return addr + size;
    }
    else version (Solaris)
    {
        stack_t stk;

        thr_stksegment(&stk);
        return stk.ss_sp;
    }
    else version (CRuntime_Bionic)
    {
        pthread_attr_t attr;
        void* addr; size_t size;

        pthread_getattr_np(pthread_self(), &attr);
        pthread_attr_getstack(&attr, &addr, &size);
        pthread_attr_destroy(&attr);
        return addr + size;
    }
    else
        static assert(false, "Platform not supported.");
}
3304 * Returns the stack top of the currently active stack within the calling
3305 * thread.
3307 * In:
3308 * The calling thread must be attached to the runtime.
3310 * Returns:
3311 * The address of the stack top.
extern (C) void* thread_stackTop() nothrow @nogc
in
{
    // Not strictly required, but it gives us more flexibility.
    assert(Thread.getThis());
}
body
{
    return getStackTop();
}
3326 * Returns the stack bottom of the currently active stack within the calling
3327 * thread.
3329 * In:
3330 * The calling thread must be attached to the runtime.
3332 * Returns:
3333 * The address of the stack bottom.
extern (C) void* thread_stackBottom() nothrow @nogc
in
{
    assert(Thread.getThis());
}
body
{
    // bstack of the top context is the base of the currently active stack
    // (which may be a fiber's stack rather than the thread's own).
    return Thread.getThis().topContext().bstack;
}
3346 ///////////////////////////////////////////////////////////////////////////////
3347 // Thread Group
3348 ///////////////////////////////////////////////////////////////////////////////
3352 * This class is intended to simplify certain common programming techniques.
/**
 * This class is intended to simplify certain common programming techniques.
 */
class ThreadGroup
{
    /**
     * Creates and starts a new Thread object that executes fn and adds it to
     * the list of tracked threads.
     *
     * Params:
     *  fn = The thread function.
     *
     * Returns:
     *  A reference to the newly created thread.
     */
    final Thread create( void function() fn )
    {
        auto spawned = new Thread( fn ).start();

        synchronized( this )
        {
            m_all[spawned] = spawned;
        }
        return spawned;
    }


    /**
     * Creates and starts a new Thread object that executes dg and adds it to
     * the list of tracked threads.
     *
     * Params:
     *  dg = The thread function.
     *
     * Returns:
     *  A reference to the newly created thread.
     */
    final Thread create( void delegate() dg )
    {
        auto spawned = new Thread( dg ).start();

        synchronized( this )
        {
            m_all[spawned] = spawned;
        }
        return spawned;
    }


    /**
     * Add t to the list of tracked threads if it is not already being tracked.
     *
     * Params:
     *  t = The thread to add.
     *
     * In:
     *  t must not be null.
     */
    final void add( Thread t )
    in
    {
        assert( t );
    }
    body
    {
        synchronized( this )
        {
            m_all[t] = t;
        }
    }


    /**
     * Removes t from the list of tracked threads.  No operation will be
     * performed if t is not currently being tracked by this object.
     *
     * Params:
     *  t = The thread to remove.
     *
     * In:
     *  t must not be null.
     */
    final void remove( Thread t )
    in
    {
        assert( t );
    }
    body
    {
        synchronized( this )
        {
            m_all.remove( t );
        }
    }


    /**
     * Operates on all threads currently tracked by this object.
     */
    final int opApply( scope int delegate( ref Thread ) dg )
    {
        synchronized( this )
        {
            int result = 0;

            // m_all maps each Thread onto itself, so iterating a snapshot
            // of the keys visits every tracked thread exactly once, even
            // if dg mutates the group.
            foreach ( Thread member; m_all.keys )
            {
                result = dg( member );
                if ( result )
                    break;
            }
            return result;
        }
    }


    /**
     * Iteratively joins all tracked threads.  This function will block add,
     * remove, and opApply until it completes.
     *
     * Params:
     *  rethrow = Rethrow any unhandled exception which may have caused the
     *            current thread to terminate.
     *
     * Throws:
     *  Any exception not handled by the joined threads.
     */
    final void joinAll( bool rethrow = true )
    {
        synchronized( this )
        {
            // Iterate a key snapshot (keys copies) so joining — which may
            // trigger callbacks — cannot invalidate the iteration.
            foreach ( Thread member; m_all.keys )
            {
                member.join( rethrow );
            }
        }
    }


private:
    Thread[Thread] m_all;
}
3499 ///////////////////////////////////////////////////////////////////////////////
3500 // Fiber Platform Detection and Memory Allocation
3501 ///////////////////////////////////////////////////////////////////////////////
// Maps the compiler/target combination onto the fiber implementation
// strategy: DMD-style inline asm (AsmX86_*), an externally assembled
// context-switch routine (AsmExternal), or — when neither applies — the
// POSIX ucontext fallback.  AlignFiberStackTo16Byte marks ABIs that
// require 16-byte stack alignment when a fiber stack is initialized.
private
{
    version (D_InlineAsm_X86)
    {
        version (Windows)
            version = AsmX86_Windows;
        else version (Posix)
            version = AsmX86_Posix;

        version (Darwin)
            version = AlignFiberStackTo16Byte;
    }
    else version (D_InlineAsm_X86_64)
    {
        version (Windows)
        {
            version = AsmX86_64_Windows;
            version = AlignFiberStackTo16Byte;
        }
        else version (Posix)
        {
            version = AsmX86_64_Posix;
            version = AlignFiberStackTo16Byte;
        }
    }
    else version (X86)
    {
        version = AsmExternal;

        version (MinGW)
        {
            version = GNU_AsmX86_Windows;
            version = AlignFiberStackTo16Byte;
        }
        else version (Posix)
        {
            version = AsmX86_Posix;
            version (OSX)
                version = AlignFiberStackTo16Byte;
        }
    }
    else version (X86_64)
    {
        version (D_X32)
        {
            // let X32 be handled by ucontext swapcontext
        }
        else
        {
            version = AsmExternal;
            version = AlignFiberStackTo16Byte;

            version (MinGW)
                version = GNU_AsmX86_64_Windows;
            else version (Posix)
                version = AsmX86_64_Posix;
        }
    }
    else version (PPC)
    {
        version (Posix)
        {
            version = AsmPPC_Posix;
            version = AsmExternal;
        }
    }
    else version (PPC64)
    {
        version (Posix)
        {
            // PPC64 uses the ucontext fallback; only alignment is forced.
            version = AlignFiberStackTo16Byte;
        }
    }
    else version (MIPS_O32)
    {
        version (Posix)
        {
            version = AsmMIPS_O32_Posix;
            version = AsmExternal;
        }
    }
    else version (AArch64)
    {
        version (Posix)
        {
            version = AsmAArch64_Posix;
            version = AsmExternal;
            version = AlignFiberStackTo16Byte;
        }
    }
    else version (ARM)
    {
        version (Posix)
        {
            version = AsmARM_Posix;
            version = AsmExternal;
        }
    }

    version (Posix)
    {
        import core.sys.posix.unistd;   // for sysconf

        version (AsmX86_Windows)    {} else
        version (AsmX86_Posix)      {} else
        version (AsmX86_64_Windows) {} else
        version (AsmX86_64_Posix)   {} else
        version (AsmExternal)       {} else
        {
            // NOTE: The ucontext implementation requires architecture specific
            //       data definitions to operate so testing for it must be done
            //       by checking for the existence of ucontext_t rather than by
            //       a version identifier.  Please note that this is considered
            //       an obsolescent feature according to the POSIX spec, so a
            //       custom solution is still preferred.
            import core.sys.posix.ucontext;
        }
    }
}
// System page size, queried once at module construction.
static immutable size_t PAGESIZE;
// Minimum usable thread stack size as reported by the platform (POSIX only).
version (Posix) static immutable size_t PTHREAD_STACK_MIN;

shared static this()
{
    version (Windows)
    {
        SYSTEM_INFO info;
        GetSystemInfo(&info);

        PAGESIZE = info.dwPageSize;
        assert(PAGESIZE < int.max);
    }
    else version (Posix)
    {
        PAGESIZE = cast(size_t)sysconf(_SC_PAGESIZE);
        PTHREAD_STACK_MIN = cast(size_t)sysconf(_SC_THREAD_STACK_MIN);
    }
    else
    {
        static assert(0, "unimplemented");
    }
}
3650 ///////////////////////////////////////////////////////////////////////////////
3651 // Fiber Entry Point and Context Switch
3652 ///////////////////////////////////////////////////////////////////////////////
3655 private
// First function executed on a brand-new fiber stack: runs the fiber's
// payload, records any uncaught Throwable, then switches back out with
// the fiber marked TERM.
extern (C) void fiber_entryPoint() nothrow
{
    Fiber obj = Fiber.getThis();
    assert( obj );

    assert( Thread.getThis().m_curr is obj.m_ctxt );
    // Release the context-switch lock taken by the caller of the switch.
    atomicStore!(MemoryOrder.raw)(*cast(shared)&Thread.getThis().m_lock, false);
    obj.m_ctxt.tstack = obj.m_ctxt.bstack;
    obj.m_state = Fiber.State.EXEC;

    try
    {
        obj.run();
    }
    catch ( Throwable t )
    {
        // Stash the exception; Fiber.call rethrows it in the caller.
        obj.m_unhandled = t;
    }

    static if ( __traits( compiles, ucontext_t ) )
        obj.m_ucur = &obj.m_utxt;

    obj.m_state = Fiber.State.TERM;
    obj.switchOut();
}
// Look above the definition of 'class Fiber' for some information about the implementation of this routine
version (AsmExternal)
{
    extern (C) void fiber_switchContext( void** oldp, void* newp ) nothrow @nogc;
    version (AArch64)
        // NOTE(review): presumably the assembly-side helper the AArch64 port
        // uses so unwinders can produce sane fiber backtraces — confirm
        // against the accompanying .S file.
        extern (C) void fiber_trampoline() nothrow;
}
else
// Saves the current context onto the active stack, stores the resulting
// stack pointer through *oldp, then switches to the context whose saved
// stack pointer is newp and resumes it by jumping to its saved address.
extern (C) void fiber_switchContext( void** oldp, void* newp ) nothrow @nogc
{
    // NOTE: The data pushed and popped in this routine must match the
    //       default stack created by Fiber.initStack or the initial
    //       switch into a new context will fail.

    version (AsmX86_Windows)
    {
        asm pure nothrow @nogc
        {
            naked;

            // save current stack state
            push EBP;
            mov EBP, ESP;
            push EDI;
            push ESI;
            push EBX;
            // Win32 SEH chain and stack limits live in the TIB and are
            // per-context, so they are saved/restored as well.
            push dword ptr FS:[0];
            push dword ptr FS:[4];
            push dword ptr FS:[8];
            push EAX;

            // store oldp again with more accurate address
            mov EAX, dword ptr 8[EBP];
            mov [EAX], ESP;
            // load newp to begin context switch
            mov ESP, dword ptr 12[EBP];

            // load saved state from new stack
            pop EAX;
            pop dword ptr FS:[8];
            pop dword ptr FS:[4];
            pop dword ptr FS:[0];
            pop EBX;
            pop ESI;
            pop EDI;
            pop EBP;

            // 'return' to complete switch
            pop ECX;
            jmp ECX;
        }
    }
    else version (AsmX86_64_Windows)
    {
        asm pure nothrow @nogc
        {
            naked;

            // save current stack state
            // NOTE: When changing the layout of registers on the stack,
            //       make sure that the XMM registers are still aligned.
            //       On function entry, the stack is guaranteed to not
            //       be aligned to 16 bytes because of the return address
            //       on the stack.
            push RBP;
            mov RBP, RSP;
            push R12;
            push R13;
            push R14;
            push R15;
            push RDI;
            push RSI;
            // 7 registers = 56 bytes; stack is now aligned to 16 bytes
            sub RSP, 160;
            movdqa [RSP + 144], XMM6;
            movdqa [RSP + 128], XMM7;
            movdqa [RSP + 112], XMM8;
            movdqa [RSP + 96], XMM9;
            movdqa [RSP + 80], XMM10;
            movdqa [RSP + 64], XMM11;
            movdqa [RSP + 48], XMM12;
            movdqa [RSP + 32], XMM13;
            movdqa [RSP + 16], XMM14;
            movdqa [RSP], XMM15;
            push RBX;
            xor RAX,RAX;
            // TEB stack fields (GS:[0], GS:[8], GS:[16]) are per-context.
            push qword ptr GS:[RAX];
            push qword ptr GS:8[RAX];
            push qword ptr GS:16[RAX];

            // store oldp
            mov [RCX], RSP;
            // load newp to begin context switch
            mov RSP, RDX;

            // load saved state from new stack
            pop qword ptr GS:16[RAX];
            pop qword ptr GS:8[RAX];
            pop qword ptr GS:[RAX];
            pop RBX;
            movdqa XMM15, [RSP];
            movdqa XMM14, [RSP + 16];
            movdqa XMM13, [RSP + 32];
            movdqa XMM12, [RSP + 48];
            movdqa XMM11, [RSP + 64];
            movdqa XMM10, [RSP + 80];
            movdqa XMM9, [RSP + 96];
            movdqa XMM8, [RSP + 112];
            movdqa XMM7, [RSP + 128];
            movdqa XMM6, [RSP + 144];
            add RSP, 160;
            pop RSI;
            pop RDI;
            pop R15;
            pop R14;
            pop R13;
            pop R12;
            pop RBP;

            // 'return' to complete switch
            pop RCX;
            jmp RCX;
        }
    }
    else version (AsmX86_Posix)
    {
        asm pure nothrow @nogc
        {
            naked;

            // save current stack state
            push EBP;
            mov EBP, ESP;
            push EDI;
            push ESI;
            push EBX;
            push EAX;

            // store oldp again with more accurate address
            mov EAX, dword ptr 8[EBP];
            mov [EAX], ESP;
            // load newp to begin context switch
            mov ESP, dword ptr 12[EBP];

            // load saved state from new stack
            pop EAX;
            pop EBX;
            pop ESI;
            pop EDI;
            pop EBP;

            // 'return' to complete switch
            pop ECX;
            jmp ECX;
        }
    }
    else version (AsmX86_64_Posix)
    {
        asm pure nothrow @nogc
        {
            naked;

            // save current stack state (System V callee-saved registers)
            push RBP;
            mov RBP, RSP;
            push RBX;
            push R12;
            push R13;
            push R14;
            push R15;

            // store oldp
            mov [RDI], RSP;
            // load newp to begin context switch
            mov RSP, RSI;

            // load saved state from new stack
            pop R15;
            pop R14;
            pop R13;
            pop R12;
            pop RBX;
            pop RBP;

            // 'return' to complete switch
            pop RCX;
            jmp RCX;
        }
    }
    else static if ( __traits( compiles, ucontext_t ) )
    {
        // ucontext fallback: delegate the register save/restore to the OS.
        Fiber cfib = Fiber.getThis();
        void* ucur = cfib.m_ucur;

        *oldp = &ucur;
        swapcontext( **(cast(ucontext_t***) oldp),
                      *(cast(ucontext_t**) newp) );
    }
    else
        static assert(0, "Not implemented");
}
3887 ///////////////////////////////////////////////////////////////////////////////
3888 // Fiber
3889 ///////////////////////////////////////////////////////////////////////////////
3891 * Documentation of Fiber internals:
3893 * The main routines to implement when porting Fibers to new architectures are
3894 * fiber_switchContext and initStack. Some version constants have to be defined
3895 * for the new platform as well, search for "Fiber Platform Detection and Memory Allocation".
3897 * Fibers are based on a concept called 'Context'. A Context describes the execution
3898 * state of a Fiber or main thread which is fully described by the stack, some
3899 * registers and a return address at which the Fiber/Thread should continue executing.
3900 * Please note that not only each Fiber has a Context, but each thread also has got a
3901 * Context which describes the threads stack and state. If you call Fiber fib; fib.call
3902 * the first time in a thread you switch from Threads Context into the Fibers Context.
3903 * If you call fib.yield in that Fiber you switch out of the Fibers context and back
3904 * into the Thread Context. (However, this is not always the case. You can call a Fiber
3905 * from within another Fiber, then you switch Contexts between the Fibers and the Thread
3906 * Context is not involved)
3908 * In all current implementations the registers and the return address are actually
3909 * saved on a Contexts stack.
3911 * The fiber_switchContext routine has got two parameters:
3912 * void** a: This is the _location_ where we have to store the current stack pointer,
3913 * the stack pointer of the currently executing Context (Fiber or Thread).
3914 * void* b: This is the pointer to the stack of the Context which we want to switch into.
3915 * Note that we get the same pointer here as the one we stored into the void** a
3916 * in a previous call to fiber_switchContext.
3918  * In the simplest case, a fiber_switchContext routine looks like this:
3919 * fiber_switchContext:
3920 * push {return Address}
3921 * push {registers}
3922 * copy {stack pointer} into {location pointed to by a}
3923  * //We have now switched to the stack of a different Context!
3924 * copy {b} into {stack pointer}
3925 * pop {registers}
3926 * pop {return Address}
3927 * jump to {return Address}
3929 * The GC uses the value returned in parameter a to scan the Fibers stack. It scans from
3930 * the stack base to that value. As the GC dislikes false pointers we can actually optimize
3931 * this a little: By storing registers which can not contain references to memory managed
3932 * by the GC outside of the region marked by the stack base pointer and the stack pointer
3933 * saved in fiber_switchContext we can prevent the GC from scanning them.
3934 * Such registers are usually floating point registers and the return address. In order to
3935 * implement this, we return a modified stack pointer from fiber_switchContext. However,
3936 * we have to remember that when we restore the registers from the stack!
3938 * --------------------------- <= Stack Base
3939 * | Frame | <= Many other stack frames
3940 * | Frame |
3941 * |-------------------------| <= The last stack frame. This one is created by fiber_switchContext
3942 * | registers with pointers |
3943 * | | <= Stack pointer. GC stops scanning here
3944 * | return address |
3945 * |floating point registers |
3946 * --------------------------- <= Real Stack End
3948 * fiber_switchContext:
3949 * push {registers with pointers}
3950 * copy {stack pointer} into {location pointed to by a}
3951 * push {return Address}
3952 * push {Floating point registers}
3953 * //We have now switched to the stack of a different Context!
3954 * copy {b} into {stack pointer}
3955 * //We now have to adjust the stack pointer to point to 'Real Stack End' so we can pop
3956 * //the FP registers
3957 * //+ or - depends on if your stack grows downwards or upwards
3958 * {stack pointer} = {stack pointer} +- ({FPRegisters}.sizeof + {return address}.sizeof)
3959 * pop {Floating point registers}
3960 * pop {return Address}
3961 * pop {registers with pointers}
3962 * jump to {return Address}
3964 * So the question now is which registers need to be saved? This depends on the specific
3965 * architecture ABI of course, but here are some general guidelines:
3966 * - If a register is callee-save (if the callee modifies the register it must be saved and
3967 * restored by the callee) it needs to be saved/restored in switchContext
3968 * - If a register is caller-save it needn't be saved/restored. (Calling fiber_switchContext
3969 * is a function call and the compiler therefore already must save these registers before
3970 * calling fiber_switchContext)
3971 * - Argument registers used for passing parameters to functions needn't be saved/restored
3972 * - The return register needn't be saved/restored (fiber_switchContext hasn't got a return type)
3973 * - All scratch registers needn't be saved/restored
3974 * - The link register usually needn't be saved/restored (but sometimes it must be cleared -
3975 * see below for details)
3976 * - The frame pointer register - if it exists - is usually callee-save
3977 * - All current implementations do not save control registers
3979 * What happens on the first switch into a Fiber? We never saved a state for this fiber before,
3980 * but the initial state is prepared in the initStack routine. (This routine will also be called
3981 * when a Fiber is being reset). initStack must produce exactly the same stack layout as the
3982 * part of fiber_switchContext which saves the registers. Pay special attention to set the stack
3983 * pointer correctly if you use the GC optimization mentioned before. The return address saved in
3984 * initStack must be the address of fiber_entrypoint.
3986 * There's now a small but important difference between the first context switch into a fiber and
3987 * further context switches. On the first switch, Fiber.call is used and the returnAddress in
3988 * fiber_switchContext will point to fiber_entrypoint. The important thing here is that this jump
3989 * is a _function call_, we call fiber_entrypoint by jumping before its function prologue. On later
3990 * calls, the user used yield() in a function, and therefore the return address points into a user
3991 * function, after the yield call. So here the jump in fiber_switchContext is a _function return_,
3992 * not a function call!
3994 * The most important result of this is that on entering a function, i.e. fiber_entrypoint, we
3995 * would have to provide a return address / set the link register once fiber_entrypoint
3996 * returns. Now fiber_entrypoint never returns and therefore the actual value of the return
3997 * address / link register is never read/used and therefore doesn't matter. When fiber_switchContext
3998 * performs a _function return_ the value in the link register doesn't matter either.
3999 * However, the link register will still be saved to the stack in fiber_entrypoint and some
4000 * exception handling / stack unwinding code might read it from this stack location and crash.
4001 * The exact solution depends on your architecture, but see the ARM implementation for a way
4002 * to deal with this issue.
4004 * The ARM implementation is meant to be used as a kind of documented example implementation.
4005 * Look there for a concrete example.
4007 * FIXME: fiber_entrypoint might benefit from a @noreturn attribute, but D doesn't have one.
4011 * This class provides a cooperative concurrency mechanism integrated with the
4012 * threading and garbage collection functionality. Calling a fiber may be
4013 * considered a blocking operation that returns when the fiber yields (via
4014 * Fiber.yield()). Execution occurs within the context of the calling thread
4015 * so synchronization is not necessary to guarantee memory visibility so long
4016 * as the same thread calls the fiber each time. Please note that there is no
4017 * requirement that a fiber be bound to one specific thread. Rather, fibers
4018 * may be freely passed between threads so long as they are not currently
4019 * executing. Like threads, a new fiber thread may be created using either
4020 * derivation or composition, as in the following example.
4022 * Warning:
4023 * Status registers are not saved by the current implementations. This means
4024 * floating point exception status bits (overflow, divide by 0), rounding mode
4025 * and similar stuff is set per-thread, not per Fiber!
4027 * Warning:
4028 * On ARM FPU registers are not saved if druntime was compiled as ARM_SoftFloat.
4029 * If such a build is used on a ARM_SoftFP system which actually has got a FPU
4030 * and other libraries are using the FPU registers (other code is compiled
4031 * as ARM_SoftFP) this can cause problems. Druntime must be compiled as
4032 * ARM_SoftFP in this case.
4034 * Example:
4035 * ----------------------------------------------------------------------
4037 * class DerivedFiber : Fiber
4039 * this()
4041 * super( &run );
4044 * private :
4045 * void run()
4047 * printf( "Derived fiber running.\n" );
4051 * void fiberFunc()
4053 * printf( "Composed fiber running.\n" );
4054 * Fiber.yield();
4055 * printf( "Composed fiber running.\n" );
4058 * // create instances of each type
4059 * Fiber derived = new DerivedFiber();
4060 * Fiber composed = new Fiber( &fiberFunc );
4062 * // call both fibers once
4063 * derived.call();
4064 * composed.call();
4065 * printf( "Execution returned to calling context.\n" );
4066 * composed.call();
4068 * // since each fiber has run to completion, each should have state TERM
4069 * assert( derived.state == Fiber.State.TERM );
4070 * assert( composed.state == Fiber.State.TERM );
4072 * ----------------------------------------------------------------------
4074 * Authors: Based on a design by Mikola Lysenko.
4076 class Fiber
4078 ///////////////////////////////////////////////////////////////////////////
4079 // Initialization
4080 ///////////////////////////////////////////////////////////////////////////
4084 * Initializes a fiber object which is associated with a static
4085 * D function.
4087 * Params:
4088 * fn = The fiber function.
4089 * sz = The stack size for this fiber.
4090 * guardPageSize = size of the guard page to trap fiber's stack
4091 * overflows
4093 * In:
4094 * fn must not be null.
4096 this( void function() fn, size_t sz = PAGESIZE*4,
4097 size_t guardPageSize = PAGESIZE ) nothrow
4100 assert( fn );
4102 body
// Reserve the fiber's stack (with an optional guard page) and arm it so
// the first call() will enter fn.
4104 allocStack( sz, guardPageSize );
4105 reset( fn );
4110 * Initializes a fiber object which is associated with a dynamic
4111 * D function.
4113 * Params:
4114 * dg = The fiber function.
4115 * sz = The stack size for this fiber.
4116 * guardPageSize = size of the guard page to trap fiber's stack
4117 * overflows
4119 * In:
4120 * dg must not be null.
4122 this( void delegate() dg, size_t sz = PAGESIZE*4,
4123 size_t guardPageSize = PAGESIZE ) nothrow
4126 assert( dg );
4128 body
// Same as the function-pointer constructor, but binds a delegate
// (which may carry a context/frame pointer).
4130 allocStack( sz, guardPageSize);
4131 reset( dg );
4136 * Cleans up any remaining resources used by this object.
4138 ~this() nothrow @nogc
4140 // NOTE: A live reference to this object will exist on its associated
4141 // stack from the first time its call() method has been called
4142 // until its execution completes with State.TERM. Thus, the only
4143 // times this dtor should be called are either if the fiber has
4144 // terminated (and therefore has no active stack) or if the user
4145 // explicitly deletes this object. The latter case is an error
4146 // but is not easily tested for, since State.HOLD may imply that
4147 // the fiber was just created but has never been run. There is
4148 // not a compelling case to create a State.INIT just to offer a
4149 // means of ensuring the user isn't violating this object's
4150 // contract, so for now this requirement will be enforced by
4151 // documentation only.
// Unregister the context from the global list and release the stack memory.
4152 freeStack();
4156 ///////////////////////////////////////////////////////////////////////////
4157 // General Actions
4158 ///////////////////////////////////////////////////////////////////////////
4162 * Transfers execution to this fiber object. The calling context will be
4163 * suspended until the fiber calls Fiber.yield() or until it terminates
4164 * via an unhandled exception.
4166 * Params:
4167 * rethrow = Rethrow any unhandled exception which may have caused this
4168 * fiber to terminate.
4170 * In:
4171 * This fiber must be in state HOLD.
4173 * Throws:
4174 * Any exception not handled by the joined thread.
4176 * Returns:
4177 * Any exception not handled by this fiber if rethrow = false, null
4178 * otherwise.
4180 // Not marked with any attributes, even though `nothrow @nogc` works
4181 // because it calls arbitrary user code. Most of the implementation
4182 // is already `@nogc nothrow`, but in order for `Fiber.call` to
4183 // propagate the attributes of the user's function, the Fiber
4184 // class needs to be templated.
4185 final Throwable call( Rethrow rethrow = Rethrow.yes )
// Runtime-value overload: forwards to the compile-time templated overload.
4187 return rethrow ? call!(Rethrow.yes)() : call!(Rethrow.no);
4190 /// ditto
4191 final Throwable call( Rethrow rethrow )()
4193 callImpl();
// Consume any Throwable recorded while the fiber ran; depending on the
// compile-time policy it is either rethrown here or handed to the caller.
4194 if ( m_unhandled )
4196 Throwable t = m_unhandled;
4197 m_unhandled = null;
4198 static if ( rethrow )
4199 throw t;
4200 else
4201 return t;
4203 return null;
4206 /// ditto
4207 deprecated("Please pass Fiber.Rethrow.yes or .no instead of a boolean.")
4208 final Throwable call( bool rethrow )
// Legacy boolean overload, kept for backward compatibility only.
4210 return rethrow ? call!(Rethrow.yes)() : call!(Rethrow.no);
4213 private void callImpl() nothrow @nogc
4216 assert( m_state == State.HOLD );
4218 body
// Remember the previously active fiber (null when called from plain
// thread context) so that nested fiber calls restore it afterwards.
4220 Fiber cur = getThis();
4222 static if ( __traits( compiles, ucontext_t ) )
4223 m_ucur = cur ? &cur.m_utxt : &Fiber.sm_utxt;
// Make this fiber current for the duration of the switch, then restore
// the previous one once control returns here.
4225 setThis( this );
4226 this.switchIn();
4227 setThis( cur );
4229 static if ( __traits( compiles, ucontext_t ) )
4230 m_ucur = null;
4232 // NOTE: If the fiber has terminated then the stack pointers must be
4233 // reset. This ensures that the stack for this fiber is not
4234 // scanned if the fiber has terminated. This is necessary to
4235 // prevent any references lingering on the stack from delaying
4236 // the collection of otherwise dead objects. The most notable
4237 // being the current object, which is referenced at the top of
4238 // fiber_entryPoint.
4239 if ( m_state == State.TERM )
4241 m_ctxt.tstack = m_ctxt.bstack;
4245 /// Flag to control rethrow behavior of $(D $(LREF call))
4246 enum Rethrow : bool { no, yes }
4249 * Resets this fiber so that it may be re-used, optionally with a
4250 * new function/delegate. This routine should only be called for
4251 * fibers that have terminated, as doing otherwise could result in
4252 * scope-dependent functionality that is not executed.
4253 * Stack-based classes, for example, may not be cleaned up
4254 * properly if a fiber is reset before it has terminated.
4256 * In:
4257 * This fiber must be in state TERM or HOLD.
4259 final void reset() nothrow @nogc
4262 assert( m_state == State.TERM || m_state == State.HOLD );
4264 body
// Rewind the stack to its base and rebuild the initial context frame so
// the next call() starts from the entry point again.
4266 m_ctxt.tstack = m_ctxt.bstack;
4267 m_state = State.HOLD;
4268 initStack();
4269 m_unhandled = null;
4272 /// ditto
4273 final void reset( void function() fn ) nothrow @nogc
4275 reset();
// Rebind the fiber to a plain function.
4276 m_fn = fn;
4277 m_call = Call.FN;
4280 /// ditto
4281 final void reset( void delegate() dg ) nothrow @nogc
4283 reset();
// Rebind the fiber to a delegate.
4284 m_dg = dg;
4285 m_call = Call.DG;
4288 ///////////////////////////////////////////////////////////////////////////
4289 // General Properties
4290 ///////////////////////////////////////////////////////////////////////////
4294 * A fiber may occupy one of three states: HOLD, EXEC, and TERM. The HOLD
4295 * state applies to any fiber that is suspended and ready to be called.
4296 * The EXEC state will be set for any fiber that is currently executing.
4297 * And the TERM state is set when a fiber terminates. Once a fiber
4298 * terminates, it must be reset before it may be called again.
4300 enum State
4302 HOLD, /// Suspended and ready to be called (also the initial state).
4303 EXEC, /// Currently executing.
4304 TERM /// Terminated; must be reset() before it can be called again.
4309 * Gets the current state of this fiber.
4311 * Returns:
4312 * The state of this fiber as an enumerated value.
4314 final @property State state() const @safe pure nothrow @nogc
// Plain accessor for m_state; no synchronization is performed.
4316 return m_state;
4320 ///////////////////////////////////////////////////////////////////////////
4321 // Actions on Calling Fiber
4322 ///////////////////////////////////////////////////////////////////////////
4326 * Forces a context switch to occur away from the calling fiber.
4328 static void yield() nothrow @nogc
4330 Fiber cur = getThis();
4331 assert( cur, "Fiber.yield() called with no active fiber" );
4332 assert( cur.m_state == State.EXEC );
4334 static if ( __traits( compiles, ucontext_t ) )
4335 cur.m_ucur = &cur.m_utxt;
// HOLD is set before switching out; execution resumes right after
// switchOut() when the fiber is next call()ed, restoring EXEC.
4337 cur.m_state = State.HOLD;
4338 cur.switchOut();
4339 cur.m_state = State.EXEC;
4344 * Forces a context switch to occur away from the calling fiber and then
4345 * throws obj in the calling fiber.
4347 * Params:
4348 * t = The object to throw.
4350 * In:
4351 * t must not be null.
/**
 * Forces a context switch to occur away from the calling fiber and records
 * t so that it is raised (or returned) in the context that resumes this
 * fiber, mirroring the unhandled-exception path of call().
 *
 * Params:
 *  t = The object to throw.  Must not be null.
 */
static void yieldAndThrow( Throwable t ) nothrow @nogc
in
{
    assert( t );
}
body
{
    Fiber cur = getThis();
    // Fixed diagnostic: previously this assert reported "Fiber.yield()",
    // which misattributed the failure when yieldAndThrow was the caller.
    assert( cur, "Fiber.yieldAndThrow() called with no active fiber" );
    assert( cur.m_state == State.EXEC );

    static if ( __traits( compiles, ucontext_t ) )
        cur.m_ucur = &cur.m_utxt;

    // Stash t; the resuming context's call() consumes m_unhandled.
    cur.m_unhandled = t;
    // HOLD before switching out; EXEC is restored when we are resumed.
    cur.m_state = State.HOLD;
    cur.switchOut();
    cur.m_state = State.EXEC;
}
4374 ///////////////////////////////////////////////////////////////////////////
4375 // Fiber Accessors
4376 ///////////////////////////////////////////////////////////////////////////
4380 * Provides a reference to the calling fiber or null if no fiber is
4381 * currently active.
4383 * Returns:
4384 * The fiber object representing the calling fiber or null if no fiber
4385 * is currently active within this thread. The result of deleting this object is undefined.
4387 static Fiber getThis() @safe nothrow @nogc
// sm_this is a per-thread static, so this yields the fiber (if any)
// currently executing on the calling thread.
4389 return sm_this;
4393 ///////////////////////////////////////////////////////////////////////////
4394 // Static Initialization
4395 ///////////////////////////////////////////////////////////////////////////
4398 version (Posix)
4400 static this()
// Capture a template context for the ucontext fallback path; initStack
// copies and adjusts it for each new fiber when no asm backend applies.
4402 static if ( __traits( compiles, ucontext_t ) )
4404 int status = getcontext( &sm_utxt );
4405 assert( status == 0 );
4410 private:
4412 // Initializes a fiber object which has no associated executable function.
4414 this() @safe pure nothrow @nogc
// No callable is bound; run() will fall through its default case.
4416 m_call = Call.NO;
4421 // Fiber entry point. Invokes the function or delegate passed on
4422 // construction (if any).
4424 final void run()
// Dispatch on the discriminator set by the constructors / reset():
// Call.NO (default case) deliberately does nothing.
4426 switch ( m_call )
4428 case Call.FN:
4429 m_fn();
4430 break;
4431 case Call.DG:
4432 m_dg();
4433 break;
4434 default:
4435 break;
4440 private:
4442 // The type of routine passed on fiber construction.
// Discriminator for the m_fn/m_dg union below.
4444 enum Call
4453 // Standard fiber data
4455 Call m_call; // Which union member (if any) run() should invoke.
4456 union
4458 void function() m_fn; // Valid when m_call == Call.FN.
4459 void delegate() m_dg; // Valid when m_call == Call.DG.
4461 bool m_isRunning;
4462 Throwable m_unhandled; // Pending Throwable handed over by yieldAndThrow; consumed by call().
4463 State m_state; // Current lifecycle state (HOLD/EXEC/TERM).
4466 private:
4467 ///////////////////////////////////////////////////////////////////////////
4468 // Stack Management
4469 ///////////////////////////////////////////////////////////////////////////
4473 // Allocate a new stack for this fiber.
// Allocates the fiber's stack (VirtualAlloc on Windows, else mmap, else
// valloc/malloc as last resorts), installs an optional guard page, and
// registers the resulting context with the Thread subsystem.
4475 final void allocStack( size_t sz, size_t guardPageSize ) nothrow
4478 assert( !m_pmem && !m_ctxt );
4480 body
4482 // adjust alloc size to a multiple of PAGESIZE
4483 sz += PAGESIZE - 1;
4484 sz -= sz % PAGESIZE;
4486 // NOTE: This instance of Thread.Context is dynamic so Fiber objects
4487 // can be collected by the GC so long as no user level references
4488 // to the object exist. If m_ctxt were not dynamic then its
4489 // presence in the global context list would be enough to keep
4490 // this object alive indefinitely. An alternative to allocating
4491 // room for this struct explicitly would be to mash it into the
4492 // base of the stack being allocated below. However, doing so
4493 // requires too much special logic to be worthwhile.
4494 m_ctxt = new Thread.Context;
4496 static if ( __traits( compiles, VirtualAlloc ) )
4498 // reserve memory for stack
4499 m_pmem = VirtualAlloc( null,
4500 sz + guardPageSize,
4501 MEM_RESERVE,
4502 PAGE_NOACCESS );
4503 if ( !m_pmem )
4504 onOutOfMemoryError();
4506 version (StackGrowsDown)
4508 void* stack = m_pmem + guardPageSize;
4509 void* guard = m_pmem;
4510 void* pbase = stack + sz;
4512 else
4514 void* stack = m_pmem;
4515 void* guard = m_pmem + sz;
4516 void* pbase = stack;
4519 // allocate reserved stack segment
4520 stack = VirtualAlloc( stack,
4522 MEM_COMMIT,
4523 PAGE_READWRITE );
4524 if ( !stack )
4525 onOutOfMemoryError();
4527 if (guardPageSize)
4529 // allocate reserved guard page
4530 guard = VirtualAlloc( guard,
4531 guardPageSize,
4532 MEM_COMMIT,
4533 PAGE_READWRITE | PAGE_GUARD );
4534 if ( !guard )
4535 onOutOfMemoryError();
4538 m_ctxt.bstack = pbase;
4539 m_ctxt.tstack = pbase;
4540 m_size = sz;
4542 else
4544 version (Posix) import core.sys.posix.sys.mman; // mmap
4545 version (FreeBSD) import core.sys.freebsd.sys.mman : MAP_ANON;
4546 version (NetBSD) import core.sys.netbsd.sys.mman : MAP_ANON;
4547 version (CRuntime_Glibc) import core.sys.linux.sys.mman : MAP_ANON;
4548 version (Darwin) import core.sys.darwin.sys.mman : MAP_ANON;
4550 static if ( __traits( compiles, mmap ) )
4552 // Allocate more for the memory guard
4553 sz += guardPageSize;
4555 m_pmem = mmap( null,
4557 PROT_READ | PROT_WRITE,
4558 MAP_PRIVATE | MAP_ANON,
4560 0 );
4561 if ( m_pmem == MAP_FAILED )
4562 m_pmem = null;
4564 else static if ( __traits( compiles, valloc ) )
4566 m_pmem = valloc( sz );
4568 else static if ( __traits( compiles, malloc ) )
4570 m_pmem = malloc( sz );
4572 else
4574 m_pmem = null;
4577 if ( !m_pmem )
4578 onOutOfMemoryError();
4580 version (StackGrowsDown)
4582 m_ctxt.bstack = m_pmem + sz;
4583 m_ctxt.tstack = m_pmem + sz;
4584 void* guard = m_pmem;
4586 else
4588 m_ctxt.bstack = m_pmem;
4589 m_ctxt.tstack = m_pmem;
4590 void* guard = m_pmem + sz - guardPageSize;
4592 m_size = sz;
4594 static if ( __traits( compiles, mmap ) )
4596 if (guardPageSize)
4598 // protect end of stack
4599 if ( mprotect(guard, guardPageSize, PROT_NONE) == -1 )
4600 abort();
4603 else
4605 // Supported only for mmap allocated memory - results are
4606 // undefined if applied to memory not obtained by mmap
// Register the context in the global list (see the NOTE in freeStack)
// so it is tracked for the lifetime of this fiber.
4610 Thread.add( m_ctxt );
4615 // Free this fiber's stack.
// Releases the fiber's stack with the deallocator matching whichever
// allocator allocStack used, after unregistering the context.
4617 final void freeStack() nothrow @nogc
4620 assert( m_pmem && m_ctxt );
4622 body
4624 // NOTE: m_ctxt is guaranteed to be alive because it is held in the
4625 // global context list.
4626 Thread.slock.lock_nothrow();
4627 scope(exit) Thread.slock.unlock_nothrow();
4628 Thread.remove( m_ctxt );
4630 static if ( __traits( compiles, VirtualAlloc ) )
4632 VirtualFree( m_pmem, 0, MEM_RELEASE );
4634 else
4636 import core.sys.posix.sys.mman; // munmap
4638 static if ( __traits( compiles, mmap ) )
4640 munmap( m_pmem, m_size );
4642 else static if ( __traits( compiles, valloc ) )
4644 free( m_pmem );
4646 else static if ( __traits( compiles, malloc ) )
4648 free( m_pmem );
// Mark the stack and context as released so the in-contract of
// allocStack holds if this object is ever reused.
4651 m_pmem = null;
4652 m_ctxt = null;
4657 // Initialize the allocated stack.
4658 // Look above the definition of 'class Fiber' for some information about the implementation of this routine
4660 final void initStack() nothrow @nogc
4663 assert( m_ctxt.tstack && m_ctxt.tstack == m_ctxt.bstack );
4664 assert( cast(size_t) m_ctxt.bstack % (void*).sizeof == 0 );
4666 body
4668 void* pstack = m_ctxt.tstack;
4669 scope( exit ) m_ctxt.tstack = pstack;
// Helper: emulate a register push on the not-yet-live stack, moving
// pstack in the platform's direction of stack growth.
4671 void push( size_t val ) nothrow
4673 version (StackGrowsDown)
4675 pstack -= size_t.sizeof;
4676 *(cast(size_t*) pstack) = val;
4678 else
4680 pstack += size_t.sizeof;
4681 *(cast(size_t*) pstack) = val;
4685 // NOTE: On OS X the stack must be 16-byte aligned according
4686 // to the IA-32 call spec. For x86_64 the stack also needs to
4687 // be aligned to 16-byte according to SysV AMD64 ABI.
4688 version (AlignFiberStackTo16Byte)
4690 version (StackGrowsDown)
4692 pstack = cast(void*)(cast(size_t)(pstack) - (cast(size_t)(pstack) & 0x0F));
4694 else
4696 pstack = cast(void*)(cast(size_t)(pstack) + (cast(size_t)(pstack) & 0x0F));
4700 version (AsmX86_Windows)
4702 version (StackGrowsDown) {} else static assert( false );
4704 // On Windows Server 2008 and 2008 R2, an exploit mitigation
4705 // technique known as SEHOP is activated by default. To avoid
4706 // hijacking of the exception handler chain, the presence of a
4707 // Windows-internal handler (ntdll.dll!FinalExceptionHandler) at
4708 // its end is tested by RaiseException. If it is not present, all
4709 // handlers are disregarded, and the program is thus aborted
4710 // (see http://blogs.technet.com/b/srd/archive/2009/02/02/
4711 // preventing-the-exploitation-of-seh-overwrites-with-sehop.aspx).
4712 // For new threads, this handler is installed by Windows immediately
4713 // after creation. To make exception handling work in fibers, we
4714 // have to insert it for our new stacks manually as well.
4716 // To do this, we first determine the handler by traversing the SEH
4717 // chain of the current thread until its end, and then construct a
4718 // registration block for the last handler on the newly created
4719 // thread. We then continue to push all the initial register values
4720 // for the first context switch as for the other implementations.
4722 // Note that this handler is never actually invoked, as we install
4723 // our own one on top of it in the fiber entry point function.
4724 // Thus, it should not have any effects on OSes not implementing
4725 // exception chain verification.
4727 alias fp_t = void function(); // Actual signature not relevant.
4728 static struct EXCEPTION_REGISTRATION
4730 EXCEPTION_REGISTRATION* next; // sehChainEnd if last one.
4731 fp_t handler;
4733 enum sehChainEnd = cast(EXCEPTION_REGISTRATION*) 0xFFFFFFFF;
4735 __gshared static fp_t finalHandler = null;
4736 if ( finalHandler is null )
4738 static EXCEPTION_REGISTRATION* fs0() nothrow
4740 asm pure nothrow @nogc
4742 naked;
4743 mov EAX, FS:[0];
4744 ret;
4747 auto reg = fs0();
4748 while ( reg.next != sehChainEnd ) reg = reg.next;
4750 // Benign races are okay here, just to avoid re-lookup on every
4751 // fiber creation.
4752 finalHandler = reg.handler;
4755 // When linking with /safeseh (supported by LDC, but not DMD)
4756 // the exception chain must not extend to the very top
4757 // of the stack, otherwise the exception chain is also considered
4758 // invalid. Reserving additional 4 bytes at the top of the stack will
4759 // keep the EXCEPTION_REGISTRATION below that limit
4760 size_t reserve = EXCEPTION_REGISTRATION.sizeof + 4;
4761 pstack -= reserve;
4762 *(cast(EXCEPTION_REGISTRATION*)pstack) =
4763 EXCEPTION_REGISTRATION( sehChainEnd, finalHandler );
4765 push( cast(size_t) &fiber_entryPoint ); // EIP
4766 push( cast(size_t) m_ctxt.bstack - reserve ); // EBP
4767 push( 0x00000000 ); // EDI
4768 push( 0x00000000 ); // ESI
4769 push( 0x00000000 ); // EBX
4770 push( cast(size_t) m_ctxt.bstack - reserve ); // FS:[0]
4771 push( cast(size_t) m_ctxt.bstack ); // FS:[4]
4772 push( cast(size_t) m_ctxt.bstack - m_size ); // FS:[8]
4773 push( 0x00000000 ); // EAX
4775 else version (AsmX86_64_Windows)
4777 // Using this trampoline instead of the raw fiber_entryPoint
4778 // ensures that during context switches, source and destination
4779 // stacks have the same alignment. Otherwise, the stack would need
4780 // to be shifted by 8 bytes for the first call, as fiber_entryPoint
4781 // is an actual function expecting a stack which is not aligned
4782 // to 16 bytes.
4783 static void trampoline()
4785 asm pure nothrow @nogc
4787 naked;
4788 sub RSP, 32; // Shadow space (Win64 calling convention)
4789 call fiber_entryPoint;
4790 xor RCX, RCX; // This should never be reached, as
4791 jmp RCX; // fiber_entryPoint must never return.
4795 push( cast(size_t) &trampoline ); // RIP
4796 push( 0x00000000_00000000 ); // RBP
4797 push( 0x00000000_00000000 ); // R12
4798 push( 0x00000000_00000000 ); // R13
4799 push( 0x00000000_00000000 ); // R14
4800 push( 0x00000000_00000000 ); // R15
4801 push( 0x00000000_00000000 ); // RDI
4802 push( 0x00000000_00000000 ); // RSI
4803 push( 0x00000000_00000000 ); // XMM6 (high)
4804 push( 0x00000000_00000000 ); // XMM6 (low)
4805 push( 0x00000000_00000000 ); // XMM7 (high)
4806 push( 0x00000000_00000000 ); // XMM7 (low)
4807 push( 0x00000000_00000000 ); // XMM8 (high)
4808 push( 0x00000000_00000000 ); // XMM8 (low)
4809 push( 0x00000000_00000000 ); // XMM9 (high)
4810 push( 0x00000000_00000000 ); // XMM9 (low)
4811 push( 0x00000000_00000000 ); // XMM10 (high)
4812 push( 0x00000000_00000000 ); // XMM10 (low)
4813 push( 0x00000000_00000000 ); // XMM11 (high)
4814 push( 0x00000000_00000000 ); // XMM11 (low)
4815 push( 0x00000000_00000000 ); // XMM12 (high)
4816 push( 0x00000000_00000000 ); // XMM12 (low)
4817 push( 0x00000000_00000000 ); // XMM13 (high)
4818 push( 0x00000000_00000000 ); // XMM13 (low)
4819 push( 0x00000000_00000000 ); // XMM14 (high)
4820 push( 0x00000000_00000000 ); // XMM14 (low)
4821 push( 0x00000000_00000000 ); // XMM15 (high)
4822 push( 0x00000000_00000000 ); // XMM15 (low)
4823 push( 0x00000000_00000000 ); // RBX
4824 push( 0xFFFFFFFF_FFFFFFFF ); // GS:[0]
4825 version (StackGrowsDown)
4827 push( cast(size_t) m_ctxt.bstack ); // GS:[8]
4828 push( cast(size_t) m_ctxt.bstack - m_size ); // GS:[16]
4830 else
4832 push( cast(size_t) m_ctxt.bstack ); // GS:[8]
4833 push( cast(size_t) m_ctxt.bstack + m_size ); // GS:[16]
4836 else version (AsmX86_Posix)
4838 push( 0x00000000 ); // Return address of fiber_entryPoint call
4839 push( cast(size_t) &fiber_entryPoint ); // EIP
4840 push( cast(size_t) m_ctxt.bstack ); // EBP
4841 push( 0x00000000 ); // EDI
4842 push( 0x00000000 ); // ESI
4843 push( 0x00000000 ); // EBX
4844 push( 0x00000000 ); // EAX
4846 else version (AsmX86_64_Posix)
4848 push( 0x00000000_00000000 ); // Return address of fiber_entryPoint call
4849 push( cast(size_t) &fiber_entryPoint ); // RIP
4850 push( cast(size_t) m_ctxt.bstack ); // RBP
4851 push( 0x00000000_00000000 ); // RBX
4852 push( 0x00000000_00000000 ); // R12
4853 push( 0x00000000_00000000 ); // R13
4854 push( 0x00000000_00000000 ); // R14
4855 push( 0x00000000_00000000 ); // R15
4857 else version (AsmPPC_Posix)
4859 version (StackGrowsDown)
4861 pstack -= int.sizeof * 5;
4863 else
4865 pstack += int.sizeof * 5;
4868 push( cast(size_t) &fiber_entryPoint ); // link register
4869 push( 0x00000000 ); // control register
4870 push( 0x00000000 ); // old stack pointer
4872 // GPR values
4873 version (StackGrowsDown)
4875 pstack -= int.sizeof * 20;
4877 else
4879 pstack += int.sizeof * 20;
4882 assert( (cast(size_t) pstack & 0x0f) == 0 );
4884 else version (AsmMIPS_O32_Posix)
4886 version (StackGrowsDown) {}
4887 else static assert(0);
4889 /* We keep the FP registers and the return address below
4890 * the stack pointer, so they don't get scanned by the
4891 * GC. The last frame before swapping the stack pointer is
4892 * organized like the following.
4894 * |-----------|<= frame pointer
4895 * | $gp |
4896 * | $s0-8 |
4897 * |-----------|<= stack pointer
4898 * | $ra |
4899 * | align(8) |
4900 * | $f20-30 |
4901 * |-----------|
4904 enum SZ_GP = 10 * size_t.sizeof; // $gp + $s0-8
4905 enum SZ_RA = size_t.sizeof; // $ra
4906 version (MIPS_HardFloat)
4908 enum SZ_FP = 6 * 8; // $f20-30
4909 enum ALIGN = -(SZ_FP + SZ_RA) & (8 - 1);
4911 else
4913 enum SZ_FP = 0;
4914 enum ALIGN = 0;
4917 enum BELOW = SZ_FP + ALIGN + SZ_RA;
4918 enum ABOVE = SZ_GP;
4919 enum SZ = BELOW + ABOVE;
4921 (cast(ubyte*)pstack - SZ)[0 .. SZ] = 0;
4922 pstack -= ABOVE;
4923 *cast(size_t*)(pstack - SZ_RA) = cast(size_t)&fiber_entryPoint;
4925 else version (AsmAArch64_Posix)
4927 // Like others, FP registers and return address (lr) are kept
4928 // below the saved stack top (tstack) to hide from GC scanning.
4929 // fiber_switchContext expects newp sp to look like this:
4930 // 19: x19
4931 // ...
4932 // 9: x29 (fp) <-- newp tstack
4933 // 8: x30 (lr) [&fiber_entryPoint]
4934 // 7: d8
4935 // ...
4936 // 0: d15
4938 version (StackGrowsDown) {}
4939 else
4940 static assert(false, "Only full descending stacks supported on AArch64");
4942 // Only need to set return address (lr). Everything else is fine
4943 // zero initialized.
4944 pstack -= size_t.sizeof * 11; // skip past x19-x29
4945 push(cast(size_t) &fiber_trampoline); // see threadasm.S for docs
4946 pstack += size_t.sizeof; // adjust sp (newp) above lr
4948 else version (AsmARM_Posix)
4950 /* We keep the FP registers and the return address below
4951 * the stack pointer, so they don't get scanned by the
4952 * GC. The last frame before swapping the stack pointer is
4953 * organized like the following.
4955 * | |-----------|<= 'frame starts here'
4956 * | | fp | (the actual frame pointer, r11 isn't
4957 * | | r10-r4 | updated and still points to the previous frame)
4958 * | |-----------|<= stack pointer
4959 * | | lr |
4960 * | | 4byte pad |
4961 * | | d15-d8 |(if FP supported)
4962 * | |-----------|
4964 * stack grows down: The pointer value here is smaller than some lines above
4966 // frame pointer can be zero, r10-r4 also zero initialized
4967 version (StackGrowsDown)
4968 pstack -= int.sizeof * 8;
4969 else
4970 static assert(false, "Only full descending stacks supported on ARM");
4972 // link register
4973 push( cast(size_t) &fiber_entryPoint );
4975 * We do not push padding and d15-d8 as those are zero initialized anyway
4976 * Position the stack pointer above the lr register
4978 pstack += int.sizeof * 1;
4980 else version (GNU_AsmX86_Windows)
4982 version (StackGrowsDown) {} else static assert( false );
4984 // Currently, MinGW doesn't utilize SEH exceptions.
4985 // See DMD AsmX86_Windows if this code ever fails and SEH is used.
4987 push( 0x00000000 ); // Return address of fiber_entryPoint call
4988 push( cast(size_t) &fiber_entryPoint ); // EIP
4989 push( 0x00000000 ); // EBP
4990 push( 0x00000000 ); // EDI
4991 push( 0x00000000 ); // ESI
4992 push( 0x00000000 ); // EBX
4993 push( 0xFFFFFFFF ); // FS:[0] - Current SEH frame
4994 push( cast(size_t) m_ctxt.bstack ); // FS:[4] - Top of stack
4995 push( cast(size_t) m_ctxt.bstack - m_size ); // FS:[8] - Bottom of stack
4996 push( 0x00000000 ); // EAX
4998 else version (GNU_AsmX86_64_Windows)
5000 push( 0x00000000_00000000 ); // Return address of fiber_entryPoint call
5001 push( cast(size_t) &fiber_entryPoint ); // RIP
5002 push( 0x00000000_00000000 ); // RBP
5003 push( 0x00000000_00000000 ); // RBX
5004 push( 0x00000000_00000000 ); // R12
5005 push( 0x00000000_00000000 ); // R13
5006 push( 0x00000000_00000000 ); // R14
5007 push( 0x00000000_00000000 ); // R15
5008 push( 0xFFFFFFFF_FFFFFFFF ); // GS:[0] - Current SEH frame
5009 version (StackGrowsDown)
5011 push( cast(size_t) m_ctxt.bstack ); // GS:[8] - Top of stack
5012 push( cast(size_t) m_ctxt.bstack - m_size ); // GS:[16] - Bottom of stack
5014 else
5016 push( cast(size_t) m_ctxt.bstack ); // GS:[8] - Top of stack
5017 push( cast(size_t) m_ctxt.bstack + m_size ); // GS:[16] - Bottom of stack
5020 else static if ( __traits( compiles, ucontext_t ) )
// Portable fallback: build a ucontext for this stack instead of a
// hand-crafted register frame.
5022 getcontext( &m_utxt );
5023 m_utxt.uc_stack.ss_sp = m_pmem;
5024 m_utxt.uc_stack.ss_size = m_size;
5025 makecontext( &m_utxt, &fiber_entryPoint, 0 );
5026 // NOTE: If ucontext is being used then the top of the stack will
5027 // be a pointer to the ucontext_t struct for that fiber.
5028 push( cast(size_t) &m_utxt );
5030 else
5031 static assert(0, "Not implemented");
// Execution context record (stack bounds + linkage) registered with the
// thread/GC machinery; used by switchIn/switchOut below.
5035 Thread.Context* m_ctxt;
// Size in bytes of the fiber's stack region.
5036 size_t m_size;
// Base address of the memory block backing the fiber's stack.
5037 void* m_pmem;
5039 static if ( __traits( compiles, ucontext_t ) )
5041 // NOTE: The static ucontext instance is used to represent the context
5042 // of the executing thread.
5043 static ucontext_t sm_utxt = void;
// Per-fiber ucontext, initialized via getcontext/makecontext in initStack
// on targets that fall back to the ucontext API.
5044 ucontext_t m_utxt = void;
// NOTE(review): purpose not visible in this chunk — appears to track a
// currently-active ucontext; confirm against the full file.
5045 ucontext_t* m_ucur = null;
5049 private:
5050 ///////////////////////////////////////////////////////////////////////////
5051 // Storage of Active Fiber
5052 ///////////////////////////////////////////////////////////////////////////
5056 // Sets a thread-local reference to the current fiber object.
5058 static void setThis( Fiber f ) nothrow @nogc
5060 sm_this = f;
// The fiber currently executing on this thread, or null when the thread is
// running on its own stack. Class-level statics are thread-local by default
// in D, so each thread tracks its own active fiber independently.
5063 static Fiber sm_this;
5066 private:
5067 ///////////////////////////////////////////////////////////////////////////
5068 // Context Switching
5069 ///////////////////////////////////////////////////////////////////////////
5073 // Switches into the stack held by this fiber.
// Runs on the caller's current context; control returns here only after the
// fiber yields or terminates and switches back out.
5075 final void switchIn() nothrow @nogc
5077 Thread tobj = Thread.getThis();
// Where the outgoing (caller-side) stack top will be recorded.
5078 void** oldp = &tobj.m_curr.tstack;
// This fiber's saved stack pointer, captured at its last switch-out.
5079 void* newp = m_ctxt.tstack;
5081 // NOTE: The order of operations here is very important. The current
5082 // stack top must be stored before m_lock is set, and pushContext
5083 // must not be called until after m_lock is set. This process
5084 // is intended to prevent a race condition with the suspend
5085 // mechanism used for garbage collection. If it is not followed,
5086 // a badly timed collection could cause the GC to scan from the
5087 // bottom of one stack to the top of another, or to miss scanning
5088 // a stack that still contains valid data. The old stack pointer
5089 // oldp will be set again before the context switch to guarantee
5090 // that it points to exactly the correct stack location so the
5091 // successive pop operations will succeed.
5092 *oldp = getStackTop();
5093 atomicStore!(MemoryOrder.raw)(*cast(shared)&tobj.m_lock, true);
5094 tobj.pushContext( m_ctxt );
5096 fiber_switchContext( oldp, newp );
5098 // NOTE: As above, these operations must be performed in a strict order
5099 // to prevent Bad Things from happening.
5100 tobj.popContext();
5101 atomicStore!(MemoryOrder.raw)(*cast(shared)&tobj.m_lock, false);
// While the thread runs on its own stack its recorded top is pinned to the
// base; the true top is captured again at the next switch or GC suspend.
5102 tobj.m_curr.tstack = tobj.m_curr.bstack;
5107 // Switches out of the current stack and into the enclosing stack.
// Counterpart of switchIn: records this fiber's stack top into its own
// context and resumes whatever context the fiber is nested within.
5109 final void switchOut() nothrow @nogc
5111 Thread tobj = Thread.getThis();
// The fiber's own context receives the outgoing stack top.
5112 void** oldp = &m_ctxt.tstack;
// Saved stack pointer of the enclosing context (thread or outer fiber).
5113 void* newp = tobj.m_curr.within.tstack;
5115 // NOTE: The order of operations here is very important. The current
5116 // stack top must be stored before m_lock is set, and pushContext
5117 // must not be called until after m_lock is set. This process
5118 // is intended to prevent a race condition with the suspend
5119 // mechanism used for garbage collection. If it is not followed,
5120 // a badly timed collection could cause the GC to scan from the
5121 // bottom of one stack to the top of another, or to miss scanning
5122 // a stack that still contains valid data. The old stack pointer
5123 // oldp will be set again before the context switch to guarantee
5124 // that it points to exactly the correct stack location so the
5125 // successive pop operations will succeed.
5126 *oldp = getStackTop();
5127 atomicStore!(MemoryOrder.raw)(*cast(shared)&tobj.m_lock, true);
5129 fiber_switchContext( oldp, newp );
5131 // NOTE: As above, these operations must be performed in a strict order
5132 // to prevent Bad Things from happening.
5133 // NOTE: If use of this fiber is multiplexed across threads, the thread
5134 // executing here may be different from the one above, so get the
5135 // current thread handle before unlocking, etc.
5136 tobj = Thread.getThis();
5137 atomicStore!(MemoryOrder.raw)(*cast(shared)&tobj.m_lock, false);
5138 tobj.m_curr.tstack = tobj.m_curr.bstack;
5143 version (unittest)
// Fiber that accumulates 0 .. 999 into `sum`, yielding after every addition
// so callers can interleave many instances.
5145 class TestFiber : Fiber
5147 this()
5149 super(&run);
5152 void run()
5154 foreach (i; 0 .. 1000)
5156 sum += i;
5157 Fiber.yield();
// Expected final value of `sum`: sum of integers 0 .. 999.
5161 enum expSum = 1000 * 999 / 2;
5162 size_t sum;
// Drives ten TestFibers round-robin until all have terminated, then checks
// that each one produced the expected sum.
5165 void runTen()
5167 TestFiber[10] fibs;
5168 foreach (ref fib; fibs)
5169 fib = new TestFiber();
5171 bool cont;
5172 do {
5173 cont = false;
5174 foreach (fib; fibs) {
5175 if (fib.state == Fiber.State.HOLD)
5177 fib.call();
// Keep looping while at least one fiber has not terminated.
5178 cont |= fib.state != Fiber.State.TERM;
5181 } while (cont);
5183 foreach (fib; fibs)
5185 assert(fib.sum == TestFiber.expSum);
5191 // Single thread running separate fibers
5192 unittest
5194 runTen();
5198 // Multiple threads running separate fibers
5199 unittest
5201 auto group = new ThreadGroup();
5202 foreach (_; 0 .. 4)
5204 group.create(&runTen);
5206 group.joinAll();
5210 // Multiple threads running shared fibers
5211 unittest
// One CAS-based spin lock per fiber ensures only one thread drives a given
// fiber at a time while four threads contend for all ten.
5213 shared bool[10] locks;
5214 TestFiber[10] fibs;
5216 void runShared()
5218 bool cont;
5219 do {
5220 cont = false;
5221 foreach (idx; 0 .. 10)
// Try to acquire the per-fiber lock; skip (and retry later) if another
// thread currently owns this fiber.
5223 if (cas(&locks[idx], false, true))
5225 if (fibs[idx].state == Fiber.State.HOLD)
5227 fibs[idx].call();
5228 cont |= fibs[idx].state != Fiber.State.TERM;
5230 locks[idx] = false;
5232 else
// Lock was held: the fiber may still be live, so keep iterating.
5234 cont = true;
5237 } while (cont);
5240 foreach (ref fib; fibs)
5242 fib = new TestFiber();
5245 auto group = new ThreadGroup();
5246 foreach (_; 0 .. 4)
5248 group.create(&runShared);
5250 group.joinAll();
5252 foreach (fib; fibs)
5254 assert(fib.sum == TestFiber.expSum);
5259 // Test exception handling inside fibers.
5260 version (Win32) {
5261 // broken on win32 under windows server 2012: bug 13821
5262 } else unittest {
// An exception thrown and caught entirely within the fiber must not escape
// to the caller of call().
5263 enum MSG = "Test message.";
5264 string caughtMsg;
5265 (new Fiber({
5268 throw new Exception(MSG);
5270 catch (Exception e)
5272 caughtMsg = e.msg;
5274 })).call();
5275 assert(caughtMsg == MSG);
// A fiber body runs to completion on the first call().
5279 unittest
5281 int x = 0;
5283 (new Fiber({
5284 x++;
5285 })).call();
5286 assert( x == 1 );
// call!(Rethrow.no) must be usable from nothrow code.
5289 nothrow unittest
5291 new Fiber({}).call!(Fiber.Rethrow.no)();
// Both Rethrow enum values are accepted as runtime arguments.
5294 unittest
5296 new Fiber({}).call(Fiber.Rethrow.yes);
5297 new Fiber({}).call(Fiber.Rethrow.no);
// The deprecated bool overload of call() still works.
5300 deprecated unittest
5302 new Fiber({}).call(true);
5303 new Fiber({}).call(false);
5306 version (Win32) {
5307 // broken on win32 under windows server 2012: bug 13821
5308 } else unittest {
// An uncaught exception inside the fiber is rethrown to the caller of call().
5309 enum MSG = "Test message.";
5313 (new Fiber({
5314 throw new Exception( MSG );
5315 })).call();
5316 assert( false, "Expected rethrown exception." );
5318 catch ( Throwable t )
5320 assert( t.msg == MSG );
5324 // Test exception chaining when switching contexts in finally blocks.
5325 unittest
// Throws, then yields from the finally block so the in-flight exception
// survives a context switch.
5327 static void throwAndYield(string msg) {
5328 try {
5329 throw new Exception(msg);
5330 } finally {
5331 Fiber.yield();
// Two nested throws (the second from a finally) must chain: ".1" is the
// primary exception and ".2" hangs off its `next` link — even when two
// fibers interleave their yields.
5335 static void fiber(string name) {
5336 try {
5337 try {
5338 throwAndYield(name ~ ".1");
5339 } finally {
5340 throwAndYield(name ~ ".2");
5342 } catch (Exception e) {
5343 assert(e.msg == name ~ ".1");
5344 assert(e.next);
5345 assert(e.next.msg == name ~ ".2");
5346 assert(!e.next.next);
5350 auto first = new Fiber(() => fiber("first"));
5351 auto second = new Fiber(() => fiber("second"));
// Three calls each: the fibers yield twice (once per throwAndYield) before
// terminating; alternating ensures the chains don't cross between fibers.
5352 first.call();
5353 second.call();
5354 first.call();
5355 second.call();
5356 first.call();
5357 second.call();
5358 assert(first.state == Fiber.State.TERM);
5359 assert(second.state == Fiber.State.TERM);
5362 // Test Fiber resetting
5363 unittest
// Records which entry point actually ran, so expect() can verify that
// reset() rebinds the fiber to the intended callable.
5365 static string method;
5367 static void foo()
5369 method = "foo";
5372 void bar()
5374 method = "bar";
// Runs a HOLD fiber to completion and checks the recorded entry point.
5377 static void expect(Fiber fib, string s)
5379 assert(fib.state == Fiber.State.HOLD);
5380 fib.call();
5381 assert(fib.state == Fiber.State.TERM);
5382 assert(method == s); method = null;
5384 auto fib = new Fiber(&foo);
5385 expect(fib, "foo");
// reset() with no argument reuses the original entry point.
5387 fib.reset();
5388 expect(fib, "foo");
// reset() accepts a function pointer, a delegate-producing member, a
// function literal, and a delegate literal.
5390 fib.reset(&foo);
5391 expect(fib, "foo");
5393 fib.reset(&bar);
5394 expect(fib, "bar");
5396 fib.reset(function void(){method = "function";});
5397 expect(fib, "function");
5399 fib.reset(delegate void(){method = "delegate";});
5400 expect(fib, "delegate");
5403 // Test unsafe reset in hold state
5404 unittest
// The fiber yields mid-execution with live stack data; reset() while in
// HOLD state must still recycle it safely.
5406 auto fib = new Fiber(function {ubyte[2048] buf = void; Fiber.yield();}, 4096);
5407 foreach (_; 0 .. 10)
5409 fib.call();
5410 assert(fib.state == Fiber.State.HOLD);
5411 fib.reset();
5415 // stress testing GC stack scanning
5416 unittest
5418 import core.memory;
// A running thread whose Thread object is only reachable from its own
// stack/registers must survive a collection.
5420 static void unreferencedThreadObject()
5422 static void sleep() { Thread.sleep(dur!"msecs"(100)); }
5423 auto thread = new Thread(&sleep).start();
5425 unreferencedThreadObject();
5426 GC.collect();
5428 static class Foo
5430 this(int value)
5432 _value = value;
5435 int bar()
5437 return _value;
5440 int _value;
// An object referenced only from a fiber's stack must survive collections
// both while the fiber is running and while it is suspended (yielded).
5443 static void collect()
5445 auto foo = new Foo(2);
5446 assert(foo.bar() == 2);
5447 GC.collect();
5448 Fiber.yield();
5449 GC.collect();
5450 assert(foo.bar() == 2);
5453 auto fiber = new Fiber(&collect);
5455 fiber.call();
// Collect while the fiber is suspended; its stack must still be scanned.
5456 GC.collect();
5457 fiber.call();
5459 // thread reference
5460 auto foo = new Foo(2);
// Same check, but with the object captured by a closure from the enclosing
// thread's frame.
5462 void collect2()
5464 assert(foo.bar() == 2);
5465 GC.collect();
5466 Fiber.yield();
5467 GC.collect();
5468 assert(foo.bar() == 2);
5471 fiber = new Fiber(&collect2);
5473 fiber.call();
5474 GC.collect();
5475 fiber.call();
// Build a chain of nested suspended fibers, collecting at each level, so
// the GC must scan many fiber stacks at once.
5477 static void recurse(size_t cnt)
5479 --cnt;
5480 Fiber.yield();
5481 if (cnt)
5483 auto fib = new Fiber(() { recurse(cnt); });
5484 fib.call();
5485 GC.collect();
5486 fib.call();
5489 fiber = new Fiber(() { recurse(20); });
5490 fiber.call();
5494 version (AsmX86_64_Windows)
5496 // Test Windows x64 calling convention
5497 unittest
// A fiber clobbers the given nonvolatile GP register; after switching back,
// the caller's value must have been preserved by the context switch.
5499 void testNonvolatileRegister(alias REG)()
5501 auto zeroRegister = new Fiber(() {
5502 mixin("asm pure nothrow @nogc { naked; xor "~REG~", "~REG~"; ret; }");
5504 long after;
// Fill the register with all-ones, run the clobbering fiber, then read it
// back; -1 means the register survived the round trip.
5506 mixin("asm pure nothrow @nogc { mov "~REG~", 0xFFFFFFFFFFFFFFFF; }");
5507 zeroRegister.call();
5508 mixin("asm pure nothrow @nogc { mov after, "~REG~"; }");
5510 assert(after == -1);
// Same check for the nonvolatile XMM registers of the Win64 ABI.
5513 void testNonvolatileRegisterSSE(alias REG)()
5515 auto zeroRegister = new Fiber(() {
5516 mixin("asm pure nothrow @nogc { naked; xorpd "~REG~", "~REG~"; ret; }");
5518 long[2] before = [0xFFFFFFFF_FFFFFFFF, 0xFFFFFFFF_FFFFFFFF], after;
5520 mixin("asm pure nothrow @nogc { movdqu "~REG~", before; }");
5521 zeroRegister.call();
5522 mixin("asm pure nothrow @nogc { movdqu after, "~REG~"; }");
5524 assert(before == after);
5527 testNonvolatileRegister!("R12")();
5528 testNonvolatileRegister!("R13")();
5529 testNonvolatileRegister!("R14")();
5530 testNonvolatileRegister!("R15")();
5531 testNonvolatileRegister!("RDI")();
5532 testNonvolatileRegister!("RSI")();
5533 testNonvolatileRegister!("RBX")();
5535 testNonvolatileRegisterSSE!("XMM6")();
5536 testNonvolatileRegisterSSE!("XMM7")();
5537 testNonvolatileRegisterSSE!("XMM8")();
5538 testNonvolatileRegisterSSE!("XMM9")();
5539 testNonvolatileRegisterSSE!("XMM10")();
5540 testNonvolatileRegisterSSE!("XMM11")();
5541 testNonvolatileRegisterSSE!("XMM12")();
5542 testNonvolatileRegisterSSE!("XMM13")();
5543 testNonvolatileRegisterSSE!("XMM14")();
5544 testNonvolatileRegisterSSE!("XMM15")();
5549 version (D_InlineAsm_X86_64)
5551 unittest
// RSP inside a fiber must be 16-byte aligned, as the x86-64 ABI requires.
5553 void testStackAlignment()
5555 void* pRSP;
5556 asm pure nothrow @nogc
5558 mov pRSP, RSP;
5560 assert((cast(size_t)pRSP & 0xF) == 0);
5563 auto fib = new Fiber(&testStackAlignment);
5564 fib.call();
5568 // regression test for Issue 13416
5569 version (FreeBSD) unittest
// Hammer pthread_attr_get_np on a worker thread while the main thread
// repeatedly suspends/resumes the world, to expose the Issue 13416 race.
5571 static void loop()
5573 pthread_attr_t attr;
5574 pthread_attr_init(&attr);
5575 auto thr = pthread_self();
5576 foreach (i; 0 .. 50)
5577 pthread_attr_get_np(thr, &attr);
5578 pthread_attr_destroy(&attr);
5581 auto thr = new Thread(&loop).start();
5582 foreach (i; 0 .. 50)
5584 thread_suspendAll();
5585 thread_resumeAll();
5587 thr.join();
// A thread with a near-minimal custom stack size must start and join.
5590 unittest
5592 // use >PAGESIZE to avoid stack overflow (e.g. in a syscall)
5593 auto thr = new Thread(function{}, 4096 + 1).start();
5594 thr.join();
5598 * Represents the ID of a thread, as returned by $(D Thread.)$(LREF id).
5599 * The exact type varies from platform to platform.
5601 version (Windows)
5602 alias ThreadID = uint;
5603 else
5604 version (Posix)
// pthread_t is an opaque handle; its representation is implementation-defined.
5605 alias ThreadID = pthread_t;