/* mono-project.git: libgc/pthread_stop_world.c (blob 3bfdfbf35eddc525cb5667d1e6a258babf555ea5) */
#include "private/pthread_support.h"

#if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
    && !defined(GC_IRIX_THREADS) && !defined(GC_WIN32_THREADS) \
    && !defined(GC_DARWIN_THREADS) && !defined(GC_AIX_THREADS) \
    && !defined(GC_OPENBSD_THREADS)

#include <signal.h>
#include <semaphore.h>
#include <errno.h>
#include <unistd.h>
#include <sys/time.h>

/* Work around a dlopen issue (bug #75390); the undefs avoid warnings about redefinitions. */
#undef PACKAGE_BUGREPORT
#undef PACKAGE_NAME
#undef PACKAGE_STRING
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
#include "mono/utils/mono-compiler.h"
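
/* Native Client has no signal-based thread suspension, so the NaCl port   */
/* parks threads cooperatively.  The globals below track which thread      */
/* slots are in use, which threads are currently parked, and which thread  */
/* is acting as the parker (the one stopping the world).                   */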
#ifdef NACL
volatile int __nacl_thread_suspension_needed = 0;
pthread_t nacl_thread_parker = -1;

volatile int nacl_thread_parked[MAX_NACL_GC_THREADS];
volatile int nacl_thread_used[MAX_NACL_GC_THREADS];
volatile int nacl_thread_parking_inited = 0;
volatile int nacl_num_gc_threads = 0;
pthread_mutex_t nacl_thread_alloc_lock = PTHREAD_MUTEX_INITIALIZER;
__thread int nacl_thread_idx = -1;
__thread GC_thread nacl_gc_thread_self = NULL;
#endif

#if DEBUG_THREADS

#ifndef NSIG
# if defined(MAXSIG)
#  define NSIG (MAXSIG+1)
# elif defined(_NSIG)
#  define NSIG _NSIG
# elif defined(__SIGRTMAX)
#  define NSIG (__SIGRTMAX+1)
# else
  --> please fix it
# endif
#endif

#ifndef NACL
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
        ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i < NSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ", (long) i); }
    }
    GC_printf0("\n");
}
#endif /* NACL */
#endif /* DEBUG_THREADS */

/* Remove from *set the signals that we want to allow in the */
/* thread-stopping handler.                                   */
void GC_remove_allowed_signals(sigset_t *set)
{
#   ifdef NO_SIGNALS
      if (sigdelset(set, SIGINT) != 0
          || sigdelset(set, SIGQUIT) != 0
          || sigdelset(set, SIGABRT) != 0
          || sigdelset(set, SIGTERM) != 0) {
        ABORT("sigdelset() failed");
      }
#   endif

#   ifdef MPROTECT_VDB
      /* Handlers write to the thread structure, which is in the heap, */
      /* and hence can trigger a protection fault.                     */
      if (sigdelset(set, SIGSEGV) != 0
#         ifdef SIGBUS
          || sigdelset(set, SIGBUS) != 0
#         endif
          ) {
        ABORT("sigdelset() failed");
      }
#   endif
}

static sigset_t suspend_handler_mask;

word GC_stop_count;     /* Incremented at the beginning of GC_stop_world. */

#ifdef GC_OSF1_THREADS
  GC_bool GC_retry_signals = TRUE;
#else
  GC_bool GC_retry_signals = FALSE;
#endif

/*
 * We use signals to stop threads during GC.
 *
 * Suspended threads wait in the signal handler for SIG_THR_RESTART.
 * That's more portable than semaphores or condition variables.
 * (We do use sem_post from a signal handler, but that should be portable.)
 *
 * The thread suspension signal SIG_SUSPEND is now defined in gc_priv.h.
 * Note that we can't just stop a thread; we need it to save its stack
 * pointer(s) and acknowledge.
 */
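
/*
 * In outline, the handshake implemented below is:
 *
 *   stopper (GC_stop_world)                 stoppee (suspend handler)
 *     pthread_kill(SIG_SUSPEND)      -->      save stack pointer
 *                                             sem_post(GC_suspend_ack_sem)
 *     sem_wait(ack), once per thread          sigsuspend() until SIG_THR_RESTART
 *     ... collection runs ...
 *     pthread_kill(SIG_THR_RESTART)  -->      restart handler sets stop_info.signal
 *     sem_wait(ack), once per thread <--      sem_post(GC_suspend_ack_sem)
 */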

#ifndef SIG_THR_RESTART
#  if defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS)
#    ifdef _SIGRTMIN
#      define SIG_THR_RESTART _SIGRTMIN + 5
#    else
#      define SIG_THR_RESTART SIGRTMIN + 5
#    endif
#  else
#    define SIG_THR_RESTART SIGXCPU
#  endif
#endif

sem_t GC_suspend_ack_sem;

static void _GC_suspend_handler(int sig)
{
#ifndef NACL
    int dummy;
    pthread_t my_thread = pthread_self();
    GC_thread me;
#   ifdef PARALLEL_MARK
      word my_mark_no = GC_mark_no;
      /* Marker can't proceed until we acknowledge.  Thus this is      */
      /* guaranteed to be the mark_no corresponding to our             */
      /* suspension, i.e. the marker can't have incremented it yet.    */
#   endif
    word my_stop_count = GC_stop_count;

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");

#if DEBUG_THREADS
    GC_printf1("Suspending 0x%lx\n", my_thread);
#endif

    me = GC_lookup_thread(my_thread);
    /* The lookup here is safe, since I'm doing this on behalf */
    /* of a thread which holds the allocation lock in order    */
    /* to stop the world.  Thus concurrent modification of the */
    /* data structure is impossible.                           */
    if (me -> stop_info.last_stop_count == my_stop_count) {
        /* Duplicate signal.  OK if we are retrying. */
        if (!GC_retry_signals) {
            WARN("Duplicate suspend signal in thread %lx\n",
                 pthread_self());
        }
        return;
    }
#   ifdef SPARC
      me -> stop_info.stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
      me -> stop_info.stack_ptr = (ptr_t)(&dummy);
#   endif
#   ifdef IA64
      me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack();
#   endif

    /* Tell the thread that wants to stop the world that this  */
    /* thread has been stopped.  Note that sem_post() is       */
    /* the only async-signal-safe primitive in LinuxThreads.   */
    sem_post(&GC_suspend_ack_sem);
    me -> stop_info.last_stop_count = my_stop_count;

    /* Wait until that thread tells us to restart by sending       */
    /* this thread a SIG_THR_RESTART signal.                       */
    /* SIG_THR_RESTART should be masked at this point.  Thus there */
    /* is no race.                                                 */
    do {
        me->stop_info.signal = 0;
        sigsuspend(&suspend_handler_mask);      /* Wait for signal */
    } while (me->stop_info.signal != SIG_THR_RESTART);
    /* If the RESTART signal gets lost, we can still lose.  That should be  */
    /* less likely than losing the SUSPEND signal, since we don't do much   */
    /* between the sem_post and sigsuspend.                                 */
    /* We'd need more handshaking to work around that, since we don't want  */
    /* to accidentally leave a RESTART signal pending, thus causing us to   */
    /* continue prematurely in a future round.                              */

    /* Tell the thread that wants to start the world that this */
    /* thread has been started.  Note that sem_post() is       */
    /* the only async-signal-safe primitive in LinuxThreads.   */
    sem_post(&GC_suspend_ack_sem);

#if DEBUG_THREADS
    GC_printf1("Continuing 0x%lx\n", my_thread);
#endif

#endif /* NACL */
}
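
/* Signal-handler entry point for SIG_SUSPEND.  It saves and restores */
/* errno so that the interrupted code does not see it clobbered.      */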
void GC_suspend_handler(int sig)
{
    int old_errno = errno;
    _GC_suspend_handler(sig);
    errno = old_errno;
}

static void _GC_restart_handler(int sig)
{
    pthread_t my_thread = pthread_self();
    GC_thread me;

    if (sig != SIG_THR_RESTART) ABORT("Bad signal in restart_handler");

    /* Let the GC_suspend_handler() know that we got a SIG_THR_RESTART. */
    /* The lookup here is safe, since I'm doing this on behalf */
    /* of a thread which holds the allocation lock in order    */
    /* to stop the world.  Thus concurrent modification of the */
    /* data structure is impossible.                           */
    me = GC_lookup_thread(my_thread);
    me->stop_info.signal = SIG_THR_RESTART;

    /*
    ** Note: even if we didn't do anything useful here,
    ** it would still be necessary to have a signal handler,
    ** rather than ignoring the signals, otherwise
    ** the signals will not be delivered at all, and
    ** will thus not interrupt the sigsuspend() above.
    */

#if DEBUG_THREADS
    GC_printf1("In GC_restart_handler for 0x%lx\n", pthread_self());
#endif
}
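
/* IF_IA64(x) expands to x only on IA64, where the register backing */
/* store must be scanned in addition to the ordinary stack.         */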
# ifdef IA64
#   define IF_IA64(x) x
# else
#   define IF_IA64(x)
# endif

/* We hold allocation lock.  Should do exactly the right thing if the */
/* world is stopped.  Should not fail if it isn't.                    */
static void pthread_push_all_stacks()
{
    GC_bool found_me = FALSE;
    int i;
    GC_thread p;
    ptr_t lo, hi;
    /* On IA64, we also need to scan the register backing store. */
    IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
#if DEBUG_THREADS
    GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
#endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
#           ifdef SPARC
              lo = (ptr_t)GC_save_regs_in_stack();
#           else
              lo = GC_approx_sp();
#           endif
            found_me = TRUE;
            IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
        } else {
            lo = p -> stop_info.stack_ptr;
            IF_IA64(bs_hi = p -> backing_store_ptr;)
        }
        if ((p -> flags & MAIN_THREAD) == 0) {
            hi = p -> stack_end;
            IF_IA64(bs_lo = p -> backing_store_end);
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
            IF_IA64(bs_lo = BACKING_STORE_BASE;)
        }
#if DEBUG_THREADS
        GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
            (unsigned long) p -> id,
            (unsigned long) lo, (unsigned long) hi);
#endif
        if (0 == lo) ABORT("GC_push_all_stacks: sp not set!\n");
        /* If the saved stack pointer lies in the thread's alternate    */
        /* signal stack, push the altstack (up to its top) instead.     */
        if (p->altstack && lo >= p->altstack && lo <= p->altstack + p->altstack_size)
            hi = p->altstack + p->altstack_size;
        /* FIXME: Need to scan the normal stack too, but how ? */

#       ifdef STACK_GROWS_UP
          /* We got them backwards! */
          GC_push_all_stack(hi, lo);
#       else
          GC_push_all_stack(lo, hi);
#       endif
#       ifdef NACL
          /* Push reg_storage as roots; this covers the register context. */
          GC_push_all_stack(p -> stop_info.reg_storage, p -> stop_info.reg_storage + NACL_GC_REG_STORAGE_SIZE);
#       endif
#       ifdef IA64
#         if DEBUG_THREADS
            GC_printf3("Reg stack for thread 0x%lx = [%lx,%lx)\n",
                (unsigned long) p -> id,
                (unsigned long) bs_lo, (unsigned long) bs_hi);
#         endif
          if (pthread_equal(p -> id, me)) {
            GC_push_all_eager(bs_lo, bs_hi);
          } else {
            GC_push_all_stack(bs_lo, bs_hi);
          }
#       endif
      }
    }
    if (!found_me && !GC_in_thread_creation)
      ABORT("Collecting from unknown thread.");
}

void GC_restart_handler(int sig)
{
    int old_errno = errno;
    _GC_restart_handler (sig);
    errno = old_errno;
}

/* We hold allocation lock.  Should do exactly the right thing if the */
/* world is stopped.  Should not fail if it isn't.                    */
void GC_push_all_stacks()
{
    pthread_push_all_stacks();
}

/* There seems to be a very rare thread-stopping problem.  To help us */
/* debug it, we save the id and pid of the stopping thread.           */
pthread_t GC_stopping_thread;
int GC_stopping_pid;

#ifdef PLATFORM_ANDROID
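/* On Android we signal the kernel thread id directly with tkill()     */
/* instead of using pthread_kill().  The return value mirrors          */
/* pthread_kill()'s convention: 0 on success, otherwise the error      */
/* number; the caller's errno is restored on failure.                  */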
static
int android_thread_kill(pid_t tid, int sig)
{
    int ret;
    int old_errno = errno;

    ret = tkill(tid, sig);
    if (ret < 0) {
        ret = errno;
        errno = old_errno;
    }

    return ret;
}
#endif

/* We hold the allocation lock.  Suspend all threads that might */
/* still be running.  Return the number of suspend signals that */
/* were sent.                                                   */
int GC_suspend_all()
{
#ifndef NACL
    int n_live_threads = 0;
    int i;
    GC_thread p;
    int result;
    pthread_t my_thread = pthread_self();

    GC_stopping_thread = my_thread;     /* debugging only */
    GC_stopping_pid = getpid();         /* debugging only */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            if (p -> stop_info.last_stop_count == GC_stop_count) continue;
            if (p -> thread_blocked) /* Will wait */ continue;
            n_live_threads++;
#if DEBUG_THREADS
            GC_printf1("Sending suspend signal to 0x%lx\n", p -> id);
#endif

#ifndef PLATFORM_ANDROID
            result = pthread_kill(p -> id, SIG_SUSPEND);
#else
            result = android_thread_kill(p -> kernel_id, SIG_SUSPEND);
#endif
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    return n_live_threads;
#else /* NACL */
    return 0;
#endif
}

/* Caller holds allocation lock. */
static void pthread_stop_world()
{
#ifndef NACL
    int i;
    int n_live_threads;
    int code;

#if DEBUG_THREADS
    GC_printf1("Stopping the world from 0x%lx\n", pthread_self());
#endif

    n_live_threads = GC_suspend_all();

    if (GC_retry_signals) {
        unsigned long wait_usecs = 0;   /* Total wait since retry. */
#       define WAIT_UNIT 3000
#       define RETRY_INTERVAL 100000
        for (;;) {
            int ack_count;

            sem_getvalue(&GC_suspend_ack_sem, &ack_count);
            if (ack_count == n_live_threads) break;
            if (wait_usecs > RETRY_INTERVAL) {
                int newly_sent = GC_suspend_all();

#               ifdef CONDPRINT
                  if (GC_print_stats) {
                      GC_printf1("Resent %ld signals after timeout\n",
                                 newly_sent);
                  }
#               endif
                sem_getvalue(&GC_suspend_ack_sem, &ack_count);
                if (newly_sent < n_live_threads - ack_count) {
                    WARN("Lost some threads during GC_stop_world?!\n", 0);
                    n_live_threads = ack_count + newly_sent;
                }
                wait_usecs = 0;
            }
            usleep(WAIT_UNIT);
            wait_usecs += WAIT_UNIT;
        }
    }
    for (i = 0; i < n_live_threads; i++) {
        while (0 != (code = sem_wait(&GC_suspend_ack_sem))) {
            if (errno != EINTR) {
                GC_err_printf1("Sem_wait returned %ld\n", (unsigned long)code);
                ABORT("sem_wait for handler failed");
            }
        }
    }
#if DEBUG_THREADS
    GC_printf1("World stopped from 0x%lx\n", pthread_self());
#endif
    GC_stopping_thread = 0;     /* debugging only */
#else /* NACL */
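    /* NaCl has no signals: flag that suspension is needed and poll   */
    /* until every registered thread has parked itself (see           */
    /* __nacl_suspend_thread_if_needed() below).                      */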
    GC_thread p;
    int i;
    int num_sleeps = 0;

#if DEBUG_THREADS
    GC_printf1("pthread_stop_world: num_threads %d\n", nacl_num_gc_threads - 1);
#endif
    nacl_thread_parker = pthread_self();
    __nacl_thread_suspension_needed = 1;

    while (1) {
#define NACL_PARK_WAIT_NANOSECONDS 100000
#define NANOS_PER_SECOND 1000000000
        int num_threads_parked = 0;
        struct timespec ts;
        int num_used = 0;
        /* Check the 'parked' flag for each thread the GC knows about. */
        for (i = 0; i < MAX_NACL_GC_THREADS && num_used < nacl_num_gc_threads; i++) {
            if (nacl_thread_used[i] == 1) {
                num_used++;
                if (nacl_thread_parked[i] == 1) {
                    num_threads_parked++;
                }
            }
        }
        /* -1 for the current thread. */
        if (num_threads_parked >= nacl_num_gc_threads - 1)
            break;
        ts.tv_sec = 0;
        ts.tv_nsec = NACL_PARK_WAIT_NANOSECONDS;
#if DEBUG_THREADS
        GC_printf1("sleeping waiting for %d threads to park...\n", nacl_num_gc_threads - num_threads_parked - 1);
#endif
        nanosleep(&ts, 0);
        if (++num_sleeps > NANOS_PER_SECOND / NACL_PARK_WAIT_NANOSECONDS) {
            GC_printf1("GC appears stalled waiting for %d threads to park...\n", nacl_num_gc_threads - num_threads_parked - 1);
            num_sleeps = 0;
        }
    }
#endif /* NACL */
}

#ifdef NACL
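
/* NACL_STORE_REGS() spills the callee-saved registers onto the current */
/* stack, records the resulting stack pointer in stop_info.stack_ptr,   */
/* and copies the spilled words into stop_info.reg_storage so that the  */
/* collector can scan a parked thread's register contents.              */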

#if __x86_64__

#define NACL_STORE_REGS()  \
    do {                   \
        __asm__ __volatile__ ("push %rbx");\
        __asm__ __volatile__ ("push %rbp");\
        __asm__ __volatile__ ("push %r12");\
        __asm__ __volatile__ ("push %r13");\
        __asm__ __volatile__ ("push %r14");\
        __asm__ __volatile__ ("push %r15");\
        __asm__ __volatile__ ("mov %%esp, %0" : "=m" (nacl_gc_thread_self->stop_info.stack_ptr));\
        memcpy(nacl_gc_thread_self->stop_info.reg_storage, nacl_gc_thread_self->stop_info.stack_ptr, NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));\
        __asm__ __volatile__ ("naclasp $48, %r15");\
    } while (0)

#elif __i386__

#define NACL_STORE_REGS()  \
    do {                   \
        __asm__ __volatile__ ("push %ebx");\
        __asm__ __volatile__ ("push %ebp");\
        __asm__ __volatile__ ("push %esi");\
        __asm__ __volatile__ ("push %edi");\
        __asm__ __volatile__ ("mov %%esp, %0" : "=m" (nacl_gc_thread_self->stop_info.stack_ptr));\
        memcpy(nacl_gc_thread_self->stop_info.reg_storage, nacl_gc_thread_self->stop_info.stack_ptr, NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));\
        __asm__ __volatile__ ("add $16, %esp");\
    } while (0)

#elif __arm__

#define NACL_STORE_REGS()  \
    do {                                                    \
        __asm__ __volatile__ (                              \
            ".align 4\n\t"                                  \
            "bic %0, %0, #0xc0000000\n\t"                   \
            "str sp, [%0]\n\t"                              \
            "bic %1, %1, #0xc0000000\n\t"                   \
            "stm %1, {r4-r8,r10-r12,lr}\n\t"                \
            : /* no outputs */                              \
            : "r" (&nacl_gc_thread_self->stop_info.stack_ptr), \
              "r" (nacl_gc_thread_self->stop_info.reg_storage)  \
            : "memory");                                    \
    } while (0)

#else

#error "Please port NACL_STORE_REGS"

#endif
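
/* NaCl syscall hooks: a thread entering a (possibly blocking) NaCl  */
/* syscall parks itself first, so the collector never has to wait on */
/* it; on return it pauses if a collection is still in progress and  */
/* then unparks.                                                     */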
void nacl_pre_syscall_hook()
{
    int local_dummy = 0;
    if (nacl_thread_idx != -1) {
        NACL_STORE_REGS();
        nacl_gc_thread_self->stop_info.stack_ptr = (ptr_t)(&local_dummy);
        nacl_thread_parked[nacl_thread_idx] = 1;
    }
}

void __nacl_suspend_thread_if_needed();

void nacl_post_syscall_hook()
{
    /* Calling __nacl_suspend_thread_if_needed() right away should */
    /* guarantee that we don't mutate the GC set.                  */
    __nacl_suspend_thread_if_needed();
    if (nacl_thread_idx != -1) {
        nacl_thread_parked[nacl_thread_idx] = 0;
    }
}

void __nacl_suspend_thread_if_needed() {
    if (__nacl_thread_suspension_needed) {
        pthread_t self = pthread_self();
        int local_dummy = 0;
        /* Don't try to park the thread parker. */
        if (nacl_thread_parker == self)
            return;

        /* This can happen when a thread is created outside of the */
        /* GC system (wthread mostly).                              */
        if (nacl_thread_idx < 0)
            return;

        /* If it was already 'parked', we're returning from a syscall, */
        /* so don't bother storing registers again; the GC has a set.  */
        if (!nacl_thread_parked[nacl_thread_idx]) {
            NACL_STORE_REGS();
            nacl_gc_thread_self->stop_info.stack_ptr = (ptr_t)(&local_dummy);
        }
        nacl_thread_parked[nacl_thread_idx] = 1;
        while (__nacl_thread_suspension_needed)
            ; /* spin */
        nacl_thread_parked[nacl_thread_idx] = 0;

        /* Clear out the reg storage for the next suspend. */
        memset(nacl_gc_thread_self->stop_info.reg_storage, 0, NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));
    }
}

#endif /* NACL */

/* Caller holds allocation lock. */
void GC_stop_world()
{
    if (GC_notify_event)
        GC_notify_event (GC_EVENT_PRE_STOP_WORLD);
    GC_process_togglerefs ();
    /* Make sure all free list construction has stopped before we start. */
    /* No new construction can start, since free list construction is    */
    /* required to acquire and release the GC lock before it starts,     */
    /* and we have the lock.                                             */
#   ifdef PARALLEL_MARK
      GC_acquire_mark_lock();
      GC_ASSERT(GC_fl_builder_count == 0);
      /* We should have previously waited for it to become zero. */
#   endif /* PARALLEL_MARK */
    ++GC_stop_count;
    pthread_stop_world ();
#   ifdef PARALLEL_MARK
      GC_release_mark_lock();
#   endif
    if (GC_notify_event)
        GC_notify_event (GC_EVENT_POST_STOP_WORLD);
}

/* Caller holds allocation lock, and has held it continuously since */
/* the world stopped.                                                */
static void pthread_start_world()
{
#ifndef NACL
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;
    int code;

#   if DEBUG_THREADS
      GC_printf0("World starting\n");
#   endif
    if (GC_notify_event)
        GC_notify_event (GC_EVENT_PRE_START_WORLD);

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            if (p -> thread_blocked) continue;
            n_live_threads++;
#if DEBUG_THREADS
            GC_printf1("Sending restart signal to 0x%lx\n", p -> id);
#endif

#ifndef PLATFORM_ANDROID
            result = pthread_kill(p -> id, SIG_THR_RESTART);
#else
            result = android_thread_kill(p -> kernel_id, SIG_THR_RESTART);
#endif
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }

#if DEBUG_THREADS
    GC_printf0 ("All threads signaled");
#endif
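
    /* Wait for each restarted thread to acknowledge: the suspend */
    /* handler posts GC_suspend_ack_sem once more after its       */
    /* sigsuspend() returns.                                      */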
    for (i = 0; i < n_live_threads; i++) {
        while (0 != (code = sem_wait(&GC_suspend_ack_sem))) {
            if (errno != EINTR) {
                GC_err_printf1("Sem_wait returned %ld\n", (unsigned long)code);
                ABORT("sem_wait for handler failed");
            }
        }
    }

    if (GC_notify_event)
        GC_notify_event (GC_EVENT_POST_START_WORLD);
#if DEBUG_THREADS
    GC_printf0("World started\n");
#endif
#else /* NACL */
    if (GC_notify_event)
        GC_notify_event (GC_EVENT_PRE_START_WORLD);
#   if DEBUG_THREADS
      GC_printf0("World starting\n");
#   endif
    __nacl_thread_suspension_needed = 0;
    if (GC_notify_event)
        GC_notify_event (GC_EVENT_POST_START_WORLD);
#endif /* NACL */
}

void GC_start_world()
{
    pthread_start_world ();
}

static void pthread_stop_init() {
#ifndef NACL
    struct sigaction act;

    if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
        ABORT("sem_init failed");

    act.sa_flags = SA_RESTART;
    if (sigfillset(&act.sa_mask) != 0) {
        ABORT("sigfillset() failed");
    }
    GC_remove_allowed_signals(&act.sa_mask);
    /* SIG_THR_RESTART is set in the resulting mask.  */
    /* It is unmasked by the handler when necessary.  */
    act.sa_handler = GC_suspend_handler;
    if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
        ABORT("Cannot set SIG_SUSPEND handler");
    }

    act.sa_handler = GC_restart_handler;
    if (sigaction(SIG_THR_RESTART, &act, NULL) != 0) {
        ABORT("Cannot set SIG_THR_RESTART handler");
    }

    /* Initialize suspend_handler_mask.  It excludes SIG_THR_RESTART. */
    if (sigfillset(&suspend_handler_mask) != 0) ABORT("sigfillset() failed");
    GC_remove_allowed_signals(&suspend_handler_mask);
    if (sigdelset(&suspend_handler_mask, SIG_THR_RESTART) != 0)
        ABORT("sigdelset() failed");

    /* Check for GC_RETRY_SIGNALS. */
    if (0 != GETENV("GC_RETRY_SIGNALS")) {
        GC_retry_signals = TRUE;
    }
    if (0 != GETENV("GC_NO_RETRY_SIGNALS")) {
        GC_retry_signals = FALSE;
    }
#   ifdef CONDPRINT
      if (GC_print_stats && GC_retry_signals) {
          GC_printf0("Will retry suspend signal if necessary.\n");
      }
#   endif
#endif /* NACL */
}

/* We hold the allocation lock. */
void GC_stop_init()
{
    pthread_stop_init ();
}

#endif