#include "private/pthread_support.h"

#if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
     && !defined(GC_IRIX_THREADS) && !defined(GC_WIN32_THREADS) \
     && !defined(GC_DARWIN_THREADS) && !defined(GC_AIX_THREADS) \
     && !defined(GC_OPENBSD_THREADS)

/* Work around a dlopen issue (bug #75390): undef these to avoid warnings about redefinitions. */
#undef PACKAGE_BUGREPORT
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
#include "mono/utils/mono-compiler.h"
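/* NaCl cannot suspend threads with signals, so the collector parks them   */
/* cooperatively instead: the stopping thread sets                         */
/* __nacl_thread_suspension_needed, and each mutator parks itself (and     */
/* records its registers) from the syscall hooks and from                  */
/* __nacl_suspend_thread_if_needed() below.  The arrays below track, per   */
/* registered thread slot, whether the slot is in use and whether that     */
/* thread is currently parked.                                              */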
volatile int __nacl_thread_suspension_needed = 0;
pthread_t nacl_thread_parker = -1;

volatile int nacl_thread_parked[MAX_NACL_GC_THREADS];
volatile int nacl_thread_used[MAX_NACL_GC_THREADS];
volatile int nacl_thread_parking_inited = 0;
volatile int nacl_num_gc_threads = 0;
pthread_mutex_t nacl_thread_alloc_lock = PTHREAD_MUTEX_INITIALIZER;
__thread int nacl_thread_idx = -1;
__thread GC_thread nacl_gc_thread_self = NULL;
#  define NSIG (MAXSIG+1)
# elif defined(__SIGRTMAX)
#  define NSIG (__SIGRTMAX+1)
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
        ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i < NSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ", (long) i); }
    }
}
/* Remove the signals that we want to allow in the thread-stopping */
/* signal handler from a set.                                       */
void GC_remove_allowed_signals(sigset_t *set)
{
    if (sigdelset(set, SIGINT) != 0
        || sigdelset(set, SIGQUIT) != 0
        || sigdelset(set, SIGABRT) != 0
        || sigdelset(set, SIGTERM) != 0) {
        ABORT("sigdelset() failed");
    }

    /* Handlers write to the thread structure, which is in the heap, */
    /* and hence can trigger a protection fault.                     */
    if (sigdelset(set, SIGSEGV) != 0
        || sigdelset(set, SIGBUS) != 0) {
        ABORT("sigdelset() failed");
    }
}
static sigset_t suspend_handler_mask;

word GC_stop_count;     /* Incremented at the beginning of GC_stop_world. */

#ifdef GC_OSF1_THREADS
  GC_bool GC_retry_signals = TRUE;
#else
  GC_bool GC_retry_signals = FALSE;
#endif
/*
 * We use signals to stop threads during GC.
 *
 * Suspended threads wait in signal handler for SIG_THR_RESTART.
 * That's more portable than semaphores or condition variables.
 * (We do use sem_post from a signal handler, but that should be portable.)
 *
 * The thread suspension signal SIG_SUSPEND is now defined in gc_priv.h.
 * Note that we can't just stop a thread; we need it to save its stack
 * pointer(s) and acknowledge.
 */
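/* Illustrative sketch (not compiled into the collector): the same        */
/* stop/restart handshake in a stand-alone program.  SIGUSR1 and SIGUSR2  */
/* stand in for SIG_SUSPEND and SIG_THR_RESTART here, and a bare          */
/* semaphore replaces the thread table and stop counts; everything below  */
/* is illustrative only and not part of the collector's API.              */
#if 0
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static sem_t ack_sem;                   /* role of GC_suspend_ack_sem */
static sigset_t wait_mask;              /* blocks everything but SIGUSR2 */
static volatile sig_atomic_t resumed;

static void suspend_sketch(int sig)     /* role of GC_suspend_handler */
{
    (void)sig;
    resumed = 0;
    sem_post(&ack_sem);                 /* acknowledge; async-signal-safe */
    do {
        sigsuspend(&wait_mask);         /* parked until SIGUSR2 arrives */
    } while (!resumed);
    sem_post(&ack_sem);                 /* acknowledge the restart */
}

static void restart_sketch(int sig)     /* role of GC_restart_handler */
{
    (void)sig;
    resumed = 1;
}

static void *worker(void *arg)
{
    for (;;) pause();
    return arg;
}

int main(void)
{
    pthread_t t;
    struct sigaction act;

    sem_init(&ack_sem, 0, 0);
    sigfillset(&wait_mask);
    sigdelset(&wait_mask, SIGUSR2);

    memset(&act, 0, sizeof act);
    sigfillset(&act.sa_mask);           /* block everything inside the handlers */
    act.sa_flags = SA_RESTART;
    act.sa_handler = suspend_sketch;
    sigaction(SIGUSR1, &act, NULL);
    act.sa_handler = restart_sketch;
    sigaction(SIGUSR2, &act, NULL);

    pthread_create(&t, NULL, worker, NULL);
    sleep(1);                           /* crude: give the worker time to start */

    pthread_kill(t, SIGUSR1);           /* "stop the world" for one thread */
    sem_wait(&ack_sem);                 /* wait for its acknowledgement */
    printf("worker suspended\n");

    pthread_kill(t, SIGUSR2);           /* let it out of sigsuspend() */
    sem_wait(&ack_sem);
    printf("worker resumed\n");
    return 0;
}
#endif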
#ifndef SIG_THR_RESTART
# if defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS)
#  ifdef _SIGRTMIN
#   define SIG_THR_RESTART _SIGRTMIN + 5
#  else
#   define SIG_THR_RESTART SIGRTMIN + 5
#  endif
# else
#  define SIG_THR_RESTART SIGXCPU
# endif
#endif

sem_t GC_suspend_ack_sem;
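/* Each signaled thread posts GC_suspend_ack_sem once from its suspend    */
/* handler after saving its stack pointer, and once more on its way out   */
/* after SIG_THR_RESTART; the initiating thread waits for one post per    */
/* signal it sent before treating the world as stopped (or restarted).    */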
static void _GC_suspend_handler(int sig)
{
    int dummy;
    pthread_t my_thread = pthread_self();
    GC_thread me;
#   ifdef PARALLEL_MARK
        word my_mark_no = GC_mark_no;
        /* Marker can't proceed until we acknowledge.  Thus this is      */
        /* guaranteed to be the mark_no corresponding to our             */
        /* suspension, i.e. the marker can't have incremented it yet.    */
#   endif
    word my_stop_count = GC_stop_count;

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");

    GC_printf1("Suspending 0x%lx\n", my_thread);

    me = GC_lookup_thread(my_thread);
    /* The lookup here is safe, since I'm doing this on behalf   */
    /* of a thread which holds the allocation lock in order      */
    /* to stop the world.  Thus concurrent modification of the   */
    /* data structure is impossible.                             */
    if (me -> stop_info.last_stop_count == my_stop_count) {
        /* Duplicate signal.  OK if we are retrying.  */
        if (!GC_retry_signals) {
            WARN("Duplicate suspend signal in thread %lx\n",
                 pthread_self());
        }
        return;
    }

#   if defined(SPARC) || defined(IA64)
        me -> stop_info.stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
        me -> stop_info.stack_ptr = (ptr_t)(&dummy);
#   endif
#   ifdef IA64
        me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack();
#   endif

    /* Tell the thread that wants to stop the world that this    */
    /* thread has been stopped.  Note that sem_post() is         */
    /* the only async-signal-safe primitive in LinuxThreads.     */
    sem_post(&GC_suspend_ack_sem);
    me -> stop_info.last_stop_count = my_stop_count;

    /* Wait until that thread tells us to restart by sending     */
    /* this thread a SIG_THR_RESTART signal.                     */
    /* SIG_THR_RESTART should be masked at this point.  Thus     */
    /* there is no race.                                         */
    do {
        me->stop_info.signal = 0;
        sigsuspend(&suspend_handler_mask);        /* Wait for signal */
    } while (me->stop_info.signal != SIG_THR_RESTART);

    /* If the RESTART signal gets lost, we can still lose.  That should be */
    /* less likely than losing the SUSPEND signal, since we don't do much  */
    /* between the sem_post and sigsuspend.                                */
    /* We'd need more handshaking to work around that, since we don't want */
    /* to accidentally leave a RESTART signal pending, thus causing us to  */
    /* continue prematurely in a future round.                             */

    /* Tell the thread that wants to start the world that this   */
    /* thread has been started.  Note that sem_post() is         */
    /* the only async-signal-safe primitive in LinuxThreads.     */
    sem_post(&GC_suspend_ack_sem);

    GC_printf1("Continuing 0x%lx\n", my_thread);
}
void GC_suspend_handler(int sig)
{
    int old_errno = errno;
    _GC_suspend_handler(sig);
    errno = old_errno;
}

static void _GC_restart_handler(int sig)
{
    pthread_t my_thread = pthread_self();
    GC_thread me;

    if (sig != SIG_THR_RESTART) ABORT("Bad signal in restart_handler");

    /* Let the GC_suspend_handler() know that we got a SIG_THR_RESTART. */
    /* The lookup here is safe, since I'm doing this on behalf   */
    /* of a thread which holds the allocation lock in order      */
    /* to stop the world.  Thus concurrent modification of the   */
    /* data structure is impossible.                             */
    me = GC_lookup_thread(my_thread);
    me->stop_info.signal = SIG_THR_RESTART;

    /*
    ** Note: even if we didn't do anything useful here,
    ** it would still be necessary to have a signal handler,
    ** rather than ignoring the signals, otherwise
    ** the signals will not be delivered at all, and
    ** will thus not interrupt the sigsuspend() above.
    */

    GC_printf1("In GC_restart_handler for 0x%lx\n", pthread_self());
}
#ifdef IA64
# define IF_IA64(x) x
#else
# define IF_IA64(x)
#endif

/* We hold allocation lock.  Should do exactly the right thing if the */
/* world is stopped.  Should not fail if it isn't.                    */
static void pthread_push_all_stacks()
{
    GC_bool found_me = FALSE;
    int i;
    GC_thread p;
    ptr_t lo, hi;
    /* On IA64, we also need to scan the register backing store. */
    IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();

    GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
#           ifdef SPARC
                lo = (ptr_t)GC_save_regs_in_stack();
#           else
                lo = GC_approx_sp();
#           endif
            found_me = TRUE;
            IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
        } else {
            lo = p -> stop_info.stack_ptr;
            IF_IA64(bs_hi = p -> backing_store_ptr;)
        }
        if ((p -> flags & MAIN_THREAD) == 0) {
            hi = p -> stack_end;
            IF_IA64(bs_lo = p -> backing_store_end);
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
            IF_IA64(bs_lo = BACKING_STORE_BASE;)
        }

        GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
                   (unsigned long) p -> id,
                   (unsigned long) lo, (unsigned long) hi);

        if (0 == lo) ABORT("GC_push_all_stacks: sp not set!\n");
        if (p->altstack && lo >= p->altstack && lo <= p->altstack + p->altstack_size)
            hi = p->altstack + p->altstack_size;
            /* FIXME: Need to scan the normal stack too, but how ? */

#       ifdef STACK_GROWS_UP
          /* We got them backwards! */
          GC_push_all_stack(hi, lo);
#       else
          GC_push_all_stack(lo, hi);
#       endif

#       ifdef NACL
          /* Push reg_storage as roots; this will cover the reg context. */
          GC_push_all_stack(p -> stop_info.reg_storage,
                            p -> stop_info.reg_storage + NACL_GC_REG_STORAGE_SIZE);
#       endif

#       ifdef IA64
          GC_printf3("Reg stack for thread 0x%lx = [%lx,%lx)\n",
                     (unsigned long) p -> id,
                     (unsigned long) bs_lo, (unsigned long) bs_hi);
          if (pthread_equal(p -> id, me)) {
              GC_push_all_eager(bs_lo, bs_hi);
          } else {
              GC_push_all_stack(bs_lo, bs_hi);
          }
#       endif
      }
    }
    if (!found_me && !GC_in_thread_creation)
        ABORT("Collecting from unknown thread.");
}
void GC_restart_handler(int sig)
{
    int old_errno = errno;
    _GC_restart_handler (sig);
    errno = old_errno;
}

/* We hold allocation lock.  Should do exactly the right thing if the */
/* world is stopped.  Should not fail if it isn't.                    */
void GC_push_all_stacks()
{
    pthread_push_all_stacks();
}

/* There seems to be a very rare thread stopping problem.  To help us */
/* debug that, we save the ids of the stopping thread.                */
pthread_t GC_stopping_thread;
int GC_stopping_pid;

#ifdef PLATFORM_ANDROID
int android_thread_kill(pid_t tid, int sig)
{
    int ret;
    int old_errno = errno;

    ret = tkill(tid, sig);
    if (ret < 0) {
        ret = errno;
        errno = old_errno;
    }

    return ret;
}
#endif
/* We hold the allocation lock.  Suspend all threads that might */
/* still be running.  Return the number of suspend signals that */
/* were sent.                                                   */
int GC_suspend_all()
{
    int n_live_threads = 0;
    int i;
    GC_thread p;
    int result;
    pthread_t my_thread = pthread_self();

    GC_stopping_thread = my_thread;    /* debugging only. */
    GC_stopping_pid = getpid();        /* debugging only. */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            if (p -> stop_info.last_stop_count == GC_stop_count) continue;
            if (p -> thread_blocked) /* Will wait */ continue;
            n_live_threads++;

            GC_printf1("Sending suspend signal to 0x%lx\n", p -> id);

#ifndef PLATFORM_ANDROID
            result = pthread_kill(p -> id, SIG_SUSPEND);
#else
            result = android_thread_kill(p -> kernel_id, SIG_SUSPEND);
#endif
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    return n_live_threads;
}
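/* Note that GC_suspend_all() only counts the signals it sent; the        */
/* acknowledgements are collected later, in pthread_stop_world(), by      */
/* waiting on GC_suspend_ack_sem (and optionally resending signals when   */
/* GC_retry_signals is set).                                              */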
/* Caller holds allocation lock. */
static void pthread_stop_world()
{
#ifndef NACL
    int i;
    int n_live_threads;
    int code;

    GC_printf1("Stopping the world from 0x%lx\n", pthread_self());

    n_live_threads = GC_suspend_all();

    if (GC_retry_signals) {
        unsigned long wait_usecs = 0;  /* Total wait since retry. */
#       define WAIT_UNIT 3000
#       define RETRY_INTERVAL 100000
        for (;;) {
            int ack_count;

            sem_getvalue(&GC_suspend_ack_sem, &ack_count);
            if (ack_count == n_live_threads) break;
            if (wait_usecs > RETRY_INTERVAL) {
                int newly_sent = GC_suspend_all();

                if (GC_print_stats) {
                    GC_printf1("Resent %ld signals after timeout\n",
                               newly_sent);
                }
                sem_getvalue(&GC_suspend_ack_sem, &ack_count);
                if (newly_sent < n_live_threads - ack_count) {
                    WARN("Lost some threads during GC_stop_world?!\n", 0);
                    n_live_threads = ack_count + newly_sent;
                }
                wait_usecs = 0;
            }
            usleep(WAIT_UNIT);
            wait_usecs += WAIT_UNIT;
        }
    }
    for (i = 0; i < n_live_threads; i++) {
        while (0 != (code = sem_wait(&GC_suspend_ack_sem))) {
            if (errno != EINTR) {
                GC_err_printf1("Sem_wait returned %ld\n", (unsigned long)code);
                ABORT("sem_wait for handler failed");
            }
        }
    }

    GC_printf1("World stopped from 0x%lx\n", pthread_self());

    GC_stopping_thread = 0;  /* debugging only */
467 GC_printf1("pthread_stop_world: num_threads %d\n", nacl_num_gc_threads
- 1);
469 nacl_thread_parker
= pthread_self();
470 __nacl_thread_suspension_needed
= 1;
473 #define NACL_PARK_WAIT_NANOSECONDS 100000
474 #define NANOS_PER_SECOND 1000000000
475 int num_threads_parked
= 0;
478 /* Check the 'parked' flag for each thread the GC knows about */
479 for (i
= 0; i
< MAX_NACL_GC_THREADS
&& num_used
< nacl_num_gc_threads
; i
++) {
480 if (nacl_thread_used
[i
] == 1) {
482 if (nacl_thread_parked
[i
] == 1) {
483 num_threads_parked
++;
487 /* -1 for the current thread */
488 if (num_threads_parked
>= nacl_num_gc_threads
- 1)
491 ts
.tv_nsec
= NACL_PARK_WAIT_NANOSECONDS
;
493 GC_printf1("sleeping waiting for %d threads to park...\n", nacl_num_gc_threads
- num_threads_parked
- 1);
496 if (++num_sleeps
> NANOS_PER_SECOND
/ NACL_PARK_WAIT_NANOSECONDS
) {
497 GC_printf1("GC appears stalled waiting for %d threads to park...\n", nacl_num_gc_threads
- num_threads_parked
- 1);
#if defined(__x86_64__)

#define NACL_STORE_REGS()  \
    do {                                \
        __asm__ __volatile__ ("push %rbx");\
        __asm__ __volatile__ ("push %rbp");\
        __asm__ __volatile__ ("push %r12");\
        __asm__ __volatile__ ("push %r13");\
        __asm__ __volatile__ ("push %r14");\
        __asm__ __volatile__ ("push %r15");\
        __asm__ __volatile__ ("mov %%esp, %0" : "=m" (nacl_gc_thread_self->stop_info.stack_ptr));\
        memcpy(nacl_gc_thread_self->stop_info.reg_storage, nacl_gc_thread_self->stop_info.stack_ptr, NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));\
        __asm__ __volatile__ ("naclasp $48, %r15");\
    } while (0)

#elif defined(__i386__)

#define NACL_STORE_REGS()  \
    do {                                \
        __asm__ __volatile__ ("push %ebx");\
        __asm__ __volatile__ ("push %ebp");\
        __asm__ __volatile__ ("push %esi");\
        __asm__ __volatile__ ("push %edi");\
        __asm__ __volatile__ ("mov %%esp, %0" : "=m" (nacl_gc_thread_self->stop_info.stack_ptr));\
        memcpy(nacl_gc_thread_self->stop_info.reg_storage, nacl_gc_thread_self->stop_info.stack_ptr, NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));\
        __asm__ __volatile__ ("add $16, %esp");\
    } while (0)

#elif defined(__arm__)

#define NACL_STORE_REGS()  \
    do {                                                                \
        __asm__ __volatile__ (                                          \
            "bic %0, %0, #0xc0000000\n\t"                               \
            "str sp, [%0]\n\t"                                          \
            "bic %1, %1, #0xc0000000\n\t"                               \
            "stm %1, {r4-r8,r10-r12,lr}\n\t"                            \
            :                                                           \
            : "r" (&nacl_gc_thread_self->stop_info.stack_ptr),          \
              "r" (nacl_gc_thread_self->stop_info.reg_storage)          \
            : "memory");                                                \
    } while (0)

#else

#error "Please port NACL_STORE_REGS"

#endif
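/* A hypothetical, portable fallback for ports without an asm sequence,   */
/* in the spirit of the setjmp-based generic register spill used          */
/* elsewhere in the collector: dump the callee-saved registers into a     */
/* stack-resident jmp_buf and copy its raw words into the reg-storage     */
/* area.  The names and sizes below are illustrative assumptions, not     */
/* part of the NaCl port.                                                 */
#if 0
#include <setjmp.h>
#include <string.h>

/* Number of pointer-sized words needed to hold a jmp_buf. */
#define SKETCH_REG_STORAGE_WORDS \
    ((sizeof(jmp_buf) + sizeof(void *) - 1) / sizeof(void *))

static void sketch_store_regs(void *reg_storage[])
{
    jmp_buf regs;                       /* registers land in this stack object */

    if (setjmp(regs) == 0) {
        /* Copy the raw register image so a conservative scan of         */
        /* reg_storage sees any pointers that live only in registers.    */
        memcpy(reg_storage, regs, sizeof regs);
    }
}
#endif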
void nacl_pre_syscall_hook()
{
    int local_dummy = 0;
    if (nacl_thread_idx != -1) {
        NACL_STORE_REGS();
        nacl_gc_thread_self->stop_info.stack_ptr = (ptr_t)(&local_dummy);
        nacl_thread_parked[nacl_thread_idx] = 1;
    }
}

void __nacl_suspend_thread_if_needed();

void nacl_post_syscall_hook()
{
    /* Calling __nacl_suspend_thread_if_needed() right away should */
    /* guarantee we don't mutate the GC set.                       */
    __nacl_suspend_thread_if_needed();
    if (nacl_thread_idx != -1) {
        nacl_thread_parked[nacl_thread_idx] = 0;
    }
}

void __nacl_suspend_thread_if_needed() {
    if (__nacl_thread_suspension_needed) {
        pthread_t self = pthread_self();
        int local_dummy = 0;

        /* Don't try to park the thread parker. */
        if (nacl_thread_parker == self)
            return;

        /* This can happen when a thread is created   */
        /* outside of the GC system (wthread mostly). */
        if (nacl_thread_idx < 0)
            return;

        /* If it was already 'parked', we're returning from a syscall, */
        /* so don't bother storing registers again, the GC has a set.  */
        if (!nacl_thread_parked[nacl_thread_idx]) {
            NACL_STORE_REGS();
            nacl_gc_thread_self->stop_info.stack_ptr = (ptr_t)(&local_dummy);
        }
        nacl_thread_parked[nacl_thread_idx] = 1;
        while (__nacl_thread_suspension_needed)
            ;
        nacl_thread_parked[nacl_thread_idx] = 0;

        /* Clear out the reg storage for next suspend. */
        memset(nacl_gc_thread_self->stop_info.reg_storage, 0,
               NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));
    }
}
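/* Summary of the cooperative protocol: the stopping thread sets          */
/* __nacl_thread_suspension_needed and polls nacl_thread_parked[]; every  */
/* other thread, on its next syscall hook or explicit check, stores its   */
/* registers, publishes its stack pointer, sets its parked flag, and      */
/* spins until __nacl_thread_suspension_needed is cleared when the world  */
/* is restarted.                                                          */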
/* Caller holds allocation lock. */
void GC_stop_world()
{
    GC_notify_event (GC_EVENT_PRE_STOP_WORLD);

    GC_process_togglerefs ();

    /* Make sure all free list construction has stopped before we start. */
    /* No new construction can start, since free list construction is    */
    /* required to acquire and release the GC lock before it starts,     */
    /* and we have the lock.                                             */
#   ifdef PARALLEL_MARK
      GC_acquire_mark_lock();
      GC_ASSERT(GC_fl_builder_count == 0);
      /* We should have previously waited for it to become zero. */
#   endif /* PARALLEL_MARK */

    pthread_stop_world ();

#   ifdef PARALLEL_MARK
      GC_release_mark_lock();
#   endif

    GC_notify_event (GC_EVENT_POST_STOP_WORLD);
}
/* Caller holds allocation lock, and has held it continuously since */
/* the world stopped.                                                */
static void pthread_start_world()
{
#ifndef NACL
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;
    int code;

    GC_printf0("World starting\n");

    GC_notify_event (GC_EVENT_PRE_START_WORLD);

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            if (p -> thread_blocked) continue;
            n_live_threads++;

            GC_printf1("Sending restart signal to 0x%lx\n", p -> id);

#ifndef PLATFORM_ANDROID
            result = pthread_kill(p -> id, SIG_THR_RESTART);
#else
            result = android_thread_kill(p -> kernel_id, SIG_THR_RESTART);
#endif
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }

    GC_printf0 ("All threads signaled");

    for (i = 0; i < n_live_threads; i++) {
        while (0 != (code = sem_wait(&GC_suspend_ack_sem))) {
            if (errno != EINTR) {
                GC_err_printf1("Sem_wait returned %ld\n", (unsigned long)code);
                ABORT("sem_wait for handler failed");
            }
        }
    }

    GC_notify_event (GC_EVENT_POST_START_WORLD);

    GC_printf0("World started\n");
#else /* NACL */
    GC_notify_event (GC_EVENT_PRE_START_WORLD);

    GC_printf0("World starting\n");

    __nacl_thread_suspension_needed = 0;

    GC_notify_event (GC_EVENT_POST_START_WORLD);
#endif /* NACL */
}

void GC_start_world()
{
    pthread_start_world ();
}
static void pthread_stop_init() {
    struct sigaction act;

    if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
        ABORT("sem_init failed");

    act.sa_flags = SA_RESTART;
    if (sigfillset(&act.sa_mask) != 0) {
        ABORT("sigfillset() failed");
    }
    GC_remove_allowed_signals(&act.sa_mask);
    /* SIG_THR_RESTART is set in the resulting mask.  */
    /* It is unmasked by the handler when necessary.  */
    act.sa_handler = GC_suspend_handler;
    if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
        ABORT("Cannot set SIG_SUSPEND handler");
    }

    act.sa_handler = GC_restart_handler;
    if (sigaction(SIG_THR_RESTART, &act, NULL) != 0) {
        ABORT("Cannot set SIG_THR_RESTART handler");
    }

    /* Initialize suspend_handler_mask.  It excludes SIG_THR_RESTART. */
    if (sigfillset(&suspend_handler_mask) != 0) ABORT("sigfillset() failed");
    GC_remove_allowed_signals(&suspend_handler_mask);
    if (sigdelset(&suspend_handler_mask, SIG_THR_RESTART) != 0)
        ABORT("sigdelset() failed");

    /* Check for GC_RETRY_SIGNALS. */
    if (0 != GETENV("GC_RETRY_SIGNALS")) {
        GC_retry_signals = TRUE;
    }
    if (0 != GETENV("GC_NO_RETRY_SIGNALS")) {
        GC_retry_signals = FALSE;
    }
    if (GC_print_stats && GC_retry_signals) {
        GC_printf0("Will retry suspend signal if necessary.\n");
    }
}

/* We hold the allocation lock. */
void GC_stop_init()
{
    pthread_stop_init ();
}

#endif