Preliminary work towards threads on win32
[sbcl.git] / src / runtime / safepoint.c
blob224643d219693a327a98bde5369ea7d74b43d26c
1 /*
2 * This software is part of the SBCL system. See the README file for
3 * more information.
5 * This software is derived from the CMU CL system, which was
6 * written at Carnegie Mellon University and released into the
7 * public domain. The software is in the public domain and is
8 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
11 #include "sbcl.h"
13 #ifdef LISP_FEATURE_SB_SAFEPOINT /* entire file */
14 #include <stdlib.h>
15 #include <stdio.h>
16 #include <string.h>
17 #ifndef LISP_FEATURE_WIN32
18 #include <sched.h>
19 #endif
20 #include <signal.h>
21 #include <stddef.h>
22 #include <errno.h>
23 #include <sys/types.h>
24 #ifndef LISP_FEATURE_WIN32
25 #include <sys/wait.h>
26 #endif
27 #ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
28 #include <mach/mach.h>
29 #include <mach/mach_error.h>
30 #include <mach/mach_types.h>
31 #endif
32 #include "runtime.h"
33 #include "validate.h"
34 #include "thread.h"
35 #include "arch.h"
36 #include "target-arch-os.h"
37 #include "os.h"
38 #include "globals.h"
39 #include "dynbind.h"
40 #include "genesis/cons.h"
41 #include "genesis/fdefn.h"
42 #include "interr.h"
43 #include "alloc.h"
44 #include "gc-internal.h"
45 #include "pseudo-atomic.h"
46 #include "interrupt.h"
47 #include "lispregs.h"
49 #if !defined(LISP_FEATURE_WIN32)
50 /* win32-os.c covers these, but there is no unixlike-os.c, so the normal
51 * definition goes here. Fixme: (Why) don't these work for Windows?
53 void
54 map_gc_page()
56 odxprint(misc, "map_gc_page");
57 os_protect((void *) GC_SAFEPOINT_PAGE_ADDR,
59 OS_VM_PROT_READ | OS_VM_PROT_WRITE);
62 void
63 unmap_gc_page()
65 odxprint(misc, "unmap_gc_page");
66 os_protect((void *) GC_SAFEPOINT_PAGE_ADDR, 4, OS_VM_PROT_NONE);
68 #endif /* !LISP_FEATURE_WIN32 */
70 static inline int
71 thread_may_gc()
73 /* Thread may gc if all of these are true:
74 * 1) GC_INHIBIT == NIL (outside of protected part of without-gcing)
75 * 2) GC_PENDING != :in-progress (outside of recursion protection)
76 * Note that we are in a safepoint here, which is always outside of PA. */
78 struct thread *self = arch_os_get_current_thread();
79 return (SymbolValue(GC_INHIBIT, self) == NIL
80 && (SymbolTlValue(GC_PENDING, self) == T ||
81 SymbolTlValue(GC_PENDING, self) == NIL));
84 #ifdef LISP_FEATURE_SB_THRUPTION
85 static inline int
86 thread_may_thrupt(os_context_t *ctx)
88 struct thread * self = arch_os_get_current_thread();
89 /* Thread may be interrupted if all of these are true:
90 * 1) Deferrables are unblocked in the context of the signal that
91 * went into the safepoint. -- Otherwise the surrounding code
92 * didn't want to be interrupted by a signal, so presumably it didn't
93 * want to be INTERRUPT-THREADed either.
94 * (See interrupt_handle_pending for an exception.)
95 * 2) On POSIX: There is no pending signal. This is important even
96 * after checking the sigmask, since we could be in the
97 * handle_pending trap following re-enabling of interrupts.
98 * Signals are unblocked in that case, but the signal is still
99 * pending; we want to run GC before handling the signal and
100 * therefore entered this safepoint. But the thruption would call
101 * ALLOW-WITH-INTERRUPTS, and could re-enter the handle_pending
102 * trap, leading to recursion.
103 * 3) INTERRUPTS_ENABLED is non-nil.
104 * 4) No GC pending; it takes precedence.
105 * Note that we are in a safepoint here, which is always outside of PA. */
107 if (SymbolValue(INTERRUPTS_ENABLED, self) == NIL)
108 return 0;
110 if (SymbolValue(GC_PENDING, self) != NIL)
111 return 0;
113 if (SymbolValue(STOP_FOR_GC_PENDING, self) != NIL)
114 return 0;
116 #ifdef LISP_FEATURE_WIN32
117 if (deferrables_blocked_p(&self->os_thread->blocked_signal_set))
118 return 0;
119 #else
120 /* ctx is NULL if the caller wants to ignore the sigmask. */
121 if (ctx && deferrables_blocked_p(os_context_sigmask_addr(ctx)))
122 return 0;
123 if (SymbolValue(INTERRUPT_PENDING, self) != NIL)
124 return 0;
125 #endif
127 if (SymbolValue(RESTART_CLUSTERS, self) == NIL)
128 /* This special case prevents TERMINATE-THREAD from hitting
129 * during INITIAL-THREAD-FUNCTION before it's ready. Curiously,
130 * deferrables are already unblocked there. Further
131 * investigation may be in order. */
132 return 0;
134 return 1;
137 // returns 0 if skipped, 1 otherwise
139 check_pending_thruptions(os_context_t *ctx)
141 struct thread *p = arch_os_get_current_thread();
143 #ifdef LISP_FEATURE_WIN32
144 pthread_t pself = p->os_thread;
145 sigset_t oldset;
146 /* On Windows, wake_thread/kill_safely does not set THRUPTION_PENDING
147 * in the self-kill case; instead we do it here while also clearing the
148 * "signal". */
149 if (pself->pending_signal_set)
150 if (__sync_fetch_and_and(&pself->pending_signal_set,0))
151 SetSymbolValue(THRUPTION_PENDING, T, p);
152 #endif
154 if (!thread_may_thrupt(ctx))
155 return 0;
156 if (SymbolValue(THRUPTION_PENDING, p) == NIL)
157 return 0;
158 SetSymbolValue(THRUPTION_PENDING, NIL, p);
160 #ifdef LISP_FEATURE_WIN32
161 oldset = pself->blocked_signal_set;
162 pself->blocked_signal_set = deferrable_sigset;
163 if (ctx) fake_foreign_function_call(ctx);
164 #else
165 sigset_t oldset;
166 block_deferrable_signals(0, &oldset);
167 #endif
169 funcall0(StaticSymbolFunction(RUN_INTERRUPTION));
171 #ifdef LISP_FEATURE_WIN32
172 if (ctx) undo_fake_foreign_function_call(ctx);
173 pself->blocked_signal_set = oldset;
174 if (ctx) ctx->sigmask = oldset;
175 #else
176 pthread_sigmask(SIG_SETMASK, &oldset, 0);
177 #endif
178 return 1;
180 #endif
183 on_stack_p(struct thread *th, void *esp)
185 return (void *)th->control_stack_start
186 <= esp && esp
187 < (void *)th->control_stack_end;
190 #ifndef LISP_FEATURE_WIN32
191 /* (Technically, we still allocate an altstack even on Windows. Since
192 * Windows has a contiguous stack with an automatic guard page of
193 * user-configurable size instead of an alternative stack though, the
194 * SBCL-allocated altstack doesn't actually apply and won't be used.) */
196 on_altstack_p(struct thread *th, void *esp)
198 void *start = (void *)th+dynamic_values_bytes;
199 void *end = (char *)start + 32*SIGSTKSZ;
200 return start <= esp && esp < end;
202 #endif
/* Sanity check: lose() unless `esp' lies on th's control stack. */
void
assert_on_stack(struct thread *th, void *esp)
{
    if (on_stack_p(th, esp))
        return;
#ifndef LISP_FEATURE_WIN32
    if (on_altstack_p(th, esp))
        lose("thread %p: esp on altstack: %p", th, esp);
#endif
    lose("thread %p: bogus esp: %p", th, esp);
}
216 // returns 0 if skipped, 1 otherwise
218 check_pending_gc(os_context_t *ctx)
220 odxprint(misc, "check_pending_gc");
221 struct thread * self = arch_os_get_current_thread();
222 int done = 0;
223 sigset_t sigset;
225 if ((SymbolValue(IN_SAFEPOINT,self) == T) &&
226 ((SymbolValue(GC_INHIBIT,self) == NIL) &&
227 (SymbolValue(GC_PENDING,self) == NIL))) {
228 SetSymbolValue(IN_SAFEPOINT,NIL,self);
230 if (thread_may_gc() && (SymbolValue(IN_SAFEPOINT, self) == NIL)) {
231 if ((SymbolTlValue(GC_PENDING, self) == T)) {
232 lispobj gc_happened = NIL;
234 bind_variable(IN_SAFEPOINT,T,self);
235 block_deferrable_signals(NULL,&sigset);
236 if(SymbolTlValue(GC_PENDING,self)==T)
237 gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
238 unbind_variable(IN_SAFEPOINT,self);
239 thread_sigmask(SIG_SETMASK,&sigset,NULL);
240 if (gc_happened == T) {
241 /* POST_GC wants to enable interrupts */
242 if (SymbolValue(INTERRUPTS_ENABLED,self) == T ||
243 SymbolValue(ALLOW_WITH_INTERRUPTS,self) == T) {
244 odxprint(misc, "going to call POST_GC");
245 funcall0(StaticSymbolFunction(POST_GC));
247 done = 1;
251 return done;
254 /* Several ideas on interthread signalling should be
255 tried. Implementation below was chosen for its moderate size and
256 relative simplicity.
258 Mutex is the only (conventional) system synchronization primitive
259 used by it. Some of the code below looks weird with this
260 limitation; rwlocks, Windows Event Objects, or perhaps pthread
261 barriers could be used to improve clarity.
263 No condvars here: our pthreads_win32 is great, but it doesn't
264 provide wait morphing optimization; let's avoid extra context
265 switches and extra contention. */
267 struct gc_dispatcher {
269 /* Held by the first thread that decides to signal all others, for
270 the entire period while common GC safepoint page is
271 unmapped. This thread is called `STW (stop-the-world)
272 initiator' below. */
273 pthread_mutex_t mx_gpunmapped;
275 /* Held by STW initiator while it updates th_stw_initiator and
276 takes other locks in this structure */
277 pthread_mutex_t mx_gptransition;
279 /* Held by STW initiator until the world should be started (GC
280 complete, thruptions delivered). */
281 pthread_mutex_t mx_gcing;
283 /* Held by a SUB-GC's gc_stop_the_world() when thread in SUB-GC
284 holds the GC Lisp-level mutex, but _couldn't_ become STW
285 initiator (i.e. another thread is already stopping the
286 world). */
287 pthread_mutex_t mx_subgc;
289 /* First thread (at this round) that decided to stop the world */
290 struct thread *th_stw_initiator;
292 /* Thread running SUB-GC under the `supervision' of STW
293 initiator */
294 struct thread *th_subgc;
296 /* Stop counter. Nested gc-stop-the-world and gc-start-the-world
297 work without thundering herd. */
298 int stopped;
300 /* Thruption flag: Iff true, current STW initiator is delivering
301 thruptions and not GCing. */
302 boolean thruption;
304 } gc_dispatcher = {
305 /* mutexes lazy initialized, other data initially zeroed */
306 .mx_gpunmapped = PTHREAD_MUTEX_INITIALIZER,
307 .mx_gptransition = PTHREAD_MUTEX_INITIALIZER,
308 .mx_gcing = PTHREAD_MUTEX_INITIALIZER,
309 .mx_subgc = PTHREAD_MUTEX_INITIALIZER,
313 /* set_thread_csp_access -- alter page permissions for not-in-Lisp
314 flag (Lisp Stack Top) of the thread `p'. The flag may be modified
315 if `writable' is true.
317 Return true if there is a non-null value in the flag.
319 When a thread enters C code or leaves it, a per-thread location is
320 modified. That machine word serves as a not-in-Lisp flag; for
321 convenience, when in C, it's filled with a topmost stack location
322 that may contain Lisp data. When thread is in Lisp, the word
323 contains NULL.
325 GENCGC uses each thread's flag value for conservative garbage collection.
327 There is a full VM page reserved for this word; page permissions
328 are switched to read-only for race-free examine + wait + use
329 scenarios. */
330 static inline boolean
331 set_thread_csp_access(struct thread* p, boolean writable)
333 os_protect((os_vm_address_t) p->csp_around_foreign_call,
334 THREAD_CSP_PAGE_SIZE,
335 writable? (OS_VM_PROT_READ|OS_VM_PROT_WRITE)
336 : (OS_VM_PROT_READ));
337 return !!*p->csp_around_foreign_call;
341 /* maybe_become_stw_initiator -- if there is no stop-the-world action
342 in progress, begin it by unmapping GC page, and record current
343 thread as STW initiator.
345 `thruption' flag affects some subtleties of stop/start methods:
346 waiting for other threads allowing GC; setting and clearing
347 STOP_FOR_GC_PENDING, GC_PENDING, THRUPTION_PENDING, etc.
349 Return true if current thread becomes a GC initiator, or already
350 _is_ a STW initiator.
352 Unlike gc_stop_the_world and gc_start_the_world (that should be
353 used in matching pairs), maybe_become_stw_initiator is idempotent
354 within a stop-restart cycle. With this call, a thread may `reserve
355 the right' to stop the world as early as it wants. */
357 static inline boolean
358 maybe_become_stw_initiator(boolean thruption)
360 struct thread* self = arch_os_get_current_thread();
362 /* Double-checked locking. Possible word tearing on some
363 architectures, FIXME FIXME, but let's think of it when GENCGC
364 and threaded SBCL is ported to them. */
365 if (!gc_dispatcher.th_stw_initiator) {
366 odxprint(misc,"NULL STW BEFORE GPTRANSITION");
367 pthread_mutex_lock(&gc_dispatcher.mx_gptransition);
368 /* We hold mx_gptransition. Is there no STW initiator yet? */
369 if (!gc_dispatcher.th_stw_initiator) {
370 odxprint(misc,"NULL STW IN GPTRANSITION, REPLACING");
371 /* Then we are... */
372 gc_dispatcher.th_stw_initiator = self;
373 gc_dispatcher.thruption = thruption;
375 /* hold mx_gcing until we restart the world */
376 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
378 /* and mx_gpunmapped until we remap common GC page */
379 pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
381 /* we unmap it; other threads running Lisp code will now
382 trap. */
383 unmap_gc_page();
385 /* stop counter; the world is not stopped yet. */
386 gc_dispatcher.stopped = 0;
388 pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
390 return gc_dispatcher.th_stw_initiator == self;
394 /* maybe_let_the_world_go -- if current thread is a STW initiator,
395 unlock internal GC structures, and return true. */
396 static inline boolean
397 maybe_let_the_world_go()
399 struct thread* self = arch_os_get_current_thread();
400 if (gc_dispatcher.th_stw_initiator == self) {
401 pthread_mutex_lock(&gc_dispatcher.mx_gptransition);
402 if (gc_dispatcher.th_stw_initiator == self) {
403 gc_dispatcher.th_stw_initiator = NULL;
405 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
406 pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
407 return 1;
408 } else {
409 return 0;
414 /* gc_stop_the_world -- become STW initiator (waiting for other GCs to
415 complete if necessary), and make sure all other threads are either
416 stopped or gc-safe (i.e. running foreign calls).
418 If GC initiator already exists, gc_stop_the_world() either waits
419 for its completion, or cooperates with it: e.g. concurrent pending
420 thruption handler allows (SUB-GC) to complete under its
421 `supervision'.
423 Code sections bounded by gc_stop_the_world and gc_start_the_world
424 may be nested; inner calls don't stop or start threads,
425 decrementing or incrementing the stop counter instead. */
426 void
427 gc_stop_the_world()
429 struct thread* self = arch_os_get_current_thread(), *p;
430 boolean thruption;
431 if (SymbolTlValue(GC_INHIBIT,self)!=T) {
432 /* If GC is enabled, this thread may wait for current STW
433 initiator without causing deadlock. */
434 if (!maybe_become_stw_initiator(0)) {
435 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
436 maybe_become_stw_initiator(0);
437 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
439 /* Now _this thread_ should be STW initiator */
440 gc_assert(self == gc_dispatcher.th_stw_initiator);
441 } else {
442 /* GC inhibited; e.g. we are inside SUB-GC */
443 if (!maybe_become_stw_initiator(0)) {
444 /* Some trouble. Inside SUB-GC, holding the Lisp-side
445 mutex, but some other thread is stopping the world. */
446 if (gc_dispatcher.thruption) {
447 /* Thruption. Wait until it's delivered */
448 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
449 /* Warning: mx_gcing is held recursively. */
450 gc_assert(maybe_become_stw_initiator(0));
451 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
452 } else {
453 /* In SUB-GC, holding mutex; other thread wants to
454 GC. */
455 if (gc_dispatcher.th_subgc == self) {
456 /* There is an outer gc_stop_the_world() by _this_
457 thread, running subordinately to initiator.
458 Just increase stop counter. */
459 ++gc_dispatcher.stopped;
460 return;
462 /* Register as subordinate collector thread: take
463 mx_subgc */
464 pthread_mutex_lock(&gc_dispatcher.mx_subgc);
465 ++gc_dispatcher.stopped;
467 /* Unlocking thread's own thread_qrl() designates
468 `time to examine me' to other threads. */
469 pthread_mutex_unlock(thread_qrl(self));
471 /* STW (GC) initiator thread will see our thread needs
472 to finish GC. It will stop the world and itself,
473 and unlock its qrl. */
474 pthread_mutex_lock(thread_qrl(gc_dispatcher.th_stw_initiator));
475 return;
479 thruption = gc_dispatcher.thruption; /* Thruption or GC? */
480 if (!gc_dispatcher.stopped++) {
481 /* Outermost stop: signal other threads */
482 pthread_mutex_lock(&all_threads_lock);
483 /* Phase 1: ensure all threads are aware of the need to stop,
484 or locked in the foreign code. */
485 for_each_thread(p) {
486 pthread_mutex_t *p_qrl = thread_qrl(p);
487 if (p==self)
488 continue;
490 /* Read-protect p's flag */
491 if (!set_thread_csp_access(p,0)) {
492 odxprint(safepoints,"taking qrl %p of %p", p_qrl, p);
493 /* Thread is in Lisp, so it should trap (either in
494 Lisp or in Lisp->FFI transition). Trap handler
495 unlocks thread_qrl(p); when it happens, we're safe
496 to examine that thread. */
497 pthread_mutex_lock(p_qrl);
498 odxprint(safepoints,"taken qrl %p of %p", p_qrl, p);
499 /* Mark thread for the future: should we collect, or
500 wait for its final permission? */
501 if (SymbolTlValue(GC_INHIBIT,p)!=T) {
502 SetTlSymbolValue(GC_SAFE,T,p);
503 } else {
504 SetTlSymbolValue(GC_SAFE,NIL,p);
506 pthread_mutex_unlock(p_qrl);
507 } else {
508 /* In C; we just disabled writing. */
509 if (!thruption) {
510 if (SymbolTlValue(GC_INHIBIT,p)==T) {
511 /* GC inhibited there */
512 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,p);
513 /* Enable writing. Such threads trap by
514 pending thruption when WITHOUT-GCING
515 section ends */
516 set_thread_csp_access(p,1);
517 SetTlSymbolValue(GC_SAFE,NIL,p);
518 } else {
519 /* Thread allows concurrent GC. It runs in C
520 (not a mutator), its in-Lisp flag is
521 read-only (so it traps on return). */
522 SetTlSymbolValue(GC_SAFE,T,p);
527 /* All threads are ready (GC_SAFE == T) or notified (GC_SAFE == NIL). */
528 map_gc_page();
529 pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
530 /* Threads with GC inhibited -- continued */
531 odxprint(safepoints,"after remapping GC page %p",self);
533 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
534 if (!thruption) {
535 struct thread* priority_gc = NULL;
536 for_each_thread(p) {
537 if (p==self)
538 continue;
539 if (SymbolTlValue(GC_SAFE,p)!=T) {
540 /* Wait for thread to `park'. NB it _always_ does
541 it with a pending interrupt trap, so CSP locking is
542 not needed */
543 odxprint(safepoints,"waiting final parking %p (qrl %p)",p, thread_qrl(p));
544 WITH_STATE_SEM(p) {
545 pthread_mutex_lock(thread_qrl(p));
546 if (SymbolTlValue(GC_INHIBIT,p)==T) {
547 /* Concurrent GC invoked manually */
548 gc_assert(!priority_gc); /* Should be at most one at a time */
549 priority_gc = p;
551 pthread_mutex_unlock(thread_qrl(p));
554 if (!os_get_csp(p))
555 lose("gc_stop_the_world: no SP in parked thread: %p", p);
557 if (priority_gc) {
558 /* This thread is managing the entire process, so it
559 has to allow manually-invoked GC to complete */
560 if (!set_thread_csp_access(self,1)) {
561 /* Create T.O.S. */
562 *self->csp_around_foreign_call = (lispobj)__builtin_frame_address(0);
563 /* Unlock myself */
564 pthread_mutex_unlock(thread_qrl(self));
565 /* Priority GC should take over, holding
566 mx_subgc until it's done. */
567 pthread_mutex_lock(&gc_dispatcher.mx_subgc);
568 /* Lock myself */
569 pthread_mutex_lock(thread_qrl(self));
570 *self->csp_around_foreign_call = 0;
571 SetTlSymbolValue(GC_PENDING,NIL,self);
572 pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
573 } else {
574 /* Unlock myself */
575 pthread_mutex_unlock(thread_qrl(self));
576 /* Priority GC should take over, holding
577 mx_subgc until it's done. */
578 pthread_mutex_lock(&gc_dispatcher.mx_subgc);
579 /* Lock myself */
580 pthread_mutex_lock(thread_qrl(self));
581 /* Unlock sub-gc */
582 pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
590 /* gc_start_the_world() -- restart all other threads if the call
591 matches the _outermost_ gc_stop_the_world(), or decrement the stop
592 counter. */
593 void
594 gc_start_the_world()
596 struct thread* self = arch_os_get_current_thread(), *p;
597 boolean thruption = gc_dispatcher.thruption;
598 if (gc_dispatcher.th_stw_initiator != self) {
599 odxprint(misc,"Unmapper %p self %p",gc_dispatcher.th_stw_initiator,self);
600 gc_assert (gc_dispatcher.th_subgc == self);
601 if (--gc_dispatcher.stopped == 1) {
602 gc_dispatcher.th_subgc = NULL;
603 pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
604 /* GC initiator may continue now */
605 pthread_mutex_unlock(thread_qrl(gc_dispatcher.th_stw_initiator));
607 return;
610 gc_assert(gc_dispatcher.th_stw_initiator == self);
612 if (!--gc_dispatcher.stopped) {
613 for_each_thread(p) {
614 if (!thruption) {
615 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,p);
616 SetTlSymbolValue(GC_PENDING,NIL,p);
618 if (
619 #ifdef LISP_FEATURE_SB_THRUPTION
620 SymbolTlValue(THRUPTION_PENDING,p)!=T
621 #else
622 1 /* trivially no thruption pending */
623 #endif
624 || SymbolTlValue(INTERRUPTS_ENABLED,p)!=T)
625 set_thread_csp_access(p,1);
627 pthread_mutex_unlock(&all_threads_lock);
628 /* Release everyone */
629 maybe_let_the_world_go();
634 /* in_race_p() -- return TRUE if no other thread is inside SUB-GC with
635 GC-PENDING :IN-PROGRESS. Used to prevent deadlock between manual
636 SUB-GC, auto-gc and thruption. */
637 static inline boolean
638 in_race_p()
640 struct thread* self = arch_os_get_current_thread(), *p;
641 boolean result = 0;
642 pthread_mutex_lock(&all_threads_lock);
643 for_each_thread(p) {
644 if (p!=self &&
645 SymbolTlValue(GC_PENDING,p)!=T &&
646 SymbolTlValue(GC_PENDING,p)!=NIL) {
647 result = 1;
648 break;
651 pthread_mutex_unlock(&all_threads_lock);
652 if (result) {
653 map_gc_page();
654 pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
655 maybe_let_the_world_go();
657 return result;
660 static void
661 set_csp_from_context(struct thread *self, os_context_t *ctx)
663 void **sp = (void **) *os_context_register_addr(ctx, reg_SP);
664 /* On POSIX platforms, it is sufficient to investigate only the part
665 * of the stack that was live before the interrupt, because in
666 * addition, we consider interrupt contexts explicitly. On Windows,
667 * however, we do not keep an explicit stack of exception contexts,
668 * and instead arrange for the conservative stack scan to also cover
669 * the context implicitly. The obvious way to do that is to start
670 * at the context itself: */
671 #ifdef LISP_FEATURE_WIN32
672 gc_assert((void **) ctx < sp);
673 sp = (void**) ctx;
674 #endif
675 gc_assert((void **)self->control_stack_start
676 <= sp && sp
677 < (void **)self->control_stack_end);
678 *self->csp_around_foreign_call = (lispobj) sp;
681 void
682 thread_pitstop(os_context_t *ctxptr)
684 struct thread* self = arch_os_get_current_thread();
685 boolean inhibitor = (SymbolTlValue(GC_INHIBIT,self)==T);
687 odxprint(safepoints,"pitstop [%p]", ctxptr);
688 if (inhibitor) {
689 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
690 /* Free qrl to let know we're ready... */
691 WITH_STATE_SEM(self) {
692 pthread_mutex_unlock(thread_qrl(self));
693 pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
694 pthread_mutex_lock(thread_qrl(self));
695 pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
697 /* Enable FF-CSP recording (not hurt: will gc at pit-stop, and
698 pit-stop always waits for GC end) */
699 set_thread_csp_access(self,1);
700 } else {
701 if (self == gc_dispatcher.th_stw_initiator && gc_dispatcher.stopped) {
702 set_thread_csp_access(self,1);
703 check_pending_gc(ctxptr);
704 return;
706 if ((SymbolTlValue(GC_PENDING,self)!=NIL) &&
707 maybe_become_stw_initiator(0) && !in_race_p()) {
708 gc_stop_the_world();
709 set_thread_csp_access(self,1);
710 check_pending_gc(ctxptr);
711 gc_start_the_world();
712 } else {
713 /* An innocent thread which is not an initiator _and_ is
714 not objecting. */
715 odxprint(safepoints,"pitstop yielding [%p]", ctxptr);
716 if (!set_thread_csp_access(self,1)) {
717 if (os_get_csp(self))
718 lose("thread_pitstop: would lose csp");
719 set_csp_from_context(self, ctxptr);
720 pthread_mutex_unlock(thread_qrl(self));
721 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
722 *self->csp_around_foreign_call = 0;
723 pthread_mutex_lock(thread_qrl(self));
724 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
725 } else {
726 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
727 set_thread_csp_access(self,1);
728 WITH_GC_AT_SAFEPOINTS_ONLY() {
729 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
730 #ifdef LISP_FEATURE_SB_THRUPTION
731 while (check_pending_thruptions(ctxptr))
733 #endif
735 return;
739 #ifdef LISP_FEATURE_SB_THRUPTION
740 while(check_pending_thruptions(ctxptr));
741 #endif
744 static inline void
745 thread_edge(os_context_t *ctxptr)
747 struct thread *self = arch_os_get_current_thread();
748 set_thread_csp_access(self,1);
749 if (os_get_csp(self)) {
750 if (!self->pc_around_foreign_call)
751 return; /* trivialize */
752 odxprint(safepoints,"edge leaving [%p]", ctxptr);
753 if (SymbolTlValue(GC_INHIBIT,self)!=T) {
754 #ifdef LISP_FEATURE_SB_THRUPTION
755 if (SymbolTlValue(THRUPTION_PENDING,self)==T &&
756 SymbolTlValue(INTERRUPTS_ENABLED,self)==T) {
757 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
758 set_thread_csp_access(self,1);
759 WITH_GC_AT_SAFEPOINTS_ONLY() {
760 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
761 while (check_pending_thruptions(ctxptr))
764 } else
765 #endif
767 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
768 odxprint(safepoints,"edge leaving [%p] took gcing", ctxptr);
769 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
770 odxprint(safepoints,"edge leaving [%p] released gcing", ctxptr);
773 } else {
774 /* Entering. */
775 odxprint(safepoints,"edge entering [%p]", ctxptr);
776 #ifdef LISP_FEATURE_SB_THRUPTION
777 while(check_pending_thruptions(ctxptr))
779 #endif
780 if (os_get_csp(self))
781 lose("thread_edge: would lose csp");
782 set_csp_from_context(self, ctxptr);
783 if (SymbolTlValue(GC_INHIBIT,self)!=T) {
784 pthread_mutex_unlock(thread_qrl(self));
785 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
786 *self->csp_around_foreign_call = 0;
787 pthread_mutex_lock(thread_qrl(self));
788 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
789 } else {
790 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
791 pthread_mutex_unlock(thread_qrl(self));
792 pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
793 *self->csp_around_foreign_call = 0;
794 pthread_mutex_lock(thread_qrl(self));
795 pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
801 /* thread_register_gc_trigger --
803 Called by GENCGC in each thread where GC_PENDING becomes T because
804 allocated memory size has crossed the threshold in
805 auto_gc_trigger. For the new collective GC sequence, its first call
806 marks a process-wide beginning of GC.
808 void
809 thread_register_gc_trigger()
811 odxprint(misc, "/thread_register_gc_trigger");
812 struct thread* self = arch_os_get_current_thread();
813 /* This function should be called instead of former
814 set_pseudo_atomic_interrupted(), e.g. never with true
815 GC_INHIBIT */
816 gc_assert(SymbolTlValue(GC_INHIBIT,self)!=T);
818 /* unmap GC page, signal other threads... */
819 maybe_become_stw_initiator(0);
824 #ifdef LISP_FEATURE_SB_THRUPTION
825 /* wake_thread(thread) -- ensure a thruption delivery to
826 * `thread'. */
828 # ifdef LISP_FEATURE_WIN32
830 void
831 wake_thread_io(struct thread * thread)
833 SetEvent(thread->private_events.events[1]);
836 void
837 wake_thread_win32(struct thread *thread)
839 wake_thread_io(thread);
841 if (SymbolTlValue(THRUPTION_PENDING,thread)==T)
842 return;
844 SetTlSymbolValue(THRUPTION_PENDING,T,thread);
846 if ((SymbolTlValue(GC_PENDING,thread)==T)||
847 (SymbolTlValue(STOP_FOR_GC_PENDING,thread)==T))
848 return;
850 pthread_mutex_unlock(&all_threads_lock);
852 if (maybe_become_stw_initiator(1) && !in_race_p()) {
853 gc_stop_the_world();
854 gc_start_the_world();
856 pthread_mutex_lock(&all_threads_lock);
857 return;
859 # else
861 wake_thread_posix(os_thread_t os_thread)
863 int found = 0;
864 struct thread *thread;
865 struct thread *self = arch_os_get_current_thread();
867 /* Must not and need not attempt to signal ourselves while we're the
868 * STW initiator. */
869 if (self->os_thread == os_thread) {
870 SetTlSymbolValue(THRUPTION_PENDING,T,self);
871 WITH_GC_AT_SAFEPOINTS_ONLY()
872 while (check_pending_thruptions(0 /* ignore the sigmask */))
874 return 0;
877 /* We are not in a signal handler here, so need to block signals
878 * manually. */
879 sigset_t oldset;
880 block_deferrable_signals(0, &oldset);
882 if (!maybe_become_stw_initiator(1) || in_race_p()) {
883 /* we are not able to wake the thread up, but the STW initiator
884 * will take care of it (kludge: unless it is in foreign code).
885 * Let's at least try to get our return value right. */
886 pthread_mutex_lock(&all_threads_lock);
887 for_each_thread (thread)
888 if (thread->os_thread == os_thread) {
889 found = 1;
890 break;
892 pthread_mutex_unlock(&all_threads_lock);
893 goto cleanup;
895 gc_stop_the_world();
897 /* we hold the all_threads lock */
898 for_each_thread (thread)
899 if (thread->os_thread == os_thread) {
900 /* it's still alive... */
901 found = 1;
903 SetTlSymbolValue(THRUPTION_PENDING,T,thread);
904 if (SymbolTlValue(GC_PENDING,thread) == T
905 || SymbolTlValue(STOP_FOR_GC_PENDING,thread) == T)
906 break;
908 if (os_get_csp(thread)) {
909 /* ... and in foreign code. Push it into a safety
910 * transition. */
911 int status = pthread_kill(os_thread, SIGPIPE);
912 if (status)
913 lose("wake_thread_posix: pthread_kill failed with %d\n",
914 status);
916 break;
919 /* If it was alive but in Lisp, the pit stop takes care of thruptions. */
920 gc_start_the_world();
922 cleanup:
923 pthread_sigmask(SIG_SETMASK, &oldset, 0);
924 return found ? 0 : -1;
926 #endif /* !LISP_FEATURE_WIN32 */
927 #endif /* LISP_FEATURE_SB_THRUPTION */
929 void
930 thread_in_safety_transition(os_context_t *ctx)
932 FSHOW_SIGNAL((stderr, "thread_in_safety_transition\n"));
933 thread_edge(ctx);
936 void
937 thread_in_lisp_raised(os_context_t *ctx)
939 FSHOW_SIGNAL((stderr, "thread_in_lisp_raised\n"));
940 thread_pitstop(ctx);
943 void
944 thread_interrupted(os_context_t *ctx)
946 FSHOW_SIGNAL((stderr, "thread_interrupted\n"));
947 thread_pitstop(ctx);
950 void**
951 os_get_csp(struct thread* th)
953 FSHOW_SIGNAL((stderr, "Thread %p has CSP *(%p) == %p, stack [%p,%p]\n",
955 th->csp_around_foreign_call,
956 *(void***)th->csp_around_foreign_call,
957 th->control_stack_start,
958 th->control_stack_end));
959 return *(void***)th->csp_around_foreign_call;
963 #ifndef LISP_FEATURE_WIN32
965 # ifdef LISP_FEATURE_SB_THRUPTION
966 void
967 thruption_handler(int signal, siginfo_t *info, os_context_t *ctx)
969 struct thread *self = arch_os_get_current_thread();
971 if (!os_get_csp(self))
972 /* In Lisp code. Do not run thruptions asynchronously. The
973 * next safepoint will take care of it. */
974 return;
976 /* In C code. As a rule, we assume that running thruptions is OK. */
977 fake_foreign_function_call(ctx);
978 thread_in_safety_transition(ctx);
979 undo_fake_foreign_function_call(ctx);
981 # endif
983 /* Designed to be of the same type as call_into_lisp. Ignores its
984 * arguments. */
985 lispobj
986 handle_global_safepoint_violation(lispobj fun, lispobj *args, int nargs)
988 #if trap_GlobalSafepoint != 0x1a
989 # error trap_GlobalSafepoint mismatch
990 #endif
991 asm("int3; .byte 0x1a;");
992 return 0;
995 lispobj
996 handle_csp_safepoint_violation(lispobj fun, lispobj *args, int nargs)
998 #if trap_CspSafepoint != 0x1b
999 # error trap_CspSafepoint mismatch
1000 #endif
1001 asm("int3; .byte 0x1b;");
1002 return 0;
1006 handle_safepoint_violation(os_context_t *ctx, os_vm_address_t fault_address)
1008 FSHOW_SIGNAL((stderr, "fault_address = %p, sp = %p, &csp = %p\n",
1009 fault_address,
1010 GC_SAFEPOINT_PAGE_ADDR,
1011 arch_os_get_current_thread()->csp_around_foreign_call));
1013 struct thread *self = arch_os_get_current_thread();
1015 if (fault_address == (os_vm_address_t) GC_SAFEPOINT_PAGE_ADDR) {
1016 /* We're on the altstack and don't want to run Lisp code. */
1017 arrange_return_to_c_function(ctx, handle_global_safepoint_violation, 0);
1018 return 1;
1021 if (fault_address == (os_vm_address_t) self->csp_around_foreign_call) {
1022 arrange_return_to_c_function(ctx, handle_csp_safepoint_violation, 0);
1023 return 1;
1026 /* not a safepoint */
1027 return 0;
1029 #endif /* LISP_FEATURE_WIN32 */
1031 void
1032 callback_wrapper_trampoline(lispobj arg0, lispobj arg1, lispobj arg2)
1034 struct thread* th = arch_os_get_current_thread();
1035 if (!th)
1036 lose("callback invoked in non-lisp thread. Sorry, that is not supported yet.");
1038 WITH_GC_AT_SAFEPOINTS_ONLY()
1039 funcall3(SymbolValue(ENTER_ALIEN_CALLBACK, 0), arg0, arg1, arg2);
1042 #endif /* LISP_FEATURE_SB_SAFEPOINT -- entire file */