src/runtime/thread.c
/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
#include "sbcl.h"

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifndef LISP_FEATURE_WIN32
#include <sched.h>
#endif
#include <stddef.h>
#include <errno.h>
#include <sys/types.h>
#ifndef LISP_FEATURE_WIN32
#include <sys/wait.h>
#endif

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_types.h>
#endif

#include "runtime.h"
#include "validate.h"           /* for BINDING_STACK_SIZE etc */
#include "thread.h"
#include "arch.h"
#include "target-arch-os.h"
#include "os.h"
#include "globals.h"
#include "dynbind.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"
#include "interr.h"             /* for lose() */
#include "alloc.h"
#include "gc-internal.h"
#include "cpputil.h"
#include "pseudo-atomic.h"
#include "interrupt.h"
#include "lispregs.h"
#ifdef LISP_FEATURE_SB_THREAD

#ifdef LISP_FEATURE_OPENBSD
#include <pthread_np.h>
#endif

#ifdef LISP_FEATURE_SUNOS
#include <thread.h>
#endif

#ifdef LISP_FEATURE_WIN32
# define IMMEDIATE_POST_MORTEM
#endif

#if defined(LISP_FEATURE_FREEBSD) || defined(LISP_FEATURE_DRAGONFLY) || defined (LISP_FEATURE_DARWIN)
#define LOCK_CREATE_THREAD
#endif

struct thread_post_mortem {
    os_thread_t os_thread;
    pthread_attr_t *os_attr;
    os_vm_address_t os_address;
};

static struct thread_post_mortem * volatile pending_thread_post_mortem = 0;
#endif
int dynamic_values_bytes=TLS_SIZE*sizeof(lispobj);  /* same for all threads */
struct thread *all_threads;

#ifdef LISP_FEATURE_SB_THREAD
pthread_mutex_t all_threads_lock = PTHREAD_MUTEX_INITIALIZER;
#ifdef LOCK_CREATE_THREAD
static pthread_mutex_t create_thread_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
#ifdef LISP_FEATURE_GCC_TLS
__thread struct thread *current_thread;
#endif
pthread_key_t lisp_thread = 0;
#endif

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
extern lispobj call_into_lisp_first_time(lispobj fun, lispobj *args, int nargs)
# ifdef LISP_FEATURE_X86_64
    __attribute__((sysv_abi))
# endif
    ;
#endif
static void
link_thread(struct thread *th)
{
    if (all_threads) all_threads->prev=th;
    th->next=all_threads;
    th->prev=0;
    all_threads=th;
}

#ifdef LISP_FEATURE_SB_THREAD
static void
unlink_thread(struct thread *th)
{
    if (th->prev)
        th->prev->next = th->next;
    else
        all_threads = th->next;
    if (th->next)
        th->next->prev = th->prev;
}
#ifndef LISP_FEATURE_SB_SAFEPOINT
/* Only access thread state with blockables blocked. */
lispobj
thread_state(struct thread *thread)
{
    lispobj state;
    sigset_t old;
    block_blockable_signals(&old);
    os_sem_wait(thread->state_sem, "thread_state");
    state = thread->state;
    os_sem_post(thread->state_sem, "thread_state");
    thread_sigmask(SIG_SETMASK, &old, NULL);
    return state;
}
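
/* Waiters register themselves in the state_not_running/state_not_stopped
 * waitcounts under state_sem and then sleep on the matching semaphore;
 * set_thread_state wakes them by posting that semaphore once per
 * registered waiter when the state leaves the value they waited on. */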
void
set_thread_state(struct thread *thread, lispobj state)
{
    int i, waitcount = 0;
    sigset_t old;
    block_blockable_signals(&old);
    os_sem_wait(thread->state_sem, "set_thread_state");
    if (thread->state != state) {
        if ((STATE_STOPPED==state) ||
            (STATE_DEAD==state)) {
            waitcount = thread->state_not_running_waitcount;
            thread->state_not_running_waitcount = 0;
            for (i=0; i<waitcount; i++)
                os_sem_post(thread->state_not_running_sem, "set_thread_state (not running)");
        }
        if ((STATE_RUNNING==state) ||
            (STATE_DEAD==state)) {
            waitcount = thread->state_not_stopped_waitcount;
            thread->state_not_stopped_waitcount = 0;
            for (i=0; i<waitcount; i++)
                os_sem_post(thread->state_not_stopped_sem, "set_thread_state (not stopped)");
        }
        thread->state = state;
    }
    os_sem_post(thread->state_sem, "set_thread_state");
    thread_sigmask(SIG_SETMASK, &old, NULL);
}
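
/* Block until THREAD's state is no longer STATE: bump the matching
 * waitcount under state_sem, sleep on the not-running/not-stopped
 * semaphore, then loop and re-check, since a post only means the state
 * changed at some point, not what it is now. */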
void
wait_for_thread_state_change(struct thread *thread, lispobj state)
{
    sigset_t old;
    os_sem_t *wait_sem;
    block_blockable_signals(&old);
  start:
    os_sem_wait(thread->state_sem, "wait_for_thread_state_change");
    if (thread->state == state) {
        switch (state) {
        case STATE_RUNNING:
            wait_sem = thread->state_not_running_sem;
            thread->state_not_running_waitcount++;
            break;
        case STATE_STOPPED:
            wait_sem = thread->state_not_stopped_sem;
            thread->state_not_stopped_waitcount++;
            break;
        default:
            lose("Invalid state in wait_for_thread_state_change: "OBJ_FMTX"\n", state);
        }
    } else {
        wait_sem = NULL;
    }
    os_sem_post(thread->state_sem, "wait_for_thread_state_change");
    if (wait_sem) {
        os_sem_wait(wait_sem, "wait_for_thread_state_change");
        goto start;
    }
    thread_sigmask(SIG_SETMASK, &old, NULL);
}
#endif /* sb-safepoint */
#endif /* sb-thread */
static int
initial_thread_trampoline(struct thread *th)
{
    lispobj function;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    lispobj *args = NULL;
#endif
#ifdef LISP_FEATURE_SB_THREAD
    pthread_setspecific(lisp_thread, (void *)1);
#endif
#if defined(THREADS_USING_GCSIGNAL) && (defined(LISP_FEATURE_PPC) || defined(LISP_FEATURE_ARM64))
    /* SIG_STOP_FOR_GC defaults to blocked on PPC? */
    unblock_gc_signals(0,0);
#endif
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) return 1;
    link_thread(th);
    th->os_thread=thread_self();
#ifndef LISP_FEATURE_WIN32
    protect_control_stack_hard_guard_page(1, NULL);
#endif
    protect_binding_stack_hard_guard_page(1, NULL);
    protect_alien_stack_hard_guard_page(1, NULL);
#ifndef LISP_FEATURE_WIN32
    protect_control_stack_guard_page(1, NULL);
#endif
    protect_binding_stack_guard_page(1, NULL);
    protect_alien_stack_guard_page(1, NULL);

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    return call_into_lisp_first_time(function,args,0);
#else
    return funcall0(function);
#endif
}
#ifdef LISP_FEATURE_SB_THREAD

# if defined(IMMEDIATE_POST_MORTEM)

/*
 * If this feature is set, we are running on a stack managed by the OS,
 * and no fancy delays are required for anything.  Just do it.
 */
static void
schedule_thread_post_mortem(struct thread *corpse)
{
    pthread_detach(pthread_self());
    gc_assert(!pthread_attr_destroy(corpse->os_attr));
#if defined(LISP_FEATURE_WIN32)
    os_invalidate_free(corpse->os_address, THREAD_STRUCT_SIZE);
#else
    os_invalidate(corpse->os_address, THREAD_STRUCT_SIZE);
#endif
}

# else

/* THREAD POST MORTEM CLEANUP
 *
 * Memory allocated for the thread stacks cannot be reclaimed while
 * the thread is still alive, so we need a mechanism for post mortem
 * cleanups.  FIXME: We actually have three, for historical reasons as
 * the saying goes.  Do we really need three?  Nikodemus guesses that
 * not anymore, now that we properly call pthread_attr_destroy before
 * freeing the stack. */
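
/* In the default (no CREATE_POST_MORTEM_THREAD) scheme below, each
 * exiting thread swaps its own post-mortem record into
 * pending_thread_post_mortem and then reaps whatever record was there
 * before, so a corpse is joined and its memory freed by the next
 * thread to exit rather than by itself. */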
static struct thread_post_mortem *
plan_thread_post_mortem(struct thread *corpse)
{
    if (corpse) {
        struct thread_post_mortem *post_mortem = malloc(sizeof(struct thread_post_mortem));
        gc_assert(post_mortem);
        post_mortem->os_thread = corpse->os_thread;
        post_mortem->os_attr = corpse->os_attr;
        post_mortem->os_address = corpse->os_address;

        return post_mortem;
    } else {
        /* FIXME: When does this happen? */
        return NULL;
    }
}

static void
perform_thread_post_mortem(struct thread_post_mortem *post_mortem)
{
#ifdef CREATE_POST_MORTEM_THREAD
    pthread_detach(pthread_self());
#endif
    int result;
    if (post_mortem) {
#if defined(LOCK_CREATE_THREAD) && defined (LISP_FEATURE_DARWIN)
        /* The thread may exit before pthread_create() has finished
           initialization and it may write into already unmapped
           memory. This lock doesn't actually need to protect
           anything, just to make sure that at least one call to
           pthread_create() has finished.

           Possible improvements: stash the address of the thread
           struct for which a pthread is being created and don't lock
           here if it's not the one being terminated. */
        result = pthread_mutex_lock(&create_thread_lock);
        gc_assert(result == 0);
        result = pthread_mutex_unlock(&create_thread_lock);
        gc_assert(result == 0);
#endif
        if ((result = pthread_join(post_mortem->os_thread, NULL))) {
            lose("Error calling pthread_join in perform_thread_post_mortem:\n%s",
                 strerror(result));
        }
        if ((result = pthread_attr_destroy(post_mortem->os_attr))) {
            lose("Error calling pthread_attr_destroy in perform_thread_post_mortem:\n%s",
                 strerror(result));
        }
        os_invalidate(post_mortem->os_address, THREAD_STRUCT_SIZE);
        free(post_mortem);
    }
}

static void
schedule_thread_post_mortem(struct thread *corpse)
{
    struct thread_post_mortem *post_mortem = NULL;
    if (corpse) {
        post_mortem = plan_thread_post_mortem(corpse);

#ifdef CREATE_POST_MORTEM_THREAD
        gc_assert(!pthread_create(&thread, NULL, perform_thread_post_mortem, post_mortem));
#else
        post_mortem = (struct thread_post_mortem *)
            swap_lispobjs((lispobj *)(void *)&pending_thread_post_mortem,
                          (lispobj)post_mortem);
        perform_thread_post_mortem(post_mortem);
#endif
    }
}

# endif /* !IMMEDIATE_POST_MORTEM */

/* Note: scribble must be stack-allocated */
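/* The scribble records per-attach state (the caller's old signal mask
 * and, on safepoint builds, the GCing-safety token) that
 * undo_init_new_thread and detach_os_thread need later, so it has to
 * outlive the Lisp call and live on this thread's own stack. */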
static void
init_new_thread(struct thread *th, init_thread_data *scribble, int guardp)
{
    int lock_ret;

    pthread_setspecific(lisp_thread, (void *)1);
    if(arch_os_thread_init(th)==0) {
        /* FIXME: handle error */
        lose("arch_os_thread_init failed\n");
    }

    th->os_thread=thread_self();
    if (guardp)
        protect_control_stack_guard_page(1, NULL);
    protect_binding_stack_guard_page(1, NULL);
    protect_alien_stack_guard_page(1, NULL);

    /* Since GC can only know about this thread from the all_threads
     * list and we're just adding this thread to it, there is no
     * danger of deadlocking even with SIG_STOP_FOR_GC blocked (which
     * it is not). */
#ifdef LISP_FEATURE_SB_SAFEPOINT
    *th->csp_around_foreign_call = (lispobj)scribble;
#endif
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    link_thread(th);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    /* Kludge: Changed the order of some steps between the safepoint/
     * non-safepoint versions of this code. Can we unify this more?
     */
#ifdef LISP_FEATURE_SB_SAFEPOINT
    gc_state_lock();
    gc_state_wait(GC_NONE);
    gc_state_unlock();
    push_gcing_safety(&scribble->safety);
#endif
}
static void
undo_init_new_thread(struct thread *th, init_thread_data *scribble)
{
    int lock_ret;

    /* Kludge: Changed the order of some steps between the safepoint/
     * non-safepoint versions of this code. Can we unify this more?
     */
#ifdef LISP_FEATURE_SB_SAFEPOINT
    block_blockable_signals(0);
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
#endif
    pop_gcing_safety(&scribble->safety);
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    unlink_thread(th);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);
#else
    /* Block GC */
    block_blockable_signals(0);
    set_thread_state(th, STATE_DEAD);

    /* SIG_STOP_FOR_GC is blocked and GC might be waiting for this
     * thread, but since we are already dead it won't wait long. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
#endif
    unlink_thread(th);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);
#endif

    arch_os_thread_cleanup(th);

#ifndef LISP_FEATURE_SB_SAFEPOINT
    os_sem_destroy(th->state_sem);
    os_sem_destroy(th->state_not_running_sem);
    os_sem_destroy(th->state_not_stopped_sem);
#endif

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
    mach_lisp_thread_destroy(th);
#endif

#if defined(LISP_FEATURE_WIN32)
    int i;
    for (i = 0; i<
             (int) (sizeof(th->private_events.events)/
                    sizeof(th->private_events.events[0])); ++i) {
        CloseHandle(th->private_events.events[i]);
    }
    TlsSetValue(OUR_TLS_INDEX,NULL);
#endif

    /* Undo the association of the current pthread to its `struct thread',
     * such that we can call arch_os_get_current_thread() later in this
     * thread and cleanly get back NULL. */
#ifdef LISP_FEATURE_GCC_TLS
    current_thread = NULL;
#else
    pthread_setspecific(specials, NULL);
#endif
}
/* this is the first thing that runs in the child (which is why the
 * silly calling convention).  Basically it calls the user's requested
 * lisp function after doing arch_os_thread_init and whatever other
 * bookkeeping needs to be done
 */
static int
new_thread_trampoline(struct thread *th)
{
    int result;
    init_thread_data scribble;

    FSHOW((stderr,"/creating thread %lu\n", thread_self()));
    check_deferrables_blocked_or_lose(0);
#ifndef LISP_FEATURE_SB_SAFEPOINT
    check_gc_signals_unblocked_or_lose(0);
#endif

    lispobj function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    init_new_thread(th, &scribble, 1);
    result = funcall0(function);
    undo_init_new_thread(th, &scribble);

    schedule_thread_post_mortem(th);

    FSHOW((stderr,"/exiting thread %lu\n", thread_self()));
    return result;
}
static struct thread *create_thread_struct(lispobj);

void
attach_os_thread(init_thread_data *scribble)
{
    os_thread_t os = pthread_self();
    odxprint(misc, "attach_os_thread: attaching to %p", os);

    struct thread *th = create_thread_struct(NIL);
    block_deferrable_signals(&scribble->oldset);
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    /* We don't actually want a pthread_attr here, but rather than add
     * `if's to the post-mortem, let's just keep that code happy by
     * keeping it initialized: */
    pthread_attr_init(th->os_attr);

#ifndef LISP_FEATURE_WIN32
    /* On windows, arch_os_thread_init will take care of finding the
     * stack. */
    void *stack_addr;
    size_t stack_size;
#ifdef LISP_FEATURE_OPENBSD
    stack_t stack;
    pthread_stackseg_np(os, &stack);
    stack_size = stack.ss_size;
    stack_addr = (void*)((size_t)stack.ss_sp - stack_size);
#elif defined LISP_FEATURE_SUNOS
    stack_t stack;
    thr_stksegment(&stack);
    stack_size = stack.ss_size;
    stack_addr = (void*)((size_t)stack.ss_sp - stack_size);
#elif defined(LISP_FEATURE_DARWIN)
    stack_addr = pthread_get_stackaddr_np(os);
    stack_size = pthread_get_stacksize_np(os);
#else
    pthread_attr_t attr;
#ifdef LISP_FEATURE_FREEBSD
    pthread_attr_get_np(os, &attr);
#else
    int pthread_getattr_np(pthread_t, pthread_attr_t *);
    pthread_getattr_np(os, &attr);
#endif
    pthread_attr_getstack(&attr, &stack_addr, &stack_size);
#endif

    th->control_stack_start = stack_addr;
    th->control_stack_end = (void *) (((uintptr_t) stack_addr) + stack_size);
#endif

    init_new_thread(th, scribble, 0);

    /* We will be calling into Lisp soon, and the functions being called
     * recklessly ignore the comment in target-thread which says that we
     * must be careful to not cause GC while initializing a new thread.
     * Since we first need to create a fresh thread object, it's really
     * tempting to just perform such unsafe allocation though.  So let's
     * at least try to suppress GC before consing, and hope that it
     * works: */
    bind_variable(GC_INHIBIT, T, th);

    uword_t stacksize
        = (uword_t) th->control_stack_end - (uword_t) th->control_stack_start;
    odxprint(misc, "attach_os_thread: attached %p as %p (0x%lx bytes stack)",
             os, th, (long) stacksize);
}
void
detach_os_thread(init_thread_data *scribble)
{
    struct thread *th = arch_os_get_current_thread();
    odxprint(misc, "detach_os_thread: detaching");

    undo_init_new_thread(th, scribble);

    odxprint(misc, "detach_os_thread: detached");
    pthread_setspecific(lisp_thread, (void *)0);
    thread_sigmask(SIG_SETMASK, &scribble->oldset, 0);
}
void
callback_wrapper_trampoline(
#if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
    /* On the x86oid backends, the assembly wrapper happens to not pass
     * in ENTER_ALIEN_CALLBACK explicitly for safepoints.  However, the
     * platforms with precise GC are tricky enough already, and I want
     * to minimize the read-time conditionals.  For those platforms, I'm
     * only replacing funcall3 with callback_wrapper_trampoline while
     * keeping the arguments unchanged. --DFL */
    lispobj __attribute__((__unused__)) fun,
#endif
    lispobj arg0, lispobj arg1, lispobj arg2)
{
#if defined(LISP_FEATURE_WIN32)
    pthread_np_notice_thread();
#endif
    struct thread* th = arch_os_get_current_thread();
    if (!th) {                  /* callback invoked in non-lisp thread */
        init_thread_data scribble;
        attach_os_thread(&scribble);
        funcall3(StaticSymbolFunction(ENTER_FOREIGN_CALLBACK), arg0,arg1,arg2);
        detach_os_thread(&scribble);
        return;
    }

#ifdef LISP_FEATURE_WIN32
    /* arg2 is the pointer to a return value, which sits on the stack */
    th->carried_base_pointer = (os_context_register_t) *(((void**)arg2)-1);
#endif

#ifdef LISP_FEATURE_SB_SAFEPOINT
    WITH_GC_AT_SAFEPOINTS_ONLY()
#endif
    {
        funcall3(SymbolValue(ENTER_ALIEN_CALLBACK, 0), arg0, arg1, arg2);
    }
}

#endif /* LISP_FEATURE_SB_THREAD */
static void __attribute__((unused))
free_thread_struct(struct thread *th)
{
#if defined(LISP_FEATURE_WIN32)
    os_invalidate_free((os_vm_address_t) th->os_address, THREAD_STRUCT_SIZE);
#else
    os_invalidate((os_vm_address_t) th->os_address, THREAD_STRUCT_SIZE);
#endif
}

#ifdef LISP_FEATURE_SB_THREAD
/* FIXME: should be MAX_INTERRUPTS -1 ? */
const unsigned int tls_index_start =
    MAX_INTERRUPTS + sizeof(struct thread)/sizeof(lispobj);
#endif
/* this is called from any other thread to create the new one, and
 * initialize all parts of it that can be initialized from another
 * thread
 */
static struct thread *
create_thread_struct(lispobj initial_function) {
    union per_thread_data *per_thread;
    struct thread *th=0;        /* subdue gcc */
    void *spaces=0;
    void *aligned_spaces=0;
#if defined(LISP_FEATURE_SB_THREAD) || defined(LISP_FEATURE_WIN32)
    unsigned int i;
#endif

    /* May as well allocate all the spaces at once: it saves us from
     * having to decide what to do if only some of the allocations
     * succeed.  SPACES must be appropriately aligned, since the GC
     * expects the control stack to start at a page boundary -- and
     * the OS may have even more rigorous requirements.  We can't rely
     * on the alignment passed from os_validate, since that might
     * assume the current (e.g. 4k) pagesize, while we calculate with
     * the biggest (e.g. 64k) pagesize allowed by the ABI. */
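    /* Rough layout of the single allocation, low addresses first:
     *   control stack | binding stack | alien stack | csp page | per-thread data
     * where the per-thread data is the dynamic TLS values (which overlay
     * struct thread itself) followed by the nonpointer data, as computed
     * below. */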
    spaces=os_validate(0, THREAD_STRUCT_SIZE);
    if(!spaces)
        return NULL;
    /* Aligning up is safe as THREAD_STRUCT_SIZE has
     * THREAD_ALIGNMENT_BYTES padding. */
    aligned_spaces = (void *)((((uword_t)(char *)spaces)
                               + THREAD_ALIGNMENT_BYTES-1)
                              &~(uword_t)(THREAD_ALIGNMENT_BYTES-1));
    void* csp_page=
        (aligned_spaces+
         thread_control_stack_size+
         BINDING_STACK_SIZE+
         ALIEN_STACK_SIZE);
    per_thread=(union per_thread_data *)
        (csp_page + THREAD_CSP_PAGE_SIZE);

#ifdef LISP_FEATURE_SB_THREAD
    for(i = 0; i < (dynamic_values_bytes / sizeof(lispobj)); i++)
        per_thread->dynamic_values[i] = NO_TLS_VALUE_MARKER_WIDETAG;
#endif

    th=&per_thread->thread;
    th->os_address = spaces;
    th->control_stack_start = aligned_spaces;
    th->binding_stack_start=
        (lispobj*)((void*)th->control_stack_start+thread_control_stack_size);
    th->control_stack_end = th->binding_stack_start;
    th->control_stack_guard_page_protected = T;
    th->alien_stack_start=
        (lispobj*)((void*)th->binding_stack_start+BINDING_STACK_SIZE);
    set_binding_stack_pointer(th,th->binding_stack_start);
    th->this=th;
    th->os_thread=0;

#ifdef LISP_FEATURE_SB_SAFEPOINT
# ifdef LISP_FEATURE_WIN32
    th->carried_base_pointer = 0;
# endif
# ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    th->pc_around_foreign_call = 0;
# endif
    th->csp_around_foreign_call = csp_page;
#endif

    struct nonpointer_thread_data *nonpointer_data
        = (void *) &per_thread->dynamic_values[TLS_SIZE];

    th->interrupt_data = &nonpointer_data->interrupt_data;

#ifdef LISP_FEATURE_SB_THREAD
    th->os_attr = &nonpointer_data->os_attr;
# ifndef LISP_FEATURE_SB_SAFEPOINT
    th->state_sem = &nonpointer_data->state_sem;
    th->state_not_running_sem = &nonpointer_data->state_not_running_sem;
    th->state_not_stopped_sem = &nonpointer_data->state_not_stopped_sem;
    os_sem_init(th->state_sem, 1);
    os_sem_init(th->state_not_running_sem, 0);
    os_sem_init(th->state_not_stopped_sem, 0);
    th->state_not_running_waitcount = 0;
    th->state_not_stopped_waitcount = 0;
# endif

#endif
    th->state=STATE_RUNNING;
#ifdef ALIEN_STACK_GROWS_DOWNWARD
    th->alien_stack_pointer=((void *)th->alien_stack_start
                             + ALIEN_STACK_SIZE-N_WORD_BYTES);
#else
    th->alien_stack_pointer=((void *)th->alien_stack_start);
#endif

#ifdef LISP_FEATURE_SB_THREAD
    th->pseudo_atomic_bits=0;
#elif defined LISP_FEATURE_GENCGC
    clear_pseudo_atomic_atomic(th);
    clear_pseudo_atomic_interrupted(th);
#endif

#ifdef LISP_FEATURE_GENCGC
    gc_set_region_empty(&th->alloc_region);
# if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
    gc_set_region_empty(&th->sprof_alloc_region);
# endif
#endif
#ifdef LISP_FEATURE_SB_THREAD
    /* This parallels the same logic in globals.c for the
     * single-threaded foreign_function_call_active, KLUDGE and
     * all. */
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    th->foreign_function_call_active = 0;
#else
    th->foreign_function_call_active = 1;
#endif
#endif

#ifndef LISP_FEATURE_SB_THREAD
    /* the tls-points-into-struct-thread trick is only good for threaded
     * sbcl, because unithread sbcl doesn't have tls.  So, we copy the
     * appropriate values from struct thread here, and make sure that
     * we use the appropriate SymbolValue macros to access any of the
     * variable quantities from the C runtime.  It's not quite OAOOM,
     * it just feels like it */
    SetSymbolValue(BINDING_STACK_START,(lispobj)th->binding_stack_start,th);
    SetSymbolValue(CONTROL_STACK_START,(lispobj)th->control_stack_start,th);
    SetSymbolValue(CONTROL_STACK_END,(lispobj)th->control_stack_end,th);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    SetSymbolValue(ALIEN_STACK_POINTER,(lispobj)th->alien_stack_pointer,th);
#endif
#endif
    bind_variable(CURRENT_CATCH_BLOCK,make_fixnum(0),th);
    bind_variable(CURRENT_UNWIND_PROTECT_BLOCK,make_fixnum(0),th);
    bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,make_fixnum(0),th);
    bind_variable(INTERRUPT_PENDING, NIL,th);
    bind_variable(INTERRUPTS_ENABLED,T,th);
    bind_variable(ALLOW_WITH_INTERRUPTS,T,th);
    bind_variable(GC_PENDING,NIL,th);
    bind_variable(ALLOC_SIGNAL,NIL,th);
#ifdef PINNED_OBJECTS
    bind_variable(PINNED_OBJECTS,NIL,th);
#endif
#ifdef LISP_FEATURE_SB_THREAD
    bind_variable(STOP_FOR_GC_PENDING,NIL,th);
#endif
#if defined(LISP_FEATURE_SB_SAFEPOINT)
    bind_variable(GC_SAFE,NIL,th);
    bind_variable(IN_SAFEPOINT,NIL,th);
#endif
#ifdef LISP_FEATURE_SB_THRUPTION
    bind_variable(THRUPTION_PENDING,NIL,th);
    bind_variable(RESTART_CLUSTERS,NIL,th);
#endif
#ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    access_control_stack_pointer(th)=th->control_stack_start;
#endif

    th->interrupt_data->pending_handler = 0;
    th->interrupt_data->gc_blocked_deferrables = 0;
#ifdef GENCGC_IS_PRECISE
    th->interrupt_data->allocation_trap_context = 0;
#endif
    th->no_tls_value_marker=initial_function;

#if defined(LISP_FEATURE_WIN32)
    for (i = 0; i<sizeof(th->private_events.events)/
             sizeof(th->private_events.events[0]); ++i) {
        th->private_events.events[i] = CreateEvent(NULL,FALSE,FALSE,NULL);
    }
    th->synchronous_io_handle_and_flag = 0;
#endif
    th->stepping = 0;
    return th;
}
void create_initial_thread(lispobj initial_function) {
    struct thread *th=create_thread_struct(initial_function);
#ifdef LISP_FEATURE_SB_THREAD
    pthread_key_create(&lisp_thread, 0);
#endif
    if(th) {
        initial_thread_trampoline(th); /* no return */
    } else lose("can't create initial thread\n");
}
#ifdef LISP_FEATURE_SB_THREAD

#ifndef __USE_XOPEN2K
extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
                                  size_t __stacksize);
#endif

boolean create_os_thread(struct thread *th,os_thread_t *kid_tid)
{
    /* The new thread inherits the restrictive signal mask set here,
     * and enables signals again when it is set up properly. */
    sigset_t oldset;
    boolean r=1;
    int retcode = 0, initcode;

    FSHOW_SIGNAL((stderr,"/create_os_thread: creating new thread\n"));

    /* Blocking deferrable signals is enough, no need to block
     * SIG_STOP_FOR_GC because the child process is not linked onto
     * all_threads until it's ready. */
    block_deferrable_signals(&oldset);

#ifdef LOCK_CREATE_THREAD
    retcode = pthread_mutex_lock(&create_thread_lock);
    gc_assert(retcode == 0);
    FSHOW_SIGNAL((stderr,"/create_os_thread: got lock\n"));
#endif

    if((initcode = pthread_attr_init(th->os_attr)) ||
       /* call_into_lisp_first_time switches the stack for the initial
        * thread.  For the others, we use this. */
#if defined(LISP_FEATURE_WIN32)
       (pthread_attr_setstacksize(th->os_attr, thread_control_stack_size)) ||
#else
# if defined(LISP_FEATURE_C_STACK_IS_CONTROL_STACK)
       (pthread_attr_setstack(th->os_attr,th->control_stack_start,
                              thread_control_stack_size)) ||
# else
       (pthread_attr_setstack(th->os_attr,th->alien_stack_start,
                              ALIEN_STACK_SIZE)) ||
# endif
#endif
       (retcode = pthread_create
        (kid_tid,th->os_attr,(void *(*)(void *))new_thread_trampoline,th))) {
        FSHOW_SIGNAL((stderr, "init = %d\n", initcode));
        FSHOW_SIGNAL((stderr, "pthread_create returned %d, errno %d\n",
                      retcode, errno));
        if(retcode < 0) {
            perror("create_os_thread");
        }
        r=0;
    }

#ifdef LOCK_CREATE_THREAD
    retcode = pthread_mutex_unlock(&create_thread_lock);
    gc_assert(retcode == 0);
    FSHOW_SIGNAL((stderr,"/create_os_thread: released lock\n"));
#endif
    thread_sigmask(SIG_SETMASK,&oldset,0);
    return r;
}
os_thread_t create_thread(lispobj initial_function) {
    struct thread *th, *thread = arch_os_get_current_thread();
    os_thread_t kid_tid = 0;

    /* Must defend against async unwinds. */
    if (SymbolValue(INTERRUPTS_ENABLED, thread) != NIL)
        lose("create_thread is not safe when interrupts are enabled.\n");

    /* Assuming that a fresh thread struct has no lisp objects in it,
     * linking it to all_threads can be left to the thread itself
     * without fear of gc lossage.  initial_function violates this
     * assumption and must stay pinned until the child starts up. */
    th = create_thread_struct(initial_function);
    if (th && !create_os_thread(th,&kid_tid)) {
        free_thread_struct(th);
        kid_tid = 0;
    }
    return kid_tid;
}
/* stopping the world is a two-stage process.  From this thread we signal
 * all the others with SIG_STOP_FOR_GC.  The handler for this signal does
 * the usual pseudo-atomic checks (we don't want to stop a thread while
 * it's in the middle of allocation) then waits for another SIG_STOP_FOR_GC.
 *
 * (With SB-SAFEPOINT, see the definitions in safepoint.c instead.)
 */
#ifndef LISP_FEATURE_SB_SAFEPOINT

/* To avoid deadlocks when gc stops the world all clients of each
 * mutex must enable or disable SIG_STOP_FOR_GC for the duration of
 * holding the lock, but they must agree on which. */
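
/* Two passes below: first send SIG_STOP_FOR_GC to every other RUNNING
 * thread while holding all_threads_lock, then wait for each of them to
 * leave STATE_RUNNING.  The lock stays held until gc_start_the_world()
 * releases it. */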
void gc_stop_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status, lock_ret;
#ifdef LOCK_CREATE_THREAD
    /* KLUDGE: Stopping the thread during pthread_create() causes deadlock
     * on FreeBSD. */
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on create_thread_lock\n"));
    lock_ret = pthread_mutex_lock(&create_thread_lock);
    gc_assert(lock_ret == 0);
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got create_thread_lock\n"));
#endif
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock\n"));
    /* keep threads from starting while the world is stopped. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got lock\n"));
    /* stop all other threads by sending them SIG_STOP_FOR_GC */
    for(p=all_threads; p; p=p->next) {
        gc_assert(p->os_thread != 0);
        FSHOW_SIGNAL((stderr,"/gc_stop_the_world: thread=%lu, state=%x\n",
                      p->os_thread, thread_state(p)));
        if((p!=th) && ((thread_state(p)==STATE_RUNNING))) {
            FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending thread %lu\n",
                          p->os_thread));
            /* We already hold all_threads_lock, P can become DEAD but
             * cannot exit, ergo it's safe to use pthread_kill. */
            status=pthread_kill(p->os_thread,SIG_STOP_FOR_GC);
            if (status==ESRCH) {
                /* This thread has exited. */
                gc_assert(thread_state(p)==STATE_DEAD);
            } else if (status) {
                lose("cannot send suspend thread=%lu: %d, %s\n",
                     p->os_thread,status,strerror(status));
            }
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:signals sent\n"));
    for(p=all_threads;p;p=p->next) {
        if (p!=th) {
            FSHOW_SIGNAL
                ((stderr,
                  "/gc_stop_the_world: waiting for thread=%lu: state=%x\n",
                  p->os_thread, thread_state(p)));
            wait_for_thread_state_change(p, STATE_RUNNING);
            if (p->state == STATE_RUNNING)
                lose("/gc_stop_the_world: unexpected state");
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:end\n"));
}
void gc_start_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int lock_ret;
    /* if a resumed thread creates a new thread before we're done with
     * this loop, the new thread will get consed on the front of
     * all_threads, but it won't have been stopped so won't need
     * restarting */
    FSHOW_SIGNAL((stderr,"/gc_start_the_world:begin\n"));
    for(p=all_threads;p;p=p->next) {
        gc_assert(p->os_thread!=0);
        if (p!=th) {
            lispobj state = thread_state(p);
            if (state != STATE_DEAD) {
                if(state != STATE_STOPPED) {
                    lose("gc_start_the_world: wrong thread state is %d\n",
                         fixnum_value(state));
                }
                FSHOW_SIGNAL((stderr, "/gc_start_the_world: resuming %lu\n",
                              p->os_thread));
                set_thread_state(p, STATE_RUNNING);
            }
        }
    }

    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);
#ifdef LOCK_CREATE_THREAD
    lock_ret = pthread_mutex_unlock(&create_thread_lock);
    gc_assert(lock_ret == 0);
#endif

    FSHOW_SIGNAL((stderr,"/gc_start_the_world:end\n"));
}

#endif /* !LISP_FEATURE_SB_SAFEPOINT */
#endif /* LISP_FEATURE_SB_THREAD */
int
thread_yield()
{
#ifdef LISP_FEATURE_SB_THREAD
    return sched_yield();
#else
    return 0;
#endif
}
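
/* Wake a target thread so it notices pending work: on Windows this goes
 * through kill_safely with pseudo-signal 1, without thruptions it sends
 * SIGPIPE via kill_safely, and otherwise it defers to wake_thread_posix
 * (see below). */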
int
wake_thread(os_thread_t os_thread)
{
#if defined(LISP_FEATURE_WIN32)
    return kill_safely(os_thread, 1);
#elif !defined(LISP_FEATURE_SB_THRUPTION)
    return kill_safely(os_thread, SIGPIPE);
#else
    return wake_thread_posix(os_thread);
#endif
}
/* If the thread id given does not belong to a running thread (it has
 * exited or never even existed) pthread_kill _may_ fail with ESRCH,
 * but it is also allowed to just segfault, see
 * <http://udrepper.livejournal.com/16844.html>.
 *
 * Relying on thread ids can easily backfire since ids are recycled
 * (NPTL recycles them extremely fast) so a signal can be sent to
 * another process if the one it was sent to exited.
 *
 * For these reasons, we must make sure that the thread is still alive
 * when the pthread_kill is called and return if the thread is
 * exiting.
 *
 * Note (DFL, 2011-06-22): At the time of writing, this function is only
 * used for INTERRUPT-THREAD, hence the wake_thread special-case for
 * Windows is OK. */
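
/* Three cases are handled below: signalling self (no locking needed),
 * signalling another Lisp thread only after finding it on all_threads
 * under all_threads_lock with deferrables blocked, and the non-threaded
 * build, where the only valid target is the current thread and the
 * signal is simply raise()d. */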
int
kill_safely(os_thread_t os_thread, int signal)
{
    FSHOW_SIGNAL((stderr,"/kill_safely: %lu, %d\n", os_thread, signal));
    {
#ifdef LISP_FEATURE_SB_THREAD
        sigset_t oldset;
        struct thread *thread;
        /* Frequent special case: resignalling to self.  The idea is
         * that leave_region safepoint will acknowledge the signal, so
         * there is no need to take locks, roll thread to safepoint
         * etc. */
        /* Kludge (on safepoint builds): At the moment, this isn't just
         * an optimization; rather it masks the fact that
         * gc_stop_the_world() grabs the all_threads mutex without
         * releasing it, and since we're not using recursive pthread
         * mutexes, the pthread_mutex_lock() around the all_threads loop
         * would go wrong.  Why are we running interruptions while
         * stopping the world though?  Test case is (:ASYNC-UNWIND
         * :SPECIALS), especially with s/10/100/ in both loops. */
        if (os_thread == pthread_self()) {
            pthread_kill(os_thread, signal);
#ifdef LISP_FEATURE_WIN32
            check_pending_thruptions(NULL);
#endif
            return 0;
        }

        /* pthread_kill is not async signal safe and we don't want to be
         * interrupted while holding the lock. */
        block_deferrable_signals(&oldset);
        pthread_mutex_lock(&all_threads_lock);
        for (thread = all_threads; thread; thread = thread->next) {
            if (thread->os_thread == os_thread) {
                int status = pthread_kill(os_thread, signal);
                if (status)
                    lose("kill_safely: pthread_kill failed with %d\n", status);
#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THRUPTION)
                wake_thread_win32(thread);
#endif
                break;
            }
        }
        pthread_mutex_unlock(&all_threads_lock);
        thread_sigmask(SIG_SETMASK,&oldset,0);
        if (thread)
            return 0;
        else
            return -1;
#elif defined(LISP_FEATURE_WIN32)
        return 0;
#else
        int status;
        if (os_thread != 0)
            lose("kill_safely: who do you want to kill? %d?\n", os_thread);
        /* Dubious (as in don't know why it works) workaround for the
         * signal sometimes not being generated on darwin. */
#ifdef LISP_FEATURE_DARWIN
        {
            sigset_t oldset;
            sigprocmask(SIG_BLOCK, &deferrable_sigset, &oldset);
            status = raise(signal);
            sigprocmask(SIG_SETMASK,&oldset,0);
        }
#else
        status = raise(signal);
#endif
        if (status == 0) {
            return 0;
        } else {
            lose("cannot raise signal %d, %d %s\n",
                 signal, status, strerror(errno));
        }
#endif
    }
}