2 * This software is part of the SBCL system. See the README file for
5 * This software is derived from the CMU CL system, which was
6 * written at Carnegie Mellon University and released into the
7 * public domain. The software is in the public domain and is
8 * provided with absolutely no warranty. See the COPYING and CREDITS
9 * files for more information.
17 #ifndef LISP_FEATURE_WIN32
22 #include <sys/types.h>
23 #ifndef LISP_FEATURE_WIN32
27 #ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
28 #include <mach/mach.h>
29 #include <mach/mach_error.h>
30 #include <mach/mach_types.h>
34 #include "validate.h" /* for BINDING_STACK_SIZE etc */
37 #include "target-arch-os.h"
41 #include "genesis/cons.h"
42 #include "genesis/fdefn.h"
43 #include "interr.h" /* for lose() */
45 #include "gc-internal.h"
47 #include "pseudo-atomic.h"
48 #include "interrupt.h"
51 #ifdef LISP_FEATURE_SB_THREAD
53 #ifdef LISP_FEATURE_OPENBSD
54 #include <pthread_np.h>
57 #ifdef LISP_FEATURE_SUNOS
61 #ifdef LISP_FEATURE_WIN32
62 # define IMMEDIATE_POST_MORTEM
/* Data that must outlive an exiting thread so that its stack memory
 * and pthread attributes can be reclaimed after it is gone. */
struct thread_post_mortem
{
    os_thread_t os_thread;       /* id of the exited thread */
    pthread_attr_t *os_attr;     /* attr object to destroy at cleanup */
    os_vm_address_t os_address;  /* base of the thread's memory block */

/* Most recently exited thread awaiting cleanup (deferred post-mortem). */
static struct thread_post_mortem * volatile pending_thread_post_mortem = 0;

/* Size in bytes of the TLS value area; identical for every thread. */
int dynamic_values_bytes=TLS_SIZE*sizeof(lispobj); /* same for all threads */
/* Head of the doubly-linked list of all live lisp threads. */
struct thread *all_threads;
#ifdef LISP_FEATURE_SB_THREAD
/* Guards all_threads; also held by GC while the world is stopped. */
pthread_mutex_t all_threads_lock = PTHREAD_MUTEX_INITIALIZER;
/* Serializes pthread_create() against post-mortem and stop-the-world. */
static pthread_mutex_t create_thread_lock = PTHREAD_MUTEX_INITIALIZER;
#ifdef LISP_FEATURE_GCC_TLS
/* Per-thread pointer back to the current lisp thread struct. */
__thread struct thread *current_thread;
/* Key marking a pthread as a lisp thread (value 1) or foreign (0/unset). */
pthread_key_t lisp_thread = 0;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
/* Assembly entry point that switches onto the lisp control stack before
 * the very first call into Lisp. */
extern lispobj call_into_lisp_first_time(lispobj fun, lispobj *args, int nargs)
# ifdef LISP_FEATURE_X86_64
__attribute__((sysv_abi))
/* Push TH onto the front of all_threads.
 * Caller must hold all_threads_lock. */
link_thread(struct thread *th)
    if (all_threads) all_threads->prev=th;
    th->next=all_threads;
#ifdef LISP_FEATURE_SB_THREAD
/* Remove TH from the all_threads list, fixing up neighbor links.
 * Caller must hold all_threads_lock.
 * NOTE(review): the null checks guarding these dereferences (for a TH at
 * the head or tail of the list) appear to be in elided lines — confirm
 * against upstream before relying on this excerpt. */
unlink_thread(struct thread *th)
        th->prev->next = th->next;
        all_threads = th->next;
        th->next->prev = th->prev;
#ifndef LISP_FEATURE_SB_SAFEPOINT
/* Only access thread state with blockables blocked. */
/* Read THREAD->state under state_sem, with blockable signals masked so
 * the read cannot be interrupted by a signal handler that also takes
 * the semaphore. Returns the sampled state. */
thread_state(struct thread *thread)
    block_blockable_signals(&old);
    os_sem_wait(thread->state_sem, "thread_state");
    state = thread->state;
    os_sem_post(thread->state_sem, "thread_state");
    thread_sigmask(SIG_SETMASK, &old, NULL);
/* Transition THREAD to STATE under state_sem, and wake every waiter
 * parked in wait_for_thread_state_change() whose wait condition is now
 * satisfied (one sem_post per recorded waiter). Runs with blockable
 * signals masked for the duration. */
set_thread_state(struct thread *thread, lispobj state)
    int i, waitcount = 0;
    block_blockable_signals(&old);
    os_sem_wait(thread->state_sem, "set_thread_state");
    if (thread->state != state) {
        /* Leaving RUNNING: release threads waiting for "not running". */
        if ((STATE_STOPPED==state) ||
            (STATE_DEAD==state)) {
            waitcount = thread->state_not_running_waitcount;
            thread->state_not_running_waitcount = 0;
            for (i=0; i<waitcount; i++)
                os_sem_post(thread->state_not_running_sem, "set_thread_state (not running)");
        /* Leaving STOPPED: release threads waiting for "not stopped". */
        if ((STATE_RUNNING==state) ||
            (STATE_DEAD==state)) {
            waitcount = thread->state_not_stopped_waitcount;
            thread->state_not_stopped_waitcount = 0;
            for (i=0; i<waitcount; i++)
                os_sem_post(thread->state_not_stopped_sem, "set_thread_state (not stopped)");
        thread->state = state;
    os_sem_post(thread->state_sem, "set_thread_state");
    thread_sigmask(SIG_SETMASK, &old, NULL);
/* Block until THREAD's state is something other than STATE. Registers
 * this caller on the matching "not running"/"not stopped" semaphore
 * (incrementing the waitcount that set_thread_state() consumes), then
 * waits on it. Blockable signals are masked throughout. */
wait_for_thread_state_change(struct thread *thread, lispobj state)
    block_blockable_signals(&old);
    os_sem_wait(thread->state_sem, "wait_for_thread_state_change");
    if (thread->state == state) {
            /* Waiting for the thread to stop being RUNNING. */
            wait_sem = thread->state_not_running_sem;
            thread->state_not_running_waitcount++;
            /* Waiting for the thread to stop being STOPPED. */
            wait_sem = thread->state_not_stopped_sem;
            thread->state_not_stopped_waitcount++;
            lose("Invalid state in wait_for_thread_state_change: "OBJ_FMTX"\n", state);
    /* Release state_sem before parking, or set_thread_state could never
     * run to wake us. */
    os_sem_post(thread->state_sem, "wait_for_thread_state_change");
        os_sem_wait(wait_sem, "wait_for_thread_state_change");
    thread_sigmask(SIG_SETMASK, &old, NULL);
#endif /* sb-safepoint */
#endif /* sb-thread */
/* Entry point for the very first (initial) lisp thread: performs
 * per-thread OS init, arms the stack guard pages, then transfers
 * control into Lisp via the function stashed in no_tls_value_marker.
 * Returns 1 on arch_os_thread_init failure. */
initial_thread_trampoline(struct thread *th)
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    lispobj *args = NULL;
#ifdef LISP_FEATURE_SB_THREAD
    /* Mark this pthread as a lisp thread. */
    pthread_setspecific(lisp_thread, (void *)1);
#if defined(THREADS_USING_GCSIGNAL) && (defined(LISP_FEATURE_PPC) || defined(LISP_FEATURE_ARM64))
    /* SIG_STOP_FOR_GC defaults to blocked on PPC? */
    unblock_gc_signals(0,0);
    /* The function to run was smuggled in via no_tls_value_marker;
     * restore the slot's sentinel value once it is retrieved. */
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) return 1;
    th->os_thread=thread_self();
#ifndef LISP_FEATURE_WIN32
    protect_control_stack_hard_guard_page(1, NULL);
    protect_binding_stack_hard_guard_page(1, NULL);
    protect_alien_stack_hard_guard_page(1, NULL);
#ifndef LISP_FEATURE_WIN32
    protect_control_stack_guard_page(1, NULL);
    protect_binding_stack_guard_page(1, NULL);
    protect_alien_stack_guard_page(1, NULL);
    /* WIN32 has a special stack arrangement, calling
     * call_into_lisp_first_time will put the new stack in the middle
     * of the current stack */
#if !defined(LISP_FEATURE_WIN32) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
    return call_into_lisp_first_time(function,args,0);
    return funcall0(function);
#ifdef LISP_FEATURE_SB_THREAD
# if defined(IMMEDIATE_POST_MORTEM)
/*
 * If this feature is set, we are running on a stack managed by the OS,
 * and no fancy delays are required for anything. Just do it.
 */
/* Immediately detach the dying thread, destroy its pthread attr, and
 * release the memory block backing its stacks and struct. */
schedule_thread_post_mortem(struct thread *corpse)
    pthread_detach(pthread_self());
    int result = pthread_attr_destroy(corpse->os_attr);
#if defined(LISP_FEATURE_WIN32)
    os_invalidate_free(corpse->os_address, THREAD_STRUCT_SIZE);
    os_invalidate(corpse->os_address, THREAD_STRUCT_SIZE);
257 /* THREAD POST MORTEM CLEANUP
259 * Memory allocated for the thread stacks cannot be reclaimed while
260 * the thread is still alive, so we need a mechanism for post mortem
261 * cleanups. FIXME: We actually have three, for historical reasons as
262 * the saying goes. Do we really need three? Nikodemus guesses that
263 * not anymore, now that we properly call pthread_attr_destroy before
264 * freeing the stack. */
/* Heap-allocate a thread_post_mortem record describing CORPSE so its
 * resources (pthread id, attr, memory block) can be reclaimed later.
 * Ownership of the record transfers to the caller. */
static struct thread_post_mortem *
plan_thread_post_mortem(struct thread *corpse)
    struct thread_post_mortem *post_mortem = malloc(sizeof(struct thread_post_mortem));
    gc_assert(post_mortem);
    post_mortem->os_thread = corpse->os_thread;
    post_mortem->os_attr = corpse->os_attr;
    post_mortem->os_address = corpse->os_address;
        /* FIXME: When does this happen? */
/* Reclaim everything recorded in POST_MORTEM: join the dead thread,
 * destroy its pthread attr, and unmap its memory block. lose()s on any
 * pthread error. */
perform_thread_post_mortem(struct thread_post_mortem *post_mortem)
#ifdef CREATE_POST_MORTEM_THREAD
    pthread_detach(pthread_self());
        /* The thread may exit before pthread_create() has finished
           initialization and it may write into already unmapped
           memory. This lock doesn't actually need to protect
           anything, just to make sure that at least one call to
           pthread_create() has finished.

           Possible improvements: stash the address of the thread
           struct for which a pthread is being created and don't lock
           here if it's not the one being terminated. */
        result = pthread_mutex_lock(&create_thread_lock);
        gc_assert(result == 0);
        result = pthread_mutex_unlock(&create_thread_lock);
        gc_assert(result == 0);
        if ((result = pthread_join(post_mortem->os_thread, NULL))) {
            lose("Error calling pthread_join in perform_thread_post_mortem:\n%s",
        if ((result = pthread_attr_destroy(post_mortem->os_attr))) {
            lose("Error calling pthread_attr_destroy in perform_thread_post_mortem:\n%s",
        os_invalidate(post_mortem->os_address, THREAD_STRUCT_SIZE);
/* Deferred variant: record CORPSE's resources in a post_mortem struct
 * and either hand it to a dedicated cleanup thread or swap it into
 * pending_thread_post_mortem, cleaning up the previously pending one.
 * The one-slot delay ensures the corpse's stack is no longer in use. */
schedule_thread_post_mortem(struct thread *corpse)
    struct thread_post_mortem *post_mortem = NULL;
        post_mortem = plan_thread_post_mortem(corpse);
#ifdef CREATE_POST_MORTEM_THREAD
        int result = pthread_create(&thread, NULL, perform_thread_post_mortem, post_mortem);
        /* Publish the new record and reap the previously pending one. */
        post_mortem = (struct thread_post_mortem *)
            swap_lispobjs((lispobj *)(void *)&pending_thread_post_mortem,
                          (lispobj)post_mortem);
        perform_thread_post_mortem(post_mortem);
# endif /* !IMMEDIATE_POST_MORTEM */
/* Note: scribble must be stack-allocated */
/* Common setup for a thread entering the lisp world: per-thread OS
 * init, guard pages (when GUARDP), and linking onto all_threads under
 * all_threads_lock. On safepoint builds also publishes the C stack
 * pointer and registers GC safety state via SCRIBBLE. */
init_new_thread(struct thread *th, init_thread_data *scribble, int guardp)
    /* Mark this pthread as a lisp thread. */
    pthread_setspecific(lisp_thread, (void *)1);
    if(arch_os_thread_init(th)==0) {
        /* FIXME: handle error */
        lose("arch_os_thread_init failed\n");
    th->os_thread=thread_self();
        protect_control_stack_guard_page(1, NULL);
        protect_binding_stack_guard_page(1, NULL);
        protect_alien_stack_guard_page(1, NULL);
    /* Since GC can only know about this thread from the all_threads
     * list and we're just adding this thread to it, there is no
     * danger of deadlocking even with SIG_STOP_FOR_GC blocked. */
#ifdef LISP_FEATURE_SB_SAFEPOINT
    *th->csp_around_foreign_call = (lispobj)scribble;
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    /* Kludge: Changed the order of some steps between the safepoint/
     * non-safepoint versions of this code. Can we unify this more? */
#ifdef LISP_FEATURE_SB_SAFEPOINT
        gc_state_wait(GC_NONE);
    push_gcing_safety(&scribble->safety);
/* Tear down everything init_new_thread() set up, in reverse: flush the
 * thread's allocation regions into the page tables, unlink it from
 * all_threads under all_threads_lock, mark it DEAD, destroy its state
 * semaphores, and dissociate the pthread from its struct thread. */
undo_init_new_thread(struct thread *th, init_thread_data *scribble)
    /* Kludge: Changed the order of some steps between the safepoint/
     * non-safepoint versions of this code. Can we unify this more? */
#ifdef LISP_FEATURE_SB_SAFEPOINT
    block_blockable_signals(0);
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
    pop_gcing_safety(&scribble->safety);
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    block_blockable_signals(0);
    set_thread_state(th, STATE_DEAD);
    /* SIG_STOP_FOR_GC is blocked and GC might be waiting for this
     * thread, but since we are already dead it won't wait long. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
    /* NOTE(review): the unlock result does not appear to be captured in
     * lock_ret before the following assert — confirm against upstream;
     * an assignment may have been lost in this excerpt. */
    pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    arch_os_thread_cleanup(th);
#ifndef LISP_FEATURE_SB_SAFEPOINT
    os_sem_destroy(th->state_sem);
    os_sem_destroy(th->state_not_running_sem);
    os_sem_destroy(th->state_not_stopped_sem);
#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
    mach_lisp_thread_destroy(th);
#if defined(LISP_FEATURE_WIN32)
    /* Close the per-thread win32 event handles. */
         (int) (sizeof(th->private_events.events)/
               sizeof(th->private_events.events[0])); ++i) {
      CloseHandle(th->private_events.events[i]);
    TlsSetValue(OUR_TLS_INDEX,NULL);
    /* Undo the association of the current pthread to its `struct thread',
     * such that we can call arch_os_get_current_thread() later in this
     * thread and cleanly get back NULL. */
#ifdef LISP_FEATURE_GCC_TLS
    current_thread = NULL;
    pthread_setspecific(specials, NULL);
/* this is the first thing that runs in the child (which is why the
 * silly calling convention). Basically it calls the user's requested
 * lisp function after doing arch_os_thread_init and whatever other
 * bookkeeping needs to be done */
new_thread_trampoline(struct thread *th)
    init_thread_data scribble;
    FSHOW((stderr,"/creating thread %lu\n", thread_self()));
    /* Sanity: the creator launched us with deferrables blocked. */
    check_deferrables_blocked_or_lose(0);
#ifndef LISP_FEATURE_SB_SAFEPOINT
    check_gc_signals_unblocked_or_lose(0);
    /* The lisp function to run was smuggled in via no_tls_value_marker;
     * restore the slot's sentinel once it is retrieved. */
    lispobj function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    init_new_thread(th, &scribble, 1);
    result = funcall0(function);
    undo_init_new_thread(th, &scribble);
    schedule_thread_post_mortem(th);
    FSHOW((stderr,"/exiting thread %lu\n", thread_self()));
/* Forward declarations for thread-struct allocation/release below. */
static struct thread *create_thread_struct(lispobj);
static void free_thread_struct(struct thread *th);
/* Adopt a foreign (non-lisp) pthread into the lisp runtime for the
 * duration of a callback: build a fresh struct thread, discover the
 * OS-provided C stack bounds per platform, run init_new_thread (no
 * guard pages), and bind GC_INHIBIT to suppress GC during the unsafe
 * early consing. detach_os_thread() undoes all of this. */
attach_os_thread(init_thread_data *scribble)
    os_thread_t os = pthread_self();
    odxprint(misc, "attach_os_thread: attaching to %p", os);
    struct thread *th = create_thread_struct(NIL);
    block_deferrable_signals(&scribble->oldset);
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    /* We don't actually want a pthread_attr here, but rather than add
     * `if's to the post-mortem, let's just keep that code happy by
     * keeping it initialized: */
    pthread_attr_init(th->os_attr);
#if !defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_C_STACK_IS_CONTROL_STACK)
    /* On windows, arch_os_thread_init will take care of finding the
     * stack. */
#ifdef LISP_FEATURE_OPENBSD
    pthread_stackseg_np(os, &stack);
    stack_size = stack.ss_size;
    stack_addr = (void*)((size_t)stack.ss_sp - stack_size);
#elif defined LISP_FEATURE_SUNOS
    thr_stksegment(&stack);
    stack_size = stack.ss_size;
    stack_addr = (void*)((size_t)stack.ss_sp - stack_size);
#elif defined(LISP_FEATURE_DARWIN)
    stack_addr = pthread_get_stackaddr_np(os);
    stack_size = pthread_get_stacksize_np(os);
#ifdef LISP_FEATURE_FREEBSD
    pthread_attr_get_np(os, &attr);
    /* Glibc-style fallback; local prototype for non-XOPEN builds. */
    int pthread_getattr_np(pthread_t, pthread_attr_t *);
    pthread_getattr_np(os, &attr);
    pthread_attr_getstack(&attr, &stack_addr, &stack_size);
    /* Use the OS thread's own C stack as the lisp control stack. */
    th->control_stack_start = stack_addr;
    th->control_stack_end = (void *) (((uintptr_t) stack_addr) + stack_size);
    init_new_thread(th, scribble, 0);
    /* We will be calling into Lisp soon, and the functions being called
     * recklessly ignore the comment in target-thread which says that we
     * must be careful to not cause GC while initializing a new thread.
     * Since we first need to create a fresh thread object, it's really
     * tempting to just perform such unsafe allocation though. So let's
     * at least try to suppress GC before consing, and hope that it
     * works. */
    bind_variable(GC_INHIBIT, T, th);
        = (uword_t) th->control_stack_end - (uword_t) th->control_stack_start;
    odxprint(misc, "attach_os_thread: attached %p as %p (0x%lx bytes stack)",
             os, th, (long) stacksize);
550 detach_os_thread(init_thread_data
*scribble
)
552 struct thread
*th
= arch_os_get_current_thread();
553 odxprint(misc
, "detach_os_thread: detaching");
555 undo_init_new_thread(th
, scribble
);
557 odxprint(misc
, "deattach_os_thread: detached");
558 pthread_setspecific(lisp_thread
, (void *)0);
559 thread_sigmask(SIG_SETMASK
, &scribble
->oldset
, 0);
560 free_thread_struct(th
);
/* Entry point for alien callbacks. If invoked on a thread the runtime
 * does not know, temporarily attach it (attach/detach_os_thread) and
 * run the callback via ENTER_FOREIGN_CALLBACK; otherwise dispatch to
 * ENTER_ALIEN_CALLBACK, inside a safepoint region when applicable. */
callback_wrapper_trampoline(
#if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
    /* On the x86oid backends, the assembly wrapper happens to not pass
     * in ENTER_ALIEN_CALLBACK explicitly for safepoints. However, the
     * platforms with precise GC are tricky enough already, and I want
     * to minimize the read-time conditionals. For those platforms, I'm
     * only replacing funcall3 with callback_wrapper_trampoline while
     * keeping the arguments unchanged. --DFL */
    lispobj __attribute__((__unused__)) fun,
    lispobj arg0, lispobj arg1, lispobj arg2)
#if defined(LISP_FEATURE_WIN32)
    pthread_np_notice_thread();
    struct thread * th = arch_os_get_current_thread();
    if (!th) { /* callback invoked in non-lisp thread */
        init_thread_data scribble;
        attach_os_thread(&scribble);
        funcall3(StaticSymbolFunction(ENTER_FOREIGN_CALLBACK), arg0,arg1,arg2);
        detach_os_thread(&scribble);
#ifdef LISP_FEATURE_WIN32
    /* arg2 is the pointer to a return value, which sits on the stack */
    th->carried_base_pointer = (os_context_register_t) *(((void**)arg2)-1);
#ifdef LISP_FEATURE_SB_SAFEPOINT
    WITH_GC_AT_SAFEPOINTS_ONLY()
        funcall3(SymbolValue(ENTER_ALIEN_CALLBACK, 0), arg0, arg1, arg2);
#endif /* LISP_FEATURE_SB_THREAD */
/* Release the single memory block backing TH's stacks and struct
 * (allocated in create_thread_struct). */
static void __attribute__((unused))
free_thread_struct(struct thread *th)
#if defined(LISP_FEATURE_WIN32)
    os_invalidate_free((os_vm_address_t) th->os_address, THREAD_STRUCT_SIZE);
    os_invalidate((os_vm_address_t) th->os_address, THREAD_STRUCT_SIZE);
#ifdef LISP_FEATURE_SB_THREAD
/* FIXME: should be MAX_INTERRUPTS -1 ? */
/* First TLS index available for dynamically assigned lisp specials;
 * indices below this are reserved for struct thread slots. */
const unsigned int tls_index_start =
  MAX_INTERRUPTS + sizeof(struct thread)/sizeof(lispobj);
/* this is called from any other thread to create the new one, and
 * initialize all parts of it that can be initialized from another
 * thread. */
static struct thread *
create_thread_struct(lispobj initial_function) {
    union per_thread_data *per_thread;
    struct thread *th=0;        /* subdue gcc */
    void *aligned_spaces=0;
#if defined(LISP_FEATURE_SB_THREAD) || defined(LISP_FEATURE_WIN32)
    /* May as well allocate all the spaces at once: it saves us from
     * having to decide what to do if only some of the allocations
     * succeed. SPACES must be appropriately aligned, since the GC
     * expects the control stack to start at a page boundary -- and
     * the OS may have even more rigorous requirements. We can't rely
     * on the alignment passed from os_validate, since that might
     * assume the current (e.g. 4k) pagesize, while we calculate with
     * the biggest (e.g. 64k) pagesize allowed by the ABI. */
    spaces=os_validate(0, THREAD_STRUCT_SIZE);
    /* Aligning up is safe as THREAD_STRUCT_SIZE has
     * THREAD_ALIGNMENT_BYTES padding. */
    aligned_spaces = (void *)((((uword_t)(char *)spaces)
                               + THREAD_ALIGNMENT_BYTES-1)
                              &~(uword_t)(THREAD_ALIGNMENT_BYTES-1));
        thread_control_stack_size+
    /* The per_thread union (TLS area + struct thread) lives just past
     * the csp page. */
    per_thread=(union per_thread_data *)
        (csp_page + THREAD_CSP_PAGE_SIZE);
#ifdef LISP_FEATURE_SB_THREAD
    /* Every TLS slot starts out "unbound". */
    for(i = 0; i < (dynamic_values_bytes / sizeof(lispobj)); i++)
        per_thread->dynamic_values[i] = NO_TLS_VALUE_MARKER_WIDETAG;
    /* Lay out control stack, binding stack and alien stack inside the
     * single allocated block. */
    th=&per_thread->thread;
    th->os_address = spaces;
    th->control_stack_start = aligned_spaces;
    th->binding_stack_start=
        (lispobj*)((void*)th->control_stack_start+thread_control_stack_size);
    th->control_stack_end = th->binding_stack_start;
    th->control_stack_guard_page_protected = T;
    th->alien_stack_start=
        (lispobj*)((void*)th->binding_stack_start+BINDING_STACK_SIZE);
    set_binding_stack_pointer(th,th->binding_stack_start);
#ifdef LISP_FEATURE_SB_SAFEPOINT
# ifdef LISP_FEATURE_WIN32
    th->carried_base_pointer = 0;
# ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    th->pc_around_foreign_call = 0;
    th->csp_around_foreign_call = csp_page;
    /* Non-pointer (GC-invisible) per-thread data lives right after the
     * TLS value area. */
    struct nonpointer_thread_data *nonpointer_data
        = (void *) &per_thread->dynamic_values[TLS_SIZE];
    th->interrupt_data = &nonpointer_data->interrupt_data;
#ifdef LISP_FEATURE_SB_THREAD
    th->os_attr = &nonpointer_data->os_attr;
# ifndef LISP_FEATURE_SB_SAFEPOINT
    /* State semaphores for the RUNNING/STOPPED/DEAD protocol. */
    th->state_sem = &nonpointer_data->state_sem;
    th->state_not_running_sem = &nonpointer_data->state_not_running_sem;
    th->state_not_stopped_sem = &nonpointer_data->state_not_stopped_sem;
    os_sem_init(th->state_sem, 1);
    os_sem_init(th->state_not_running_sem, 0);
    os_sem_init(th->state_not_stopped_sem, 0);
    th->state_not_running_waitcount = 0;
    th->state_not_stopped_waitcount = 0;
    th->state=STATE_RUNNING;
#ifdef ALIEN_STACK_GROWS_DOWNWARD
    th->alien_stack_pointer=((void *)th->alien_stack_start
                             + ALIEN_STACK_SIZE-N_WORD_BYTES);
    th->alien_stack_pointer=((void *)th->alien_stack_start);
#ifdef LISP_FEATURE_SB_THREAD
    th->pseudo_atomic_bits=0;
#elif defined LISP_FEATURE_GENCGC
    clear_pseudo_atomic_atomic(th);
    clear_pseudo_atomic_interrupted(th);
#ifdef LISP_FEATURE_GENCGC
    gc_set_region_empty(&th->alloc_region);
# if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
    gc_set_region_empty(&th->sprof_alloc_region);
#ifdef LISP_FEATURE_SB_THREAD
    /* This parallels the same logic in globals.c for the
     * single-threaded foreign_function_call_active, KLUDGE and
     * all. */
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    th->foreign_function_call_active = 0;
    th->foreign_function_call_active = 1;
#ifndef LISP_FEATURE_SB_THREAD
    /* the tls-points-into-struct-thread trick is only good for threaded
     * sbcl, because unithread sbcl doesn't have tls. So, we copy the
     * appropriate values from struct thread here, and make sure that
     * we use the appropriate SymbolValue macros to access any of the
     * variable quantities from the C runtime. It's not quite OAOOM,
     * it just feels like it */
    SetSymbolValue(BINDING_STACK_START,(lispobj)th->binding_stack_start,th);
    SetSymbolValue(CONTROL_STACK_START,(lispobj)th->control_stack_start,th);
    SetSymbolValue(CONTROL_STACK_END,(lispobj)th->control_stack_end,th);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    SetSymbolValue(ALIEN_STACK_POINTER,(lispobj)th->alien_stack_pointer,th);
    /* Establish initial bindings for the special variables the runtime
     * consults. */
    bind_variable(CURRENT_CATCH_BLOCK,make_fixnum(0),th);
    bind_variable(CURRENT_UNWIND_PROTECT_BLOCK,make_fixnum(0),th);
    bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,make_fixnum(0),th);
    bind_variable(INTERRUPT_PENDING, NIL,th);
    bind_variable(INTERRUPTS_ENABLED,T,th);
    bind_variable(ALLOW_WITH_INTERRUPTS,T,th);
    bind_variable(GC_PENDING,NIL,th);
    bind_variable(ALLOC_SIGNAL,NIL,th);
#ifdef PINNED_OBJECTS
    bind_variable(PINNED_OBJECTS,NIL,th);
#ifdef LISP_FEATURE_SB_THREAD
    bind_variable(STOP_FOR_GC_PENDING,NIL,th);
#if defined(LISP_FEATURE_SB_SAFEPOINT)
    bind_variable(GC_SAFE,NIL,th);
    bind_variable(IN_SAFEPOINT,NIL,th);
#ifdef LISP_FEATURE_SB_THRUPTION
    bind_variable(THRUPTION_PENDING,NIL,th);
    bind_variable(RESTART_CLUSTERS,NIL,th);
#ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    access_control_stack_pointer(th)=th->control_stack_start;
    th->interrupt_data->pending_handler = 0;
    th->interrupt_data->gc_blocked_deferrables = 0;
#ifdef GENCGC_IS_PRECISE
    th->interrupt_data->allocation_trap_context = 0;
    /* Smuggle the startup function to the trampoline via this slot. */
    th->no_tls_value_marker=initial_function;
#if defined(LISP_FEATURE_WIN32)
    for (i = 0; i<sizeof(th->private_events.events)/
           sizeof(th->private_events.events[0]); ++i) {
      th->private_events.events[i] = CreateEvent(NULL,FALSE,FALSE,NULL);
    th->synchronous_io_handle_and_flag = 0;
/* Build the very first thread struct and enter Lisp via
 * initial_thread_trampoline; does not return on success. */
void create_initial_thread(lispobj initial_function) {
    struct thread *th=create_thread_struct(initial_function);
#ifdef LISP_FEATURE_SB_THREAD
    pthread_key_create(&lisp_thread, 0);
        initial_thread_trampoline(th); /* no return */
    } else lose("can't create initial thread\n");
#ifdef LISP_FEATURE_SB_THREAD
#ifndef __USE_XOPEN2K
/* Local prototype when the libc headers don't expose it. */
extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
/* Launch the OS-level thread for TH, storing the new pthread id in
 * *KID_TID. Performed with deferrable signals blocked and under
 * create_thread_lock (see perform_thread_post_mortem). Returns
 * true/false success. */
boolean
create_os_thread(struct thread *th,os_thread_t *kid_tid)
    /* The new thread inherits the restrictive signal mask set here,
     * and enables signals again when it is set up properly. */
    int retcode = 0, initcode;
    FSHOW_SIGNAL((stderr,"/create_os_thread: creating new thread\n"));
    /* Blocking deferrable signals is enough, no need to block
     * SIG_STOP_FOR_GC because the child process is not linked onto
     * all_threads until it's ready. */
    block_deferrable_signals(&oldset);
    /* See perform_thread_post_mortem for at least one reason why this lock is necessary */
    retcode = pthread_mutex_lock(&create_thread_lock);
    gc_assert(retcode == 0);
    FSHOW_SIGNAL((stderr,"/create_os_thread: got lock\n"));
    if((initcode = pthread_attr_init(th->os_attr)) ||
       /* call_into_lisp_first_time switches the stack for the initial
        * thread. For the others, we use this. */
#if defined(LISP_FEATURE_WIN32)
       (pthread_attr_setstacksize(th->os_attr, thread_control_stack_size)) ||
# if defined(LISP_FEATURE_C_STACK_IS_CONTROL_STACK)
       (pthread_attr_setstack(th->os_attr,th->control_stack_start,
                              thread_control_stack_size)) ||
       (pthread_attr_setstack(th->os_attr,th->alien_stack_start,
                              ALIEN_STACK_SIZE)) ||
       (retcode = pthread_create
        (kid_tid,th->os_attr,(void *(*)(void *))new_thread_trampoline,th))) {
        FSHOW_SIGNAL((stderr, "init = %d\n", initcode));
        FSHOW_SIGNAL((stderr, "pthread_create returned %d, errno %d\n",
            perror("create_os_thread");
    retcode = pthread_mutex_unlock(&create_thread_lock);
    gc_assert(retcode == 0);
    FSHOW_SIGNAL((stderr,"/create_os_thread: released lock\n"));
    thread_sigmask(SIG_SETMASK,&oldset,0);
/* Public entry: create a new lisp thread running INITIAL_FUNCTION.
 * Must be called with interrupts disabled; returns the new thread's
 * os_thread id (0 on failure). */
os_thread_t create_thread(lispobj initial_function) {
    struct thread *th, *thread = arch_os_get_current_thread();
    os_thread_t kid_tid = 0;
    /* Must defend against async unwinds. */
    if (SymbolValue(INTERRUPTS_ENABLED, thread) != NIL)
        lose("create_thread is not safe when interrupts are enabled.\n");
    /* Assuming that a fresh thread struct has no lisp objects in it,
     * linking it to all_threads can be left to the thread itself
     * without fear of gc lossage. initial_function violates this
     * assumption and must stay pinned until the child starts up. */
    th = create_thread_struct(initial_function);
    if (th && !create_os_thread(th,&kid_tid)) {
        free_thread_struct(th);
/* stopping the world is a two-stage process. From this thread we signal
 * all the others with SIG_STOP_FOR_GC. The handler for this signal does
 * the usual pseudo-atomic checks (we don't want to stop a thread while
 * it's in the middle of allocation) then waits for another SIG_STOP_FOR_GC.
 *
 * (With SB-SAFEPOINT, see the definitions in safepoint.c instead.) */
#ifndef LISP_FEATURE_SB_SAFEPOINT
/* To avoid deadlocks when gc stops the world all clients of each
 * mutex must enable or disable SIG_STOP_FOR_GC for the duration of
 * holding the lock, but they must agree on which. */
/* Suspend every other lisp thread: take create_thread_lock and
 * all_threads_lock (held until gc_start_the_world releases them), send
 * SIG_STOP_FOR_GC to each RUNNING thread, then wait until no thread is
 * still RUNNING. */
void gc_stop_the_world()
    struct thread *p,*th=arch_os_get_current_thread();
    int status, lock_ret;
    /* KLUDGE: Stopping the thread during pthread_create() causes deadlock
     * on lutex destruction. */
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on create_thread_lock\n"));
    lock_ret = pthread_mutex_lock(&create_thread_lock);
    gc_assert(lock_ret == 0);
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got create_thread_lock\n"));
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock\n"));
    /* keep threads from starting while the world is stopped. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got lock\n"));
    /* stop all other threads by sending them SIG_STOP_FOR_GC */
    for(p=all_threads; p; p=p->next) {
        gc_assert(p->os_thread != 0);
        FSHOW_SIGNAL((stderr,"/gc_stop_the_world: thread=%lu, state=%x\n",
                      p->os_thread, thread_state(p)));
        if((p!=th) && ((thread_state(p)==STATE_RUNNING))) {
            FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending thread %lu\n",
            /* We already hold all_thread_lock, P can become DEAD but
             * cannot exit, ergo it's safe to use pthread_kill. */
            status=pthread_kill(p->os_thread,SIG_STOP_FOR_GC);
                /* This thread has exited. */
                gc_assert(thread_state(p)==STATE_DEAD);
                lose("cannot send suspend thread=%lu: %d, %s\n",
                     p->os_thread,status,strerror(status));
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:signals sent\n"));
    /* Second pass: wait for every signalled thread to leave RUNNING. */
    for(p=all_threads;p;p=p->next) {
            "/gc_stop_the_world: waiting for thread=%lu: state=%x\n",
            p->os_thread, thread_state(p)));
        wait_for_thread_state_change(p, STATE_RUNNING);
        if (p->state == STATE_RUNNING)
            lose("/gc_stop_the_world: unexpected state");
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:end\n"));
/* Resume the world after GC: set every non-DEAD thread back to RUNNING
 * and release the locks taken in gc_stop_the_world. */
void gc_start_the_world()
    struct thread *p,*th=arch_os_get_current_thread();
    /* if a resumed thread creates a new thread before we're done with
     * this loop, the new thread will get consed on the front of
     * all_threads, but it won't have been stopped so won't need
     * restarting. */
    FSHOW_SIGNAL((stderr,"/gc_start_the_world:begin\n"));
    for(p=all_threads;p;p=p->next) {
        gc_assert(p->os_thread!=0);
        lispobj state = thread_state(p);
        if (state != STATE_DEAD) {
            if(state != STATE_STOPPED) {
                lose("gc_start_the_world: wrong thread state is %d\n",
                     fixnum_value(state));
            FSHOW_SIGNAL((stderr, "/gc_start_the_world: resuming %lu\n",
            set_thread_state(p, STATE_RUNNING);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    lock_ret = pthread_mutex_unlock(&create_thread_lock);
    gc_assert(lock_ret == 0);
    FSHOW_SIGNAL((stderr,"/gc_start_the_world:end\n"));
#endif /* !LISP_FEATURE_SB_SAFEPOINT */
#endif /* !LISP_FEATURE_SB_THREAD */
#ifdef LISP_FEATURE_SB_THREAD
    /* Yield the processor to another runnable thread. */
    return sched_yield();
/* Interrupt OS_THREAD so it notices pending thruptions: via
 * kill_safely on win32 / non-thruption builds, otherwise via the posix
 * thruption mechanism. */
wake_thread(os_thread_t os_thread)
#if defined(LISP_FEATURE_WIN32)
    return kill_safely(os_thread, 1);
#elif !defined(LISP_FEATURE_SB_THRUPTION)
    return kill_safely(os_thread, SIGPIPE);
    return wake_thread_posix(os_thread);
/* If the thread id given does not belong to a running thread (it has
 * exited or never even existed) pthread_kill _may_ fail with ESRCH,
 * but it is also allowed to just segfault, see
 * <http://udrepper.livejournal.com/16844.html>.
 *
 * Relying on thread ids can easily backfire since ids are recycled
 * (NPTL recycles them extremely fast) so a signal can be sent to
 * another process if the one it was sent to exited.
 *
 * For these reasons, we must make sure that the thread is still alive
 * when the pthread_kill is called and return if the thread is
 * no longer alive.
 *
 * Note (DFL, 2011-06-22): At the time of writing, this function is only
 * used for INTERRUPT-THREAD, hence the wake_thread special-case for
 * thruption builds. */
/* Deliver SIGNAL to OS_THREAD only if it is still on all_threads. */
kill_safely(os_thread_t os_thread, int signal)
    FSHOW_SIGNAL((stderr,"/kill_safely: %lu, %d\n", os_thread, signal));
#ifdef LISP_FEATURE_SB_THREAD
        struct thread *thread;
        /* Frequent special case: resignalling to self. The idea is
         * that leave_region safepoint will acknowledge the signal, so
         * there is no need to take locks, roll thread to safepoint
         * etc. */
        /* Kludge (on safepoint builds): At the moment, this isn't just
         * an optimization; rather it masks the fact that
         * gc_stop_the_world() grabs the all_threads mutex without
         * releasing it, and since we're not using recursive pthread
         * mutexes, the pthread_mutex_lock() around the all_threads loop
         * would go wrong. Why are we running interruptions while
         * stopping the world though? Test case is (:ASYNC-UNWIND
         * :SPECIALS), especially with s/10/100/ in both loops. */
        if (os_thread == pthread_self()) {
            pthread_kill(os_thread, signal);
#ifdef LISP_FEATURE_WIN32
            check_pending_thruptions(NULL);
        /* pthread_kill is not async signal safe and we don't want to be
         * interrupted while holding the lock. */
        block_deferrable_signals(&oldset);
        pthread_mutex_lock(&all_threads_lock);
        /* Signal only if the target is still on the live-thread list,
         * guarding against recycled thread ids. */
        for (thread = all_threads; thread; thread = thread->next) {
            if (thread->os_thread == os_thread) {
                int status = pthread_kill(os_thread, signal);
                    lose("kill_safely: pthread_kill failed with %d\n", status);
#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THRUPTION)
                wake_thread_win32(thread);
        pthread_mutex_unlock(&all_threads_lock);
        thread_sigmask(SIG_SETMASK,&oldset,0);
#elif defined(LISP_FEATURE_WIN32)
        lose("kill_safely: who do you want to kill? %d?\n", os_thread);
    /* Dubious (as in don't know why it works) workaround for the
     * signal sometimes not being generated on darwin. */
#ifdef LISP_FEATURE_DARWIN
        sigprocmask(SIG_BLOCK, &deferrable_sigset, &oldset);
        status = raise(signal);
        sigprocmask(SIG_SETMASK,&oldset,0);
        status = raise(signal);
        lose("cannot raise signal %d, %d %s\n",
             signal, status, strerror(errno));