/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

#include "sbcl.h"

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#ifndef LISP_FEATURE_WIN32
#include <sched.h>
#endif
#include <sys/types.h>
#ifndef LISP_FEATURE_WIN32
#include <sys/wait.h>
#endif

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_types.h>
#endif

#include "runtime.h"
#include "validate.h"           /* for BINDING_STACK_SIZE etc */
#include "thread.h"
#include "arch.h"
#include "target-arch-os.h"
#include "os.h"
#include "globals.h"
#include "dynbind.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"
#include "interr.h"             /* for lose() */
#include "gc-internal.h"
#include "pseudo-atomic.h"
#include "interrupt.h"

#ifdef LISP_FEATURE_SB_THREAD

#ifdef LISP_FEATURE_OPENBSD
#include <pthread_np.h>
#endif

#ifdef LISP_FEATURE_SUNOS
#include <thread.h>
#endif

#ifdef LISP_FEATURE_WIN32
# define IMMEDIATE_POST_MORTEM
#endif

#if defined(LISP_FEATURE_FREEBSD) || defined(LISP_FEATURE_DRAGONFLY) || defined(LISP_FEATURE_DARWIN)
#define LOCK_CREATE_THREAD
#endif

struct thread_post_mortem {
    os_thread_t os_thread;
    pthread_attr_t *os_attr;
    os_vm_address_t os_address;
};

static struct thread_post_mortem * volatile pending_thread_post_mortem = 0;
#endif

int dynamic_values_bytes = TLS_SIZE * sizeof(lispobj);  /* same for all threads */
struct thread *all_threads;

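/* all_threads is a doubly-linked list of every Lisp-visible thread.
 * On threaded builds it is guarded by all_threads_lock: threads are
 * linked and unlinked, and the stop-the-world code walks the list,
 * with the lock held (the initial thread links itself before any
 * other thread can exist). */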
#ifdef LISP_FEATURE_SB_THREAD
pthread_mutex_t all_threads_lock = PTHREAD_MUTEX_INITIALIZER;

#ifdef LOCK_CREATE_THREAD
static pthread_mutex_t create_thread_lock = PTHREAD_MUTEX_INITIALIZER;
#endif

#ifdef LISP_FEATURE_GCC_TLS
__thread struct thread *current_thread;
#endif
pthread_key_t lisp_thread = 0;
#endif

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
extern lispobj call_into_lisp_first_time(lispobj fun, lispobj *args, int nargs)
# ifdef LISP_FEATURE_X86_64
    __attribute__((sysv_abi))
# endif
    ;
#endif

static void
link_thread(struct thread *th)
{
    if (all_threads) all_threads->prev = th;
    th->next = all_threads;
    th->prev = 0;
    all_threads = th;
}

#ifdef LISP_FEATURE_SB_THREAD
static void
unlink_thread(struct thread *th)
{
    if (th->prev)
        th->prev->next = th->next;
    else
        all_threads = th->next;
    if (th->next)
        th->next->prev = th->prev;
}

#ifndef LISP_FEATURE_SB_SAFEPOINT
/* Only access thread state with blockables blocked. */
lispobj
thread_state(struct thread *thread)
{
    lispobj state;
    sigset_t old;
    block_blockable_signals(&old);
    os_sem_wait(thread->state_sem, "thread_state");
    state = thread->state;
    os_sem_post(thread->state_sem, "thread_state");
    thread_sigmask(SIG_SETMASK, &old, NULL);
    return state;
}

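/* Threads move between STATE_RUNNING, STATE_STOPPED and STATE_DEAD.
 * Transitions go through set_thread_state: a waiter registers itself
 * by bumping the matching waitcount under state_sem and then blocks
 * on state_not_running_sem or state_not_stopped_sem; set_thread_state
 * posts that semaphore once per registered waiter when the thread
 * leaves the awaited state. */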
void
set_thread_state(struct thread *thread, lispobj state)
{
    int i, waitcount = 0;
    sigset_t old;
    block_blockable_signals(&old);
    os_sem_wait(thread->state_sem, "set_thread_state");
    if (thread->state != state) {
        if ((STATE_STOPPED==state) ||
            (STATE_DEAD==state)) {
            waitcount = thread->state_not_running_waitcount;
            thread->state_not_running_waitcount = 0;
            for (i=0; i<waitcount; i++)
                os_sem_post(thread->state_not_running_sem, "set_thread_state (not running)");
        }
        if ((STATE_RUNNING==state) ||
            (STATE_DEAD==state)) {
            waitcount = thread->state_not_stopped_waitcount;
            thread->state_not_stopped_waitcount = 0;
            for (i=0; i<waitcount; i++)
                os_sem_post(thread->state_not_stopped_sem, "set_thread_state (not stopped)");
        }
        thread->state = state;
    }
    os_sem_post(thread->state_sem, "set_thread_state");
    thread_sigmask(SIG_SETMASK, &old, NULL);
}

void
wait_for_thread_state_change(struct thread *thread, lispobj state)
{
    sigset_t old;
    os_sem_t *wait_sem;
    block_blockable_signals(&old);
  start:
    os_sem_wait(thread->state_sem, "wait_for_thread_state_change");
    if (thread->state == state) {
        switch (state) {
        case STATE_RUNNING:
            wait_sem = thread->state_not_running_sem;
            thread->state_not_running_waitcount++;
            break;
        case STATE_STOPPED:
            wait_sem = thread->state_not_stopped_sem;
            thread->state_not_stopped_waitcount++;
            break;
        default:
            lose("Invalid state in wait_for_thread_state_change: "OBJ_FMTX"\n", state);
        }
    } else {
        wait_sem = NULL;
    }
    os_sem_post(thread->state_sem, "wait_for_thread_state_change");
    if (wait_sem) {
        os_sem_wait(wait_sem, "wait_for_thread_state_change");
        goto start;
    }
    thread_sigmask(SIG_SETMASK, &old, NULL);
}
#endif /* sb-safepoint */
#endif /* sb-thread */

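/* The initial thread keeps running on the stack the OS gave the
 * process; on x86oids call_into_lisp_first_time then switches it onto
 * the freshly allocated Lisp control stack (see also the matching
 * comment in create_os_thread below). */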
static int
initial_thread_trampoline(struct thread *th)
{
    lispobj function;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    lispobj *args = NULL;
#endif
#ifdef LISP_FEATURE_SB_THREAD
    pthread_setspecific(lisp_thread, (void *)1);
#endif
#if defined(THREADS_USING_GCSIGNAL) && (defined(LISP_FEATURE_PPC) || defined(LISP_FEATURE_ARM64))
    /* SIG_STOP_FOR_GC defaults to blocked on PPC? */
    unblock_gc_signals(0,0);
#endif
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) return 1;
    link_thread(th);
    th->os_thread=thread_self();
#ifndef LISP_FEATURE_WIN32
    protect_control_stack_hard_guard_page(1, NULL);
#endif
    protect_binding_stack_hard_guard_page(1, NULL);
    protect_alien_stack_hard_guard_page(1, NULL);
#ifndef LISP_FEATURE_WIN32
    protect_control_stack_guard_page(1, NULL);
#endif
    protect_binding_stack_guard_page(1, NULL);
    protect_alien_stack_guard_page(1, NULL);

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    return call_into_lisp_first_time(function,args,0);
#else
    return funcall0(function);
#endif
}

#ifdef LISP_FEATURE_SB_THREAD

# if defined(IMMEDIATE_POST_MORTEM)

/*
 * If this feature is set, we are running on a stack managed by the OS,
 * and no fancy delays are required for anything.  Just do it.
 */
static void
schedule_thread_post_mortem(struct thread *corpse)
{
    pthread_detach(pthread_self());
    gc_assert(!pthread_attr_destroy(corpse->os_attr));
#if defined(LISP_FEATURE_WIN32)
    os_invalidate_free(corpse->os_address, THREAD_STRUCT_SIZE);
#else
    os_invalidate(corpse->os_address, THREAD_STRUCT_SIZE);
#endif
}

# else

/* THREAD POST MORTEM CLEANUP
 *
 * Memory allocated for the thread stacks cannot be reclaimed while
 * the thread is still alive, so we need a mechanism for post mortem
 * cleanups. FIXME: We actually have three, for historical reasons as
 * the saying goes. Do we really need three? Nikodemus guesses that
 * not anymore, now that we properly call pthread_attr_destroy before
 * freeing the stack. */

static struct thread_post_mortem *
plan_thread_post_mortem(struct thread *corpse)
{
    if (corpse) {
        struct thread_post_mortem *post_mortem = malloc(sizeof(struct thread_post_mortem));
        gc_assert(post_mortem);
        post_mortem->os_thread = corpse->os_thread;
        post_mortem->os_attr = corpse->os_attr;
        post_mortem->os_address = corpse->os_address;

        return post_mortem;
    } else {
        /* FIXME: When does this happen? */
        return NULL;
    }
}

static void
perform_thread_post_mortem(struct thread_post_mortem *post_mortem)
{
    int result;
#ifdef CREATE_POST_MORTEM_THREAD
    pthread_detach(pthread_self());
#endif
    if (post_mortem) {
#if defined(LOCK_CREATE_THREAD) && defined(LISP_FEATURE_DARWIN)
        /* The thread may exit before pthread_create() has finished
           initialization and it may write into already unmapped
           memory. This lock doesn't actually need to protect
           anything, just to make sure that at least one call to
           pthread_create() has finished.

           Possible improvements: stash the address of the thread
           struct for which a pthread is being created and don't lock
           here if it's not the one being terminated. */
        result = pthread_mutex_lock(&create_thread_lock);
        gc_assert(result == 0);
        result = pthread_mutex_unlock(&create_thread_lock);
        gc_assert(result == 0);
#endif

        if ((result = pthread_join(post_mortem->os_thread, NULL))) {
            lose("Error calling pthread_join in perform_thread_post_mortem:\n%s",
                 strerror(result));
        }
        if ((result = pthread_attr_destroy(post_mortem->os_attr))) {
            lose("Error calling pthread_attr_destroy in perform_thread_post_mortem:\n%s",
                 strerror(result));
        }
        os_invalidate(post_mortem->os_address, THREAD_STRUCT_SIZE);
        free(post_mortem);
    }
}

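/* Deferred variant: a dying thread cannot tear down the very stack it
 * is still running on, so it publishes its own post-mortem record by
 * swapping it into pending_thread_post_mortem and instead performs
 * whatever record the previously exited thread left there. */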
static void
schedule_thread_post_mortem(struct thread *corpse)
{
    struct thread_post_mortem *post_mortem = NULL;
    if (corpse) {
        post_mortem = plan_thread_post_mortem(corpse);

#ifdef CREATE_POST_MORTEM_THREAD
        pthread_t thread;
        gc_assert(!pthread_create(&thread, NULL, perform_thread_post_mortem, post_mortem));
#else
        post_mortem = (struct thread_post_mortem *)
            swap_lispobjs((lispobj *)(void *)&pending_thread_post_mortem,
                          (lispobj)post_mortem);
        perform_thread_post_mortem(post_mortem);
#endif
    }
}

# endif /* !IMMEDIATE_POST_MORTEM */

/* Note: scribble must be stack-allocated */
static void
init_new_thread(struct thread *th, init_thread_data *scribble, int guardp)
{
    int lock_ret;

    pthread_setspecific(lisp_thread, (void *)1);
    if(arch_os_thread_init(th)==0) {
        /* FIXME: handle error */
        lose("arch_os_thread_init failed\n");
    }

    th->os_thread=thread_self();
    if (guardp)
        protect_control_stack_guard_page(1, NULL);
    protect_binding_stack_guard_page(1, NULL);
    protect_alien_stack_guard_page(1, NULL);

    /* Since GC can only know about this thread from the all_threads
     * list and we're just adding this thread to it, there is no
     * danger of deadlocking even with SIG_STOP_FOR_GC blocked (which
     * it is not). */
#ifdef LISP_FEATURE_SB_SAFEPOINT
    *th->csp_around_foreign_call = (lispobj)scribble;
#endif
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    link_thread(th);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    /* Kludge: Changed the order of some steps between the safepoint/
     * non-safepoint versions of this code.  Can we unify this more? */
#ifdef LISP_FEATURE_SB_SAFEPOINT
    gc_state_lock();
    gc_state_wait(GC_NONE);
    gc_state_unlock();
    push_gcing_safety(&scribble->safety);
#endif
}

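/* Every successful init_new_thread() is paired with a later
 * undo_init_new_thread(): new_thread_trampoline wraps the thread's
 * Lisp function with the pair, and attach_os_thread/detach_os_thread
 * do the same around foreign callbacks. */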
static void
undo_init_new_thread(struct thread *th, init_thread_data *scribble)
{
    int lock_ret;

    /* Kludge: Changed the order of some steps between the safepoint/
     * non-safepoint versions of this code.  Can we unify this more? */
#ifdef LISP_FEATURE_SB_SAFEPOINT
    block_blockable_signals(0);
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
#endif
    pop_gcing_safety(&scribble->safety);
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    unlink_thread(th);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);
#else
    /* Block GC */
    block_blockable_signals(0);
    set_thread_state(th, STATE_DEAD);

    /* SIG_STOP_FOR_GC is blocked and GC might be waiting for this
     * thread, but since we are already dead it won't wait long. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
#endif
    unlink_thread(th);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);
#endif

    arch_os_thread_cleanup(th);

#ifndef LISP_FEATURE_SB_SAFEPOINT
    os_sem_destroy(th->state_sem);
    os_sem_destroy(th->state_not_running_sem);
    os_sem_destroy(th->state_not_stopped_sem);
#endif

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
    mach_lisp_thread_destroy(th);
#endif

#if defined(LISP_FEATURE_WIN32)
    int i;
    for (i = 0;
         i < (int) (sizeof(th->private_events.events)/
                    sizeof(th->private_events.events[0])); ++i) {
        CloseHandle(th->private_events.events[i]);
    }
    TlsSetValue(OUR_TLS_INDEX,NULL);
#endif

    /* Undo the association of the current pthread to its `struct thread',
     * such that we can call arch_os_get_current_thread() later in this
     * thread and cleanly get back NULL. */
#ifdef LISP_FEATURE_GCC_TLS
    current_thread = NULL;
#else
    pthread_setspecific(specials, NULL);
#endif
}

/* this is the first thing that runs in the child (which is why the
 * silly calling convention).  Basically it calls the user's requested
 * lisp function after doing arch_os_thread_init and whatever other
 * bookkeeping needs to be done
 */
static int
new_thread_trampoline(struct thread *th)
{
    int result;
    init_thread_data scribble;

    FSHOW((stderr,"/creating thread %lu\n", thread_self()));
    check_deferrables_blocked_or_lose(0);
#ifndef LISP_FEATURE_SB_SAFEPOINT
    check_gc_signals_unblocked_or_lose(0);
#endif

    lispobj function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    init_new_thread(th, &scribble, 1);
    result = funcall0(function);
    undo_init_new_thread(th, &scribble);

    schedule_thread_post_mortem(th);

    FSHOW((stderr,"/exiting thread %lu\n", thread_self()));
    return result;
}

static struct thread *create_thread_struct(lispobj);

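/* attach_os_thread gives a thread that was not created by Lisp a
 * temporary `struct thread' so that callbacks can run Lisp code;
 * detach_os_thread tears it back down.  Both are driven from
 * callback_wrapper_trampoline below when no current thread exists. */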
void
attach_os_thread(init_thread_data *scribble)
{
    os_thread_t os = pthread_self();
    odxprint(misc, "attach_os_thread: attaching to %p", os);

    struct thread *th = create_thread_struct(NIL);
    block_deferrable_signals(&scribble->oldset);
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    /* We don't actually want a pthread_attr here, but rather than add
     * `if's to the post-mortem, let's just keep that code happy by
     * keeping it initialized: */
    pthread_attr_init(th->os_attr);

#ifndef LISP_FEATURE_WIN32
    /* On windows, arch_os_thread_init will take care of finding the
     * stack. */
    void *stack_addr;
    size_t stack_size;
#ifdef LISP_FEATURE_OPENBSD
    stack_t stack;
    pthread_stackseg_np(os, &stack);
    stack_size = stack.ss_size;
    stack_addr = (void*)((size_t)stack.ss_sp - stack_size);
#elif defined LISP_FEATURE_SUNOS
    stack_t stack;
    thr_stksegment(&stack);
    stack_size = stack.ss_size;
    stack_addr = (void*)((size_t)stack.ss_sp - stack_size);
#elif defined(LISP_FEATURE_DARWIN)
    stack_addr = pthread_get_stackaddr_np(os);
    stack_size = pthread_get_stacksize_np(os);
#else
    pthread_attr_t attr;
#ifdef LISP_FEATURE_FREEBSD
    pthread_attr_get_np(os, &attr);
#else
    int pthread_getattr_np(pthread_t, pthread_attr_t *);
    pthread_getattr_np(os, &attr);
#endif
    pthread_attr_getstack(&attr, &stack_addr, &stack_size);
#endif

    th->control_stack_start = stack_addr;
    th->control_stack_end = (void *) (((uintptr_t) stack_addr) + stack_size);
#endif

    init_new_thread(th, scribble, 0);

    /* We will be calling into Lisp soon, and the functions being called
     * recklessly ignore the comment in target-thread which says that we
     * must be careful to not cause GC while initializing a new thread.
     * Since we first need to create a fresh thread object, it's really
     * tempting to just perform such unsafe allocation though.  So let's
     * at least try to suppress GC before consing, and hope that it
     * works: */
    bind_variable(GC_INHIBIT, T, th);

    uword_t stacksize
        = (uword_t) th->control_stack_end - (uword_t) th->control_stack_start;
    odxprint(misc, "attach_os_thread: attached %p as %p (0x%lx bytes stack)",
             os, th, (long) stacksize);
}

void
detach_os_thread(init_thread_data *scribble)
{
    struct thread *th = arch_os_get_current_thread();
    odxprint(misc, "detach_os_thread: detaching");

    undo_init_new_thread(th, scribble);

    odxprint(misc, "detach_os_thread: detached");
    pthread_setspecific(lisp_thread, (void *)0);
    thread_sigmask(SIG_SETMASK, &scribble->oldset, 0);
}

void
callback_wrapper_trampoline(
#if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
    /* On the x86oid backends, the assembly wrapper happens to not pass
     * in ENTER_ALIEN_CALLBACK explicitly for safepoints.  However, the
     * platforms with precise GC are tricky enough already, and I want
     * to minimize the read-time conditionals.  For those platforms, I'm
     * only replacing funcall3 with callback_wrapper_trampoline while
     * keeping the arguments unchanged. --DFL */
    lispobj __attribute__((__unused__)) fun,
#endif
    lispobj arg0, lispobj arg1, lispobj arg2)
{
#if defined(LISP_FEATURE_WIN32)
    pthread_np_notice_thread();
#endif
    struct thread* th = arch_os_get_current_thread();
    if (!th) {                  /* callback invoked in non-lisp thread */
        init_thread_data scribble;
        attach_os_thread(&scribble);
        funcall3(StaticSymbolFunction(ENTER_FOREIGN_CALLBACK), arg0,arg1,arg2);
        detach_os_thread(&scribble);
        return;
    }

#ifdef LISP_FEATURE_WIN32
    /* arg2 is the pointer to a return value, which sits on the stack */
    th->carried_base_pointer = (os_context_register_t) *(((void**)arg2)-1);
#endif

#ifdef LISP_FEATURE_SB_SAFEPOINT
    WITH_GC_AT_SAFEPOINTS_ONLY()
#endif
    {
        funcall3(SymbolValue(ENTER_ALIEN_CALLBACK, 0), arg0, arg1, arg2);
    }
}

#endif /* LISP_FEATURE_SB_THREAD */

static void __attribute__((unused))
free_thread_struct(struct thread *th)
{
#if defined(LISP_FEATURE_WIN32)
    os_invalidate_free((os_vm_address_t) th->os_address, THREAD_STRUCT_SIZE);
#else
    os_invalidate((os_vm_address_t) th->os_address, THREAD_STRUCT_SIZE);
#endif
}

#ifdef LISP_FEATURE_SB_THREAD
/* FIXME: should be MAX_INTERRUPTS -1 ? */
const unsigned int tls_index_start =
  MAX_INTERRUPTS + sizeof(struct thread)/sizeof(lispobj);
#endif

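/* The single block os_validate()d below is carved up, in address
 * order, roughly as follows: control stack, binding stack, alien
 * stack, the CSP page used by safepoints, and finally the `union
 * per_thread_data' holding the TLS vector and the struct thread
 * itself.  The pointer arithmetic in create_thread_struct encodes
 * this layout. */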
/* this is called from any other thread to create the new one, and
 * initialize all parts of it that can be initialized from another
 * thread
 */

static struct thread *
create_thread_struct(lispobj initial_function) {
    union per_thread_data *per_thread;
    struct thread *th=0;        /*  subdue gcc */
    void *spaces=0;
    void *aligned_spaces=0;
#if defined(LISP_FEATURE_SB_THREAD) || defined(LISP_FEATURE_WIN32)
    unsigned int i;
#endif

    /* May as well allocate all the spaces at once: it saves us from
     * having to decide what to do if only some of the allocations
     * succeed. SPACES must be appropriately aligned, since the GC
     * expects the control stack to start at a page boundary -- and
     * the OS may have even more rigorous requirements. We can't rely
     * on the alignment passed from os_validate, since that might
     * assume the current (e.g. 4k) pagesize, while we calculate with
     * the biggest (e.g. 64k) pagesize allowed by the ABI. */
    spaces=os_validate(0, THREAD_STRUCT_SIZE);
    if(!spaces)
        return NULL;
    /* Aligning up is safe as THREAD_STRUCT_SIZE has
     * THREAD_ALIGNMENT_BYTES padding. */
    aligned_spaces = (void *)((((uword_t)(char *)spaces)
                               + THREAD_ALIGNMENT_BYTES-1)
                              &~(uword_t)(THREAD_ALIGNMENT_BYTES-1));
    void* csp_page=
        (aligned_spaces+
         thread_control_stack_size+
         BINDING_STACK_SIZE+
         ALIEN_STACK_SIZE);
    per_thread=(union per_thread_data *)
        (csp_page + THREAD_CSP_PAGE_SIZE);

#ifdef LISP_FEATURE_SB_THREAD
    for(i = 0; i < (dynamic_values_bytes / sizeof(lispobj)); i++)
        per_thread->dynamic_values[i] = NO_TLS_VALUE_MARKER_WIDETAG;
#endif

    th=&per_thread->thread;
    th->os_address = spaces;
    th->control_stack_start = aligned_spaces;
    th->binding_stack_start=
        (lispobj*)((void*)th->control_stack_start+thread_control_stack_size);
    th->control_stack_end = th->binding_stack_start;
    th->control_stack_guard_page_protected = T;
    th->alien_stack_start=
        (lispobj*)((void*)th->binding_stack_start+BINDING_STACK_SIZE);
    set_binding_stack_pointer(th,th->binding_stack_start);
    th->this=th;
    th->os_thread=0;

#ifdef LISP_FEATURE_SB_SAFEPOINT
# ifdef LISP_FEATURE_WIN32
    th->carried_base_pointer = 0;
# endif
# ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    th->pc_around_foreign_call = 0;
# endif
    th->csp_around_foreign_call = csp_page;
#endif

    struct nonpointer_thread_data *nonpointer_data
        = (void *) &per_thread->dynamic_values[TLS_SIZE];

    th->interrupt_data = &nonpointer_data->interrupt_data;

#ifdef LISP_FEATURE_SB_THREAD
    th->os_attr = &nonpointer_data->os_attr;
# ifndef LISP_FEATURE_SB_SAFEPOINT
    th->state_sem = &nonpointer_data->state_sem;
    th->state_not_running_sem = &nonpointer_data->state_not_running_sem;
    th->state_not_stopped_sem = &nonpointer_data->state_not_stopped_sem;
    os_sem_init(th->state_sem, 1);
    os_sem_init(th->state_not_running_sem, 0);
    os_sem_init(th->state_not_stopped_sem, 0);
    th->state_not_running_waitcount = 0;
    th->state_not_stopped_waitcount = 0;
# endif
#endif
    th->state=STATE_RUNNING;
#ifdef ALIEN_STACK_GROWS_DOWNWARD
    th->alien_stack_pointer=((void *)th->alien_stack_start
                             + ALIEN_STACK_SIZE-N_WORD_BYTES);
#else
    th->alien_stack_pointer=((void *)th->alien_stack_start);
#endif

#ifdef LISP_FEATURE_SB_THREAD
    th->pseudo_atomic_bits=0;
#elif defined LISP_FEATURE_GENCGC
    clear_pseudo_atomic_atomic(th);
    clear_pseudo_atomic_interrupted(th);
#endif

#ifdef LISP_FEATURE_GENCGC
    gc_set_region_empty(&th->alloc_region);
# if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
    gc_set_region_empty(&th->sprof_alloc_region);
# endif
#endif
#ifdef LISP_FEATURE_SB_THREAD
    /* This parallels the same logic in globals.c for the
     * single-threaded foreign_function_call_active, KLUDGE and
     * all. */
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    th->foreign_function_call_active = 0;
#else
    th->foreign_function_call_active = 1;
#endif
#endif

#ifndef LISP_FEATURE_SB_THREAD
    /* the tls-points-into-struct-thread trick is only good for threaded
     * sbcl, because unithread sbcl doesn't have tls.  So, we copy the
     * appropriate values from struct thread here, and make sure that
     * we use the appropriate SymbolValue macros to access any of the
     * variable quantities from the C runtime.  It's not quite OAOOM,
     * it just feels like it */
    SetSymbolValue(BINDING_STACK_START,(lispobj)th->binding_stack_start,th);
    SetSymbolValue(CONTROL_STACK_START,(lispobj)th->control_stack_start,th);
    SetSymbolValue(CONTROL_STACK_END,(lispobj)th->control_stack_end,th);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    SetSymbolValue(ALIEN_STACK_POINTER,(lispobj)th->alien_stack_pointer,th);
#endif
#endif
    bind_variable(CURRENT_CATCH_BLOCK,make_fixnum(0),th);
    bind_variable(CURRENT_UNWIND_PROTECT_BLOCK,make_fixnum(0),th);
    bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,make_fixnum(0),th);
    bind_variable(INTERRUPT_PENDING, NIL,th);
    bind_variable(INTERRUPTS_ENABLED,T,th);
    bind_variable(ALLOW_WITH_INTERRUPTS,T,th);
    bind_variable(GC_PENDING,NIL,th);
    bind_variable(ALLOC_SIGNAL,NIL,th);
#ifdef PINNED_OBJECTS
    bind_variable(PINNED_OBJECTS,NIL,th);
#endif
#ifdef LISP_FEATURE_SB_THREAD
    bind_variable(STOP_FOR_GC_PENDING,NIL,th);
#endif
#if defined(LISP_FEATURE_SB_SAFEPOINT)
    bind_variable(GC_SAFE,NIL,th);
    bind_variable(IN_SAFEPOINT,NIL,th);
#endif
#ifdef LISP_FEATURE_SB_THRUPTION
    bind_variable(THRUPTION_PENDING,NIL,th);
    bind_variable(RESTART_CLUSTERS,NIL,th);
#endif
#ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    access_control_stack_pointer(th)=th->control_stack_start;
#endif

    th->interrupt_data->pending_handler = 0;
    th->interrupt_data->gc_blocked_deferrables = 0;
#ifdef GENCGC_IS_PRECISE
    th->interrupt_data->allocation_trap_context = 0;
#endif
    th->no_tls_value_marker=initial_function;

#if defined(LISP_FEATURE_WIN32)
    for (i = 0; i<sizeof(th->private_events.events)/
           sizeof(th->private_events.events[0]); ++i) {
      th->private_events.events[i] = CreateEvent(NULL,FALSE,FALSE,NULL);
    }
    th->synchronous_io_handle_and_flag = 0;
#endif
    th->stepping = 0;
    return th;
}

void create_initial_thread(lispobj initial_function) {
    struct thread *th=create_thread_struct(initial_function);
#ifdef LISP_FEATURE_SB_THREAD
    pthread_key_create(&lisp_thread, 0);
#endif
    if(th) {
        initial_thread_trampoline(th); /* no return */
    } else lose("can't create initial thread\n");
}

#ifdef LISP_FEATURE_SB_THREAD

#ifndef __USE_XOPEN2K
extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
                                  size_t __stacksize);
#endif

boolean
create_os_thread(struct thread *th,os_thread_t *kid_tid)
{
    /* The new thread inherits the restrictive signal mask set here,
     * and enables signals again when it is set up properly. */
    sigset_t oldset;
    boolean r=1;
    int retcode = 0, initcode;

    FSHOW_SIGNAL((stderr,"/create_os_thread: creating new thread\n"));

    /* Blocking deferrable signals is enough, no need to block
     * SIG_STOP_FOR_GC because the child thread is not linked onto
     * all_threads until it's ready. */
    block_deferrable_signals(&oldset);

#ifdef LOCK_CREATE_THREAD
    retcode = pthread_mutex_lock(&create_thread_lock);
    gc_assert(retcode == 0);
    FSHOW_SIGNAL((stderr,"/create_os_thread: got lock\n"));
#endif

    if((initcode = pthread_attr_init(th->os_attr)) ||
       /* call_into_lisp_first_time switches the stack for the initial
        * thread. For the others, we use this. */
#if defined(LISP_FEATURE_WIN32)
       (pthread_attr_setstacksize(th->os_attr, thread_control_stack_size)) ||
#else
# if defined(LISP_FEATURE_C_STACK_IS_CONTROL_STACK)
       (pthread_attr_setstack(th->os_attr,th->control_stack_start,
                              thread_control_stack_size)) ||
# else
       (pthread_attr_setstack(th->os_attr,th->alien_stack_start,
                              ALIEN_STACK_SIZE)) ||
# endif
#endif
       (retcode = pthread_create
        (kid_tid,th->os_attr,(void *(*)(void *))new_thread_trampoline,th))) {
        FSHOW_SIGNAL((stderr, "init = %d\n", initcode));
        FSHOW_SIGNAL((stderr, "pthread_create returned %d, errno %d\n",
                      retcode, errno));
        if(retcode < 0) {
            perror("create_os_thread");
        }
        r=0;
    }

#ifdef LOCK_CREATE_THREAD
    retcode = pthread_mutex_unlock(&create_thread_lock);
    gc_assert(retcode == 0);
    FSHOW_SIGNAL((stderr,"/create_os_thread: released lock\n"));
#endif

    thread_sigmask(SIG_SETMASK,&oldset,0);
    return r;
}

os_thread_t create_thread(lispobj initial_function) {
    struct thread *th, *thread = arch_os_get_current_thread();
    os_thread_t kid_tid = 0;

    /* Must defend against async unwinds. */
    if (SymbolValue(INTERRUPTS_ENABLED, thread) != NIL)
        lose("create_thread is not safe when interrupts are enabled.\n");

    /* Assuming that a fresh thread struct has no lisp objects in it,
     * linking it to all_threads can be left to the thread itself
     * without fear of gc lossage. initial_function violates this
     * assumption and must stay pinned until the child starts up. */
    th = create_thread_struct(initial_function);
    if (th && !create_os_thread(th,&kid_tid)) {
        free_thread_struct(th);
        kid_tid = 0;
    }
    return kid_tid;
}

/* stopping the world is a two-stage process.  From this thread we signal
 * all the others with SIG_STOP_FOR_GC.  The handler for this signal does
 * the usual pseudo-atomic checks (we don't want to stop a thread while
 * it's in the middle of allocation) then waits for another SIG_STOP_FOR_GC.
 *
 * (With SB-SAFEPOINT, see the definitions in safepoint.c instead.)
 */
#ifndef LISP_FEATURE_SB_SAFEPOINT

/* To avoid deadlocks when gc stops the world all clients of each
 * mutex must enable or disable SIG_STOP_FOR_GC for the duration of
 * holding the lock, but they must agree on which. */
void gc_stop_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status, lock_ret;
#ifdef LOCK_CREATE_THREAD
    /* KLUDGE: Stopping the thread during pthread_create() causes deadlock
     * on FreeBSD. */
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on create_thread_lock\n"));
    lock_ret = pthread_mutex_lock(&create_thread_lock);
    gc_assert(lock_ret == 0);
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got create_thread_lock\n"));
#endif
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock\n"));
    /* keep threads from starting while the world is stopped. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got lock\n"));
    /* stop all other threads by sending them SIG_STOP_FOR_GC */
    for(p=all_threads; p; p=p->next) {
        gc_assert(p->os_thread != 0);
        FSHOW_SIGNAL((stderr,"/gc_stop_the_world: thread=%lu, state=%x\n",
                      p->os_thread, thread_state(p)));
        if((p!=th) && ((thread_state(p)==STATE_RUNNING))) {
            FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending thread %lu\n",
                          p->os_thread));
            /* We already hold all_threads_lock, P can become DEAD but
             * cannot exit, ergo it's safe to use pthread_kill. */
            status=pthread_kill(p->os_thread,SIG_STOP_FOR_GC);
            if (status==ESRCH) {
                /* This thread has exited. */
                gc_assert(thread_state(p)==STATE_DEAD);
            } else if (status) {
                lose("cannot send suspend thread=%lu: %d, %s\n",
                     p->os_thread,status,strerror(status));
            }
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:signals sent\n"));
    for(p=all_threads;p;p=p->next) {
        if (p!=th) {
            FSHOW_SIGNAL
                ((stderr,
                  "/gc_stop_the_world: waiting for thread=%lu: state=%x\n",
                  p->os_thread, thread_state(p)));
            wait_for_thread_state_change(p, STATE_RUNNING);
            if (p->state == STATE_RUNNING)
                lose("/gc_stop_the_world: unexpected state");
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:end\n"));
}

void gc_start_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int lock_ret;
    /* if a resumed thread creates a new thread before we're done with
     * this loop, the new thread will get consed on the front of
     * all_threads, but it won't have been stopped so won't need
     * restarting */
    FSHOW_SIGNAL((stderr,"/gc_start_the_world:begin\n"));
    for(p=all_threads;p;p=p->next) {
        gc_assert(p->os_thread!=0);
        if (p!=th) {
            lispobj state = thread_state(p);
            if (state != STATE_DEAD) {
                if(state != STATE_STOPPED) {
                    lose("gc_start_the_world: wrong thread state is %d\n",
                         fixnum_value(state));
                }
                FSHOW_SIGNAL((stderr, "/gc_start_the_world: resuming %lu\n",
                              p->os_thread));
                set_thread_state(p, STATE_RUNNING);
            }
        }
    }

    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);
#ifdef LOCK_CREATE_THREAD
    lock_ret = pthread_mutex_unlock(&create_thread_lock);
    gc_assert(lock_ret == 0);
#endif

    FSHOW_SIGNAL((stderr,"/gc_start_the_world:end\n"));
}

#endif /* !LISP_FEATURE_SB_SAFEPOINT */
#endif /* !LISP_FEATURE_SB_THREAD */

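/* wake_thread prods another thread on behalf of INTERRUPT-THREAD (see
 * the comment above kill_safely below): without thruptions it simply
 * delivers SIGPIPE via kill_safely. */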
int
thread_yield()
{
#ifdef LISP_FEATURE_SB_THREAD
    return sched_yield();
#else
    return 0;
#endif
}

int
wake_thread(os_thread_t os_thread)
{
#if defined(LISP_FEATURE_WIN32)
    return kill_safely(os_thread, 1);
#elif !defined(LISP_FEATURE_SB_THRUPTION)
    return kill_safely(os_thread, SIGPIPE);
#else
    return wake_thread_posix(os_thread);
#endif
}

/* If the thread id given does not belong to a running thread (it has
 * exited or never even existed) pthread_kill _may_ fail with ESRCH,
 * but it is also allowed to just segfault, see
 * <http://udrepper.livejournal.com/16844.html>.
 *
 * Relying on thread ids can easily backfire since ids are recycled
 * (NPTL recycles them extremely fast) so a signal can be sent to
 * another process if the one it was sent to exited.
 *
 * For these reasons, we must make sure that the thread is still alive
 * when the pthread_kill is called and return if the thread is
 * gone.
 *
 * Note (DFL, 2011-06-22): At the time of writing, this function is only
 * used for INTERRUPT-THREAD, hence the wake_thread special-case for
 * Windows is OK. */
int
kill_safely(os_thread_t os_thread, int signal)
{
    FSHOW_SIGNAL((stderr,"/kill_safely: %lu, %d\n", os_thread, signal));
    {
#ifdef LISP_FEATURE_SB_THREAD
        sigset_t oldset;
        struct thread *thread;
        /* Frequent special case: resignalling to self.  The idea is
         * that leave_region safepoint will acknowledge the signal, so
         * there is no need to take locks, roll thread to safepoint
         * etc. */
        /* Kludge (on safepoint builds): At the moment, this isn't just
         * an optimization; rather it masks the fact that
         * gc_stop_the_world() grabs the all_threads mutex without
         * releasing it, and since we're not using recursive pthread
         * mutexes, the pthread_mutex_lock() around the all_threads loop
         * would go wrong.  Why are we running interruptions while
         * stopping the world though?  Test case is (:ASYNC-UNWIND
         * :SPECIALS), especially with s/10/100/ in both loops. */
        if (os_thread == pthread_self()) {
            pthread_kill(os_thread, signal);
#ifdef LISP_FEATURE_WIN32
            check_pending_thruptions(NULL);
#endif
            return 0;
        }

        /* pthread_kill is not async signal safe and we don't want to be
         * interrupted while holding the lock. */
        block_deferrable_signals(&oldset);
        pthread_mutex_lock(&all_threads_lock);
        for (thread = all_threads; thread; thread = thread->next) {
            if (thread->os_thread == os_thread) {
                int status = pthread_kill(os_thread, signal);
                if (status)
                    lose("kill_safely: pthread_kill failed with %d\n", status);
#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THRUPTION)
                wake_thread_win32(thread);
#endif
                break;
            }
        }
        pthread_mutex_unlock(&all_threads_lock);
        thread_sigmask(SIG_SETMASK,&oldset,0);
        if (thread)
            return 0;
        else
            return -1;
#elif defined(LISP_FEATURE_WIN32)
        return 0;
#else
        int status;
        if (os_thread != 0)
            lose("kill_safely: who do you want to kill? %d?\n", os_thread);
        /* Dubious (as in don't know why it works) workaround for the
         * signal sometimes not being generated on darwin. */
#ifdef LISP_FEATURE_DARWIN
        {
            sigset_t oldset;
            sigprocmask(SIG_BLOCK, &deferrable_sigset, &oldset);
            status = raise(signal);
            sigprocmask(SIG_SETMASK,&oldset,0);
        }
#else
        status = raise(signal);
#endif
        if (status == 0) {
            return 0;
        } else {
            lose("cannot raise signal %d, %d %s\n",
                 signal, status, strerror(errno));
        }
#endif
    }
}