2 * This software is part of the SBCL system. See the README file for
5 * This software is derived from the CMU CL system, which was
6 * written at Carnegie Mellon University and released into the
7 * public domain. The software is in the public domain and is
8 * provided with absolutely no warranty. See the COPYING and CREDITS
9 * files for more information.
17 #ifndef LISP_FEATURE_WIN32
22 #include <sys/types.h>
23 #ifndef LISP_FEATURE_WIN32
27 #ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
28 #include <mach/mach.h>
29 #include <mach/mach_error.h>
30 #include <mach/mach_types.h>
34 #include "validate.h" /* for BINDING_STACK_SIZE etc */
37 #include "target-arch-os.h"
41 #include "genesis/cons.h"
42 #include "genesis/fdefn.h"
43 #include "interr.h" /* for lose() */
45 #include "gc-internal.h"
47 #include "pseudo-atomic.h"
48 #include "interrupt.h"
51 #ifdef LISP_FEATURE_SB_THREAD
53 #ifdef LISP_FEATURE_OPENBSD
54 #include <pthread_np.h>
57 #ifdef LISP_FEATURE_SUNOS
61 #ifdef LISP_FEATURE_WIN32
62 # define IMMEDIATE_POST_MORTEM
/* NOTE(review): this file is a fragmented extraction; statements are split
 * across lines and some original lines are missing. Tokens below are kept
 * byte-identical to the extraction. */
/* Bookkeeping needed to reclaim an exited thread's resources after the
 * fact: its pthread id, its pthread attr, and the base address of its
 * memory block. */
65 struct thread_post_mortem
{
66 os_thread_t os_thread
;
67 pthread_attr_t
*os_attr
;
68 os_vm_address_t os_address
;
/* Most recently queued post-mortem record, swapped in by
 * schedule_thread_post_mortem; volatile because it is exchanged
 * across threads. */
71 static struct thread_post_mortem
* volatile pending_thread_post_mortem
= 0;
/* Size in bytes of the per-thread TLS vector (same for all threads). */
74 int dynamic_values_bytes
=TLS_SIZE
*sizeof(lispobj
); /* same for all threads */
/* Head of the doubly-linked list of all live lisp threads. */
75 struct thread
*all_threads
;
77 #ifdef LISP_FEATURE_SB_THREAD
/* Protects all_threads; see gc_stop_the_world for the locking protocol. */
78 pthread_mutex_t all_threads_lock
= PTHREAD_MUTEX_INITIALIZER
;
/* Serializes pthread_create against post-mortem cleanup and
 * stop-the-world; see perform_thread_post_mortem. */
80 static pthread_mutex_t create_thread_lock
= PTHREAD_MUTEX_INITIALIZER
;
82 #ifdef LISP_FEATURE_GCC_TLS
/* Per-thread pointer to the current lisp thread struct (GCC __thread TLS). */
83 __thread
struct thread
*current_thread
;
/* pthread key marking threads that belong to the lisp runtime. */
85 pthread_key_t lisp_thread
= 0;
/* Assembly entry point that switches to the lisp control stack before
 * the first call into Lisp (x86/x86-64 only; sysv_abi on win64). */
88 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
89 extern lispobj
call_into_lisp_first_time(lispobj fun
, lispobj
*args
, int nargs
)
90 # ifdef LISP_FEATURE_X86_64
91 __attribute__((sysv_abi
))
/* Push TH onto the front of the all_threads list. Caller must hold
 * all_threads_lock. NOTE(review): the extraction is fragmented; the
 * closing `all_threads=th;` line (orig. 101) is not visible here. */
97 link_thread(struct thread
*th
)
99 if (all_threads
) all_threads
->prev
=th
;
100 th
->next
=all_threads
;
105 #ifdef LISP_FEATURE_SB_THREAD
/* Remove TH from the doubly-linked all_threads list. Caller must hold
 * all_threads_lock. The branches for head-of-list vs. interior node
 * are partially missing from this extraction (orig. lines 108-115). */
107 unlink_thread(struct thread
*th
)
110 th
->prev
->next
= th
->next
;
/* TH was the list head: advance all_threads past it. */
112 all_threads
= th
->next
;
114 th
->next
->prev
= th
->prev
;
117 #ifndef LISP_FEATURE_SB_SAFEPOINT
118 /* Only access thread state with blockables blocked. */
/* Read THREAD->state under the state semaphore, with blockable signals
 * masked for the duration; restores the old mask before returning.
 * (Declarations of `old` and `state`, and the return, are among the
 * lines missing from this extraction.) */
120 thread_state(struct thread
*thread
)
124 block_blockable_signals(&old
);
125 os_sem_wait(thread
->state_sem
, "thread_state");
126 state
= thread
->state
;
127 os_sem_post(thread
->state_sem
, "thread_state");
128 thread_sigmask(SIG_SETMASK
, &old
, NULL
);
/* Set THREAD->state to STATE under the state semaphore, and wake every
 * waiter that was blocked waiting for the state to leave its current
 * value: transitions into STOPPED/DEAD release the "not running"
 * waiters, transitions into RUNNING/DEAD release the "not stopped"
 * waiters. Blockable signals are masked throughout. */
133 set_thread_state(struct thread
*thread
, lispobj state
)
135 int i
, waitcount
= 0;
137 block_blockable_signals(&old
);
138 os_sem_wait(thread
->state_sem
, "set_thread_state");
139 if (thread
->state
!= state
) {
140 if ((STATE_STOPPED
==state
) ||
141 (STATE_DEAD
==state
)) {
/* Drain and wake everyone waiting for "no longer running". */
142 waitcount
= thread
->state_not_running_waitcount
;
143 thread
->state_not_running_waitcount
= 0;
144 for (i
=0; i
<waitcount
; i
++)
145 os_sem_post(thread
->state_not_running_sem
, "set_thread_state (not running)");
147 if ((STATE_RUNNING
==state
) ||
148 (STATE_DEAD
==state
)) {
/* Drain and wake everyone waiting for "no longer stopped". */
149 waitcount
= thread
->state_not_stopped_waitcount
;
150 thread
->state_not_stopped_waitcount
= 0;
151 for (i
=0; i
<waitcount
; i
++)
152 os_sem_post(thread
->state_not_stopped_sem
, "set_thread_state (not stopped)");
154 thread
->state
= state
;
156 os_sem_post(thread
->state_sem
, "set_thread_state");
157 thread_sigmask(SIG_SETMASK
, &old
, NULL
);
/* Block until THREAD->state is something other than STATE. Registers
 * itself on the matching waitcount under the state semaphore, releases
 * the semaphore, then sleeps on the corresponding "state changed"
 * semaphore which set_thread_state posts. Blockable signals are masked
 * throughout. */
161 wait_for_thread_state_change(struct thread
*thread
, lispobj state
)
165 block_blockable_signals(&old
);
167 os_sem_wait(thread
->state_sem
, "wait_for_thread_state_change");
168 if (thread
->state
== state
) {
/* Waiting for RUNNING to end: queue on the not-running semaphore. */
171 wait_sem
= thread
->state_not_running_sem
;
172 thread
->state_not_running_waitcount
++;
/* Waiting for STOPPED to end: queue on the not-stopped semaphore. */
175 wait_sem
= thread
->state_not_stopped_sem
;
176 thread
->state_not_stopped_waitcount
++;
179 lose("Invalid state in wait_for_thread_state_change: "OBJ_FMTX
"\n", state
);
/* Release the state lock before sleeping so set_thread_state can run. */
184 os_sem_post(thread
->state_sem
, "wait_for_thread_state_change");
186 os_sem_wait(wait_sem
, "wait_for_thread_state_change");
189 thread_sigmask(SIG_SETMASK
, &old
, NULL
);
191 #endif /* sb-safepoint */
192 #endif /* sb-thread */
/* Entry point for the very first lisp thread: performs per-thread OS
 * setup, installs guard pages, links the thread, then transfers control
 * into Lisp (via call_into_lisp_first_time on x86oids, which switches
 * stacks; via funcall0 elsewhere). Returns 1 on arch init failure. */
195 initial_thread_trampoline(struct thread
*th
)
198 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
199 lispobj
*args
= NULL
;
201 #ifdef LISP_FEATURE_SB_THREAD
/* Mark this pthread as a lisp thread for later identification. */
202 pthread_setspecific(lisp_thread
, (void *)1);
204 #if defined(THREADS_USING_GCSIGNAL) && (defined(LISP_FEATURE_PPC) || defined(LISP_FEATURE_ARM64))
205 /* SIG_STOP_FOR_GC defaults to blocked on PPC? */
206 unblock_gc_signals(0,0);
/* The function to run was stashed in no_tls_value_marker by the
 * creator; retrieve it and restore the marker. */
208 function
= th
->no_tls_value_marker
;
209 th
->no_tls_value_marker
= NO_TLS_VALUE_MARKER_WIDETAG
;
210 if(arch_os_thread_init(th
)==0) return 1;
212 th
->os_thread
=thread_self();
213 #ifndef LISP_FEATURE_WIN32
/* Arm the hard and soft guard pages on the three lisp stacks. */
214 protect_control_stack_hard_guard_page(1, NULL
);
216 protect_binding_stack_hard_guard_page(1, NULL
);
217 protect_alien_stack_hard_guard_page(1, NULL
);
218 #ifndef LISP_FEATURE_WIN32
219 protect_control_stack_guard_page(1, NULL
);
221 protect_binding_stack_guard_page(1, NULL
);
222 protect_alien_stack_guard_page(1, NULL
);
224 /* WIN32 has a special stack arrangment, calling
225 * call_into_lisp_first_time will put the new stack in the middle
226 * of the current stack */
227 #if !defined(LISP_FEATURE_WIN32) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
228 return call_into_lisp_first_time(function
,args
,0);
230 return funcall0(function
);
234 #ifdef LISP_FEATURE_SB_THREAD
236 # if defined(IMMEDIATE_POST_MORTEM)
/* Immediate variant (win32): the OS manages the stack, so the corpse's
 * resources can be released right here in the dying thread. */
239 * If this feature is set, we are running on a stack managed by the OS,
240 * and no fancy delays are required for anything. Just do it.
243 schedule_thread_post_mortem(struct thread
*corpse
)
/* Detach so no join is needed, destroy the attr, then free the whole
 * thread memory block. */
245 pthread_detach(pthread_self());
246 int result
= pthread_attr_destroy(corpse
->os_attr
);
248 #if defined(LISP_FEATURE_WIN32)
249 os_invalidate_free(corpse
->os_address
, THREAD_STRUCT_SIZE
);
251 os_invalidate(corpse
->os_address
, THREAD_STRUCT_SIZE
);
257 /* THREAD POST MORTEM CLEANUP
259 * Memory allocated for the thread stacks cannot be reclaimed while
260 * the thread is still alive, so we need a mechanism for post mortem
261 * cleanups. FIXME: We actually have three, for historical reasons as
262 * the saying goes. Do we really need three? Nikodemus guesses that
263 * not anymore, now that we properly call pthread_attr_destroy before
264 * freeing the stack. */
/* Heap-allocate a post-mortem record capturing everything needed to
 * reclaim CORPSE once it has actually exited. Returned pointer is
 * owned by the caller (freed by perform_thread_post_mortem's path). */
266 static struct thread_post_mortem
*
267 plan_thread_post_mortem(struct thread
*corpse
)
270 struct thread_post_mortem
*post_mortem
= malloc(sizeof(struct thread_post_mortem
));
/* malloc failure here is fatal; the corpse cannot be reclaimed later. */
271 gc_assert(post_mortem
);
272 post_mortem
->os_thread
= corpse
->os_thread
;
273 post_mortem
->os_attr
= corpse
->os_attr
;
274 post_mortem
->os_address
= corpse
->os_address
;
278 /* FIXME: When does this happen? */
/* Reclaim a dead thread's resources: join it, destroy its pthread attr,
 * and unmap its memory block. Serializes against pthread_create via
 * create_thread_lock (see comment below for why). */
284 perform_thread_post_mortem(struct thread_post_mortem
*post_mortem
)
286 #ifdef CREATE_POST_MORTEM_THREAD
287 pthread_detach(pthread_self());
291 /* The thread may exit before pthread_create() has finished
292 initialization and it may write into already unmapped
293 memory. This lock doesn't actually need to protect
294 anything, just to make sure that at least one call to
295 pthread_create() has finished.
297 Possible improvements: stash the address of the thread
298 struct for which a pthread is being created and don't lock
299 here if it's not the one being terminated. */
300 result
= pthread_mutex_lock(&create_thread_lock
);
301 gc_assert(result
== 0);
302 result
= pthread_mutex_unlock(&create_thread_lock
);
303 gc_assert(result
== 0);
/* Join releases the pthread's own resources; failure is fatal. */
305 if ((result
= pthread_join(post_mortem
->os_thread
, NULL
))) {
306 lose("Error calling pthread_join in perform_thread_post_mortem:\n%s",
309 if ((result
= pthread_attr_destroy(post_mortem
->os_attr
))) {
310 lose("Error calling pthread_attr_destroy in perform_thread_post_mortem:\n%s",
/* Finally unmap the stacks + struct thread block itself. */
313 os_invalidate(post_mortem
->os_address
, THREAD_STRUCT_SIZE
);
/* Deferred variant: record this corpse for later cleanup. Either a
 * dedicated cleanup thread is spawned (CREATE_POST_MORTEM_THREAD), or
 * the new record is atomically swapped into pending_thread_post_mortem
 * and the *previous* pending corpse is reclaimed now — delaying each
 * cleanup by one thread-exit so the dying thread's stack is no longer
 * in use when it is unmapped. */
319 schedule_thread_post_mortem(struct thread
*corpse
)
321 struct thread_post_mortem
*post_mortem
= NULL
;
323 post_mortem
= plan_thread_post_mortem(corpse
);
325 #ifdef CREATE_POST_MORTEM_THREAD
327 int result
= pthread_create(&thread
, NULL
, perform_thread_post_mortem
, post_mortem
);
/* Swap in the new record, reclaim the one that was pending. */
330 post_mortem
= (struct thread_post_mortem
*)
331 swap_lispobjs((lispobj
*)(void *)&pending_thread_post_mortem
,
332 (lispobj
)post_mortem
);
333 perform_thread_post_mortem(post_mortem
);
338 # endif /* !IMMEDIATE_POST_MORTEM */
340 /* Note: scribble must be stack-allocated */
/* Common setup for a thread entering the runtime (both lisp-created
 * threads and foreign threads attaching): arch init, guard pages
 * (when guardp), linking onto all_threads under all_threads_lock, and
 * safepoint bookkeeping. SCRIBBLE must live on this thread's stack for
 * the duration (its address is published as the safepoint CSP). */
342 init_new_thread(struct thread
*th
, init_thread_data
*scribble
, int guardp
)
346 pthread_setspecific(lisp_thread
, (void *)1);
347 if(arch_os_thread_init(th
)==0) {
348 /* FIXME: handle error */
349 lose("arch_os_thread_init failed\n");
352 th
->os_thread
=thread_self();
354 protect_control_stack_guard_page(1, NULL
);
355 protect_binding_stack_guard_page(1, NULL
);
356 protect_alien_stack_guard_page(1, NULL
);
357 /* Since GC can only know about this thread from the all_threads
358 * list and we're just adding this thread to it, there is no
359 * danger of deadlocking even with SIG_STOP_FOR_GC blocked (which
361 #ifdef LISP_FEATURE_SB_SAFEPOINT
/* Publish the C stack pointer for the safepoint machinery. */
362 *th
->csp_around_foreign_call
= (lispobj
)scribble
;
364 lock_ret
= pthread_mutex_lock(&all_threads_lock
);
365 gc_assert(lock_ret
== 0);
367 lock_ret
= pthread_mutex_unlock(&all_threads_lock
);
368 gc_assert(lock_ret
== 0);
370 /* Kludge: Changed the order of some steps between the safepoint/
371 * non-safepoint versions of this code. Can we unify this more?
373 #ifdef LISP_FEATURE_SB_SAFEPOINT
375 gc_state_wait(GC_NONE
);
377 push_gcing_safety(&scribble
->safety
);
/* Tear down what init_new_thread set up: flush the thread's allocation
 * regions into the page tables, unlink it from all_threads, mark it
 * DEAD, destroy its state semaphores, and dissociate the pthread from
 * its struct thread so arch_os_get_current_thread() returns NULL. */
382 undo_init_new_thread(struct thread
*th
, init_thread_data
*scribble
)
386 /* Kludge: Changed the order of some steps between the safepoint/
387 * non-safepoint versions of this code. Can we unify this more?
389 #ifdef LISP_FEATURE_SB_SAFEPOINT
390 block_blockable_signals(0);
391 gc_alloc_update_page_tables(BOXED_PAGE_FLAG
, &th
->alloc_region
);
392 #if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
393 gc_alloc_update_page_tables(BOXED_PAGE_FLAG
, &th
->sprof_alloc_region
);
395 pop_gcing_safety(&scribble
->safety
);
396 lock_ret
= pthread_mutex_lock(&all_threads_lock
);
397 gc_assert(lock_ret
== 0);
399 lock_ret
= pthread_mutex_unlock(&all_threads_lock
);
400 gc_assert(lock_ret
== 0);
/* Non-safepoint path: go DEAD first, then unlink under the lock. */
403 block_blockable_signals(0);
404 set_thread_state(th
, STATE_DEAD
);
406 /* SIG_STOP_FOR_GC is blocked and GC might be waiting for this
407 * thread, but since we are already dead it won't wait long. */
408 lock_ret
= pthread_mutex_lock(&all_threads_lock
);
409 gc_assert(lock_ret
== 0);
411 gc_alloc_update_page_tables(BOXED_PAGE_FLAG
, &th
->alloc_region
);
412 #if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
413 gc_alloc_update_page_tables(BOXED_PAGE_FLAG
, &th
->sprof_alloc_region
);
/* NOTE(review): the unlock result is not captured here, yet the next
 * line asserts lock_ret == 0 — that checks the stale value from the
 * earlier lock. Looks like `lock_ret =` was dropped; confirm against
 * the complete source before relying on this assertion. */
416 pthread_mutex_unlock(&all_threads_lock
);
417 gc_assert(lock_ret
== 0);
420 arch_os_thread_cleanup(th
);
422 #ifndef LISP_FEATURE_SB_SAFEPOINT
/* The state semaphores are only used by the signal-based stop-the-world
 * protocol; release them now that the thread is DEAD. */
423 os_sem_destroy(th
->state_sem
);
424 os_sem_destroy(th
->state_not_running_sem
);
425 os_sem_destroy(th
->state_not_stopped_sem
);
429 #ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
430 mach_lisp_thread_destroy(th
);
433 #if defined(LISP_FEATURE_WIN32)
/* Close the per-thread win32 event handles. */
436 (int) (sizeof(th
->private_events
.events
)/
437 sizeof(th
->private_events
.events
[0])); ++i
) {
438 CloseHandle(th
->private_events
.events
[i
]);
440 TlsSetValue(OUR_TLS_INDEX
,NULL
);
443 /* Undo the association of the current pthread to its `struct thread',
444 * such that we can call arch_os_get_current_thread() later in this
445 * thread and cleanly get back NULL. */
446 #ifdef LISP_FEATURE_GCC_TLS
447 current_thread
= NULL
;
449 pthread_setspecific(specials
, NULL
);
453 /* this is the first thing that runs in the child (which is why the
454 * silly calling convention). Basically it calls the user's requested
455 * lisp function after doing arch_os_thread_init and whatever other
456 * bookkeeping needs to be done
/* pthread start routine for lisp-created threads: verify the inherited
 * signal mask, fetch the lisp function stashed in no_tls_value_marker,
 * run it bracketed by init_new_thread/undo_init_new_thread, then queue
 * the corpse for post-mortem cleanup. */
459 new_thread_trampoline(struct thread
*th
)
462 init_thread_data scribble
;
464 FSHOW((stderr
,"/creating thread %lu\n", thread_self()));
465 check_deferrables_blocked_or_lose(0);
466 #ifndef LISP_FEATURE_SB_SAFEPOINT
467 check_gc_signals_unblocked_or_lose(0);
470 lispobj function
= th
->no_tls_value_marker
;
471 th
->no_tls_value_marker
= NO_TLS_VALUE_MARKER_WIDETAG
;
472 init_new_thread(th
, &scribble
, 1);
473 result
= funcall0(function
);
474 undo_init_new_thread(th
, &scribble
);
476 schedule_thread_post_mortem(th
);
478 FSHOW((stderr
,"/exiting thread %lu\n", thread_self()));
/* Forward declarations used by the attach/detach code below. */
482 static struct thread
*create_thread_struct(lispobj
);
483 static void free_thread_struct(struct thread
*th
);
/* Turn a foreign (non-lisp) pthread into a temporary lisp thread so it
 * can run a callback: allocate a struct thread, discover this pthread's
 * existing C stack bounds via the platform-specific API, and run
 * init_new_thread (without guard pages). GC is inhibited before the
 * first consing because this thread is only half set up. */
486 attach_os_thread(init_thread_data
*scribble
)
488 os_thread_t os
= pthread_self();
489 odxprint(misc
, "attach_os_thread: attaching to %p", os
);
491 struct thread
*th
= create_thread_struct(NIL
);
492 block_deferrable_signals(&scribble
->oldset
);
493 th
->no_tls_value_marker
= NO_TLS_VALUE_MARKER_WIDETAG
;
494 /* We don't actually want a pthread_attr here, but rather than add
495 * `if's to the post-mostem, let's just keep that code happy by
496 * keeping it initialized: */
497 pthread_attr_init(th
->os_attr
);
499 #if !defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_C_STACK_IS_CONTROL_STACK)
500 /* On windows, arch_os_thread_init will take care of finding the
/* Per-OS stack discovery: each branch fills stack_addr/stack_size. */
504 #ifdef LISP_FEATURE_OPENBSD
506 pthread_stackseg_np(os
, &stack
);
507 stack_size
= stack
.ss_size
;
/* ss_sp is the stack TOP on these platforms; subtract size for base. */
508 stack_addr
= (void*)((size_t)stack
.ss_sp
- stack_size
);
509 #elif defined LISP_FEATURE_SUNOS
511 thr_stksegment(&stack
);
512 stack_size
= stack
.ss_size
;
513 stack_addr
= (void*)((size_t)stack
.ss_sp
- stack_size
);
514 #elif defined(LISP_FEATURE_DARWIN)
515 stack_addr
= pthread_get_stackaddr_np(os
);
516 stack_size
= pthread_get_stacksize_np(os
);
519 #ifdef LISP_FEATURE_FREEBSD
520 pthread_attr_get_np(os
, &attr
);
/* Local prototype: glibc hides pthread_getattr_np behind _GNU_SOURCE. */
522 int pthread_getattr_np(pthread_t
, pthread_attr_t
*);
523 pthread_getattr_np(os
, &attr
);
525 pthread_attr_getstack(&attr
, &stack_addr
, &stack_size
);
526 pthread_attr_destroy(&attr
);
/* Record the foreign stack as this thread's lisp control stack. */
529 th
->control_stack_start
= stack_addr
;
530 th
->control_stack_end
= (void *) (((uintptr_t) stack_addr
) + stack_size
);
533 init_new_thread(th
, scribble
, 0);
535 /* We will be calling into Lisp soon, and the functions being called
536 * recklessly ignore the comment in target-thread which says that we
537 * must be careful to not cause GC while initializing a new thread.
538 * Since we first need to create a fresh thread object, it's really
539 * tempting to just perform such unsafe allocation though. So let's
540 * at least try to suppress GC before consing, and hope that it
542 bind_variable(GC_INHIBIT
, T
, th
);
545 = (uword_t
) th
->control_stack_end
- (uword_t
) th
->control_stack_start
;
546 odxprint(misc
, "attach_os_thread: attached %p as %p (0x%lx bytes stack)",
547 os
, th
, (long) stacksize
);
/* Inverse of attach_os_thread: unhook the current (foreign) thread from
 * the runtime, restore its original signal mask, and free the struct
 * thread allocated for it. */
551 detach_os_thread(init_thread_data
*scribble
)
553 struct thread
*th
= arch_os_get_current_thread();
554 odxprint(misc
, "detach_os_thread: detaching");
556 undo_init_new_thread(th
, scribble
);
558 odxprint(misc
, "deattach_os_thread: detached");
559 pthread_setspecific(lisp_thread
, (void *)0);
560 thread_sigmask(SIG_SETMASK
, &scribble
->oldset
, 0);
561 free_thread_struct(th
);
/* Entry point for alien callbacks. If the calling thread is not a lisp
 * thread, attach it for the duration of the callback and detach after;
 * otherwise dispatch straight to ENTER_ALIEN_CALLBACK (inside a
 * safepoint region on safepoint builds). */
565 callback_wrapper_trampoline(
566 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
567 /* On the x86oid backends, the assembly wrapper happens to not pass
568 * in ENTER_ALIEN_CALLBACK explicitly for safepoints. However, the
569 * platforms with precise GC are tricky enough already, and I want
570 * to minimize the read-time conditionals. For those platforms, I'm
571 * only replacing funcall3 with callback_wrapper_trampoline while
572 * keeping the arguments unchanged. --DFL */
573 lispobj
__attribute__((__unused__
)) fun
,
575 lispobj arg0
, lispobj arg1
, lispobj arg2
)
577 #if defined(LISP_FEATURE_WIN32)
578 pthread_np_notice_thread();
580 struct thread
* th
= arch_os_get_current_thread();
581 if (!th
) { /* callback invoked in non-lisp thread */
582 init_thread_data scribble
;
583 attach_os_thread(&scribble
);
584 funcall3(StaticSymbolFunction(ENTER_FOREIGN_CALLBACK
), arg0
,arg1
,arg2
);
585 detach_os_thread(&scribble
);
589 #ifdef LISP_FEATURE_WIN32
590 /* arg2 is the pointer to a return value, which sits on the stack */
591 th
->carried_base_pointer
= (os_context_register_t
) *(((void**)arg2
)-1);
594 #ifdef LISP_FEATURE_SB_SAFEPOINT
595 WITH_GC_AT_SAFEPOINTS_ONLY()
598 funcall3(StaticSymbolFunction(ENTER_ALIEN_CALLBACK
), arg0
, arg1
, arg2
);
602 #endif /* LISP_FEATURE_SB_THREAD */
/* Release the single memory block holding a thread's stacks and struct
 * thread (allocated by create_thread_struct). */
604 static void __attribute__((unused
))
605 free_thread_struct(struct thread
*th
)
607 #if defined(LISP_FEATURE_WIN32)
608 os_invalidate_free((os_vm_address_t
) th
->os_address
, THREAD_STRUCT_SIZE
);
610 os_invalidate((os_vm_address_t
) th
->os_address
, THREAD_STRUCT_SIZE
);
614 #ifdef LISP_FEATURE_SB_THREAD
615 /* FIXME: should be MAX_INTERRUPTS -1 ? */
/* First TLS index available for lisp special bindings: the slots below
 * it are reserved for interrupt contexts and the struct thread itself. */
616 const unsigned int tls_index_start
=
617 MAX_INTERRUPTS
+ sizeof(struct thread
)/sizeof(lispobj
);
620 /* this is called from any other thread to create the new one, and
621 * initialize all parts of it that can be initialized from another
/* Allocate and initialize a new struct thread plus all of its stacks
 * in one aligned THREAD_STRUCT_SIZE block; the lisp function to run is
 * stashed in no_tls_value_marker for the trampoline to pick up. This
 * runs in the *creating* thread. */
625 static struct thread
*
626 create_thread_struct(lispobj initial_function
) {
627 union per_thread_data
*per_thread
;
628 struct thread
*th
=0; /* subdue gcc */
630 char *aligned_spaces
=0;
631 #if defined(LISP_FEATURE_SB_THREAD) || defined(LISP_FEATURE_WIN32)
635 /* May as well allocate all the spaces at once: it saves us from
636 * having to decide what to do if only some of the allocations
637 * succeed. SPACES must be appropriately aligned, since the GC
638 * expects the control stack to start at a page boundary -- and
639 * the OS may have even more rigorous requirements. We can't rely
640 * on the alignment passed from os_validate, since that might
641 * assume the current (e.g. 4k) pagesize, while we calculate with
642 * the biggest (e.g. 64k) pagesize allowed by the ABI. */
643 spaces
= os_allocate(THREAD_STRUCT_SIZE
);
646 /* Aligning up is safe as THREAD_STRUCT_SIZE has
647 * THREAD_ALIGNMENT_BYTES padding. */
648 aligned_spaces
= (void *)((((uword_t
)(char *)spaces
)
649 + THREAD_ALIGNMENT_BYTES
-1)
650 &~(uword_t
)(THREAD_ALIGNMENT_BYTES
-1));
/* Block layout: control stack, then (per fragments below) binding
 * stack, alien stack, csp page, and the per-thread data at the end. */
653 thread_control_stack_size
+
656 per_thread
=(union per_thread_data
*)
657 (csp_page
+ THREAD_CSP_PAGE_SIZE
);
659 #ifdef LISP_FEATURE_SB_THREAD
/* Poison every TLS slot so unbound specials are detectable. */
660 for(i
= 0; i
< (dynamic_values_bytes
/ sizeof(lispobj
)); i
++)
661 per_thread
->dynamic_values
[i
] = NO_TLS_VALUE_MARKER_WIDETAG
;
664 th
=&per_thread
->thread
;
665 th
->os_address
= spaces
;
666 th
->control_stack_start
= (lispobj
*)aligned_spaces
;
667 th
->binding_stack_start
=
668 (lispobj
*)((char*)th
->control_stack_start
+thread_control_stack_size
);
669 th
->control_stack_end
= th
->binding_stack_start
;
670 th
->control_stack_guard_page_protected
= T
;
671 th
->alien_stack_start
=
672 (lispobj
*)((char*)th
->binding_stack_start
+BINDING_STACK_SIZE
);
673 set_binding_stack_pointer(th
,th
->binding_stack_start
);
677 #ifdef LISP_FEATURE_SB_SAFEPOINT
678 # ifdef LISP_FEATURE_WIN32
679 th
->carried_base_pointer
= 0;
681 # ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
682 th
->pc_around_foreign_call
= 0;
684 th
->csp_around_foreign_call
= csp_page
;
/* Non-lisp-visible per-thread data lives after the TLS vector. */
687 struct nonpointer_thread_data
*nonpointer_data
688 = (void *) &per_thread
->dynamic_values
[TLS_SIZE
];
690 th
->interrupt_data
= &nonpointer_data
->interrupt_data
;
692 #ifdef LISP_FEATURE_SB_THREAD
693 th
->os_attr
= &nonpointer_data
->os_attr
;
694 # ifndef LISP_FEATURE_SB_SAFEPOINT
/* State semaphores for the signal-based stop-the-world protocol:
 * state_sem is a 1-token mutex, the other two start empty. */
695 th
->state_sem
= &nonpointer_data
->state_sem
;
696 th
->state_not_running_sem
= &nonpointer_data
->state_not_running_sem
;
697 th
->state_not_stopped_sem
= &nonpointer_data
->state_not_stopped_sem
;
698 os_sem_init(th
->state_sem
, 1);
699 os_sem_init(th
->state_not_running_sem
, 0);
700 os_sem_init(th
->state_not_stopped_sem
, 0);
701 th
->state_not_running_waitcount
= 0;
702 th
->state_not_stopped_waitcount
= 0;
706 th
->state
=STATE_RUNNING
;
707 #ifdef ALIEN_STACK_GROWS_DOWNWARD
708 th
->alien_stack_pointer
=(lispobj
*)((char*)th
->alien_stack_start
709 + ALIEN_STACK_SIZE
-N_WORD_BYTES
);
711 th
->alien_stack_pointer
=(lispobj
*)((char*)th
->alien_stack_start
);
714 #ifdef LISP_FEATURE_SB_THREAD
715 th
->pseudo_atomic_bits
=0;
716 #elif defined LISP_FEATURE_GENCGC
717 clear_pseudo_atomic_atomic(th
);
718 clear_pseudo_atomic_interrupted(th
);
721 #ifdef LISP_FEATURE_GENCGC
722 gc_set_region_empty(&th
->alloc_region
);
723 # if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
724 gc_set_region_empty(&th
->sprof_alloc_region
);
727 #ifdef LISP_FEATURE_SB_THREAD
728 /* This parallels the same logic in globals.c for the
729 * single-threaded foreign_function_call_active, KLUDGE and
731 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
732 th
->foreign_function_call_active
= 0;
734 th
->foreign_function_call_active
= 1;
738 #ifndef LISP_FEATURE_SB_THREAD
739 /* the tls-points-into-struct-thread trick is only good for threaded
740 * sbcl, because unithread sbcl doesn't have tls. So, we copy the
741 * appropriate values from struct thread here, and make sure that
742 * we use the appropriate SymbolValue macros to access any of the
743 * variable quantities from the C runtime. It's not quite OAOOM,
744 * it just feels like it */
745 SetSymbolValue(BINDING_STACK_START
,(lispobj
)th
->binding_stack_start
,th
);
746 SetSymbolValue(CONTROL_STACK_START
,(lispobj
)th
->control_stack_start
,th
);
747 SetSymbolValue(CONTROL_STACK_END
,(lispobj
)th
->control_stack_end
,th
);
748 #if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
749 SetSymbolValue(ALIEN_STACK_POINTER
,(lispobj
)th
->alien_stack_pointer
,th
);
/* Establish the initial dynamic bindings every lisp thread needs. */
752 bind_variable(CURRENT_CATCH_BLOCK
,make_fixnum(0),th
);
753 bind_variable(CURRENT_UNWIND_PROTECT_BLOCK
,make_fixnum(0),th
);
754 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX
,make_fixnum(0),th
);
755 bind_variable(INTERRUPT_PENDING
, NIL
,th
);
756 bind_variable(INTERRUPTS_ENABLED
,T
,th
);
757 bind_variable(ALLOW_WITH_INTERRUPTS
,T
,th
);
758 bind_variable(GC_PENDING
,NIL
,th
);
759 bind_variable(ALLOC_SIGNAL
,NIL
,th
);
760 #ifdef PINNED_OBJECTS
761 bind_variable(PINNED_OBJECTS
,NIL
,th
);
763 #ifdef LISP_FEATURE_SB_THREAD
764 bind_variable(STOP_FOR_GC_PENDING
,NIL
,th
);
766 #if defined(LISP_FEATURE_SB_SAFEPOINT)
767 bind_variable(GC_SAFE
,NIL
,th
);
768 bind_variable(IN_SAFEPOINT
,NIL
,th
);
770 #ifdef LISP_FEATURE_SB_THRUPTION
771 bind_variable(THRUPTION_PENDING
,NIL
,th
);
772 bind_variable(RESTART_CLUSTERS
,NIL
,th
);
774 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
775 access_control_stack_pointer(th
)=th
->control_stack_start
;
778 th
->interrupt_data
->pending_handler
= 0;
779 th
->interrupt_data
->gc_blocked_deferrables
= 0;
780 #ifdef GENCGC_IS_PRECISE
781 th
->interrupt_data
->allocation_trap_context
= 0;
/* Stash the start function where the trampoline will find it. */
783 th
->no_tls_value_marker
=initial_function
;
785 #if defined(LISP_FEATURE_WIN32)
786 for (i
= 0; i
<sizeof(th
->private_events
.events
)/
787 sizeof(th
->private_events
.events
[0]); ++i
) {
788 th
->private_events
.events
[i
] = CreateEvent(NULL
,FALSE
,FALSE
,NULL
);
790 th
->synchronous_io_handle_and_flag
= 0;
/* Build the first thread's struct and jump into Lisp via the initial
 * trampoline; never returns on success, loses on allocation failure. */
796 void create_initial_thread(lispobj initial_function
) {
797 struct thread
*th
=create_thread_struct(initial_function
);
798 #ifdef LISP_FEATURE_SB_THREAD
/* Create the "is a lisp thread" pthread key exactly once, here. */
799 pthread_key_create(&lisp_thread
, 0);
802 initial_thread_trampoline(th
); /* no return */
803 } else lose("can't create initial thread\n");
806 #ifdef LISP_FEATURE_SB_THREAD
808 #ifndef __USE_XOPEN2K
/* Prototype for pthread_attr_setstack when the libc headers hide it. */
809 extern int pthread_attr_setstack (pthread_attr_t
*__attr
, void *__stackaddr
,
/* Spawn the OS thread for TH: with deferrable signals blocked and
 * create_thread_lock held, point the pthread attr at the pre-allocated
 * control (or alien) stack and pthread_create new_thread_trampoline.
 * Returns a boolean success flag; the new thread id goes to *kid_tid. */
813 boolean
create_os_thread(struct thread
*th
,os_thread_t
*kid_tid
)
815 /* The new thread inherits the restrictive signal mask set here,
816 * and enables signals again when it is set up properly. */
819 int retcode
= 0, initcode
;
821 FSHOW_SIGNAL((stderr
,"/create_os_thread: creating new thread\n"));
823 /* Blocking deferrable signals is enough, no need to block
824 * SIG_STOP_FOR_GC because the child process is not linked onto
825 * all_threads until it's ready. */
826 block_deferrable_signals(&oldset
);
828 /* See perform_thread_post_mortem for at least one reason why this lock is neccessary */
829 retcode
= pthread_mutex_lock(&create_thread_lock
);
830 gc_assert(retcode
== 0);
831 FSHOW_SIGNAL((stderr
,"/create_os_thread: got lock\n"));
/* Short-circuit chain: any failing setup step skips pthread_create. */
833 if((initcode
= pthread_attr_init(th
->os_attr
)) ||
834 /* call_into_lisp_first_time switches the stack for the initial
835 * thread. For the others, we use this. */
836 #if defined(LISP_FEATURE_WIN32)
837 (pthread_attr_setstacksize(th
->os_attr
, thread_control_stack_size
)) ||
839 # if defined(LISP_FEATURE_C_STACK_IS_CONTROL_STACK)
840 (pthread_attr_setstack(th
->os_attr
,th
->control_stack_start
,
841 thread_control_stack_size
)) ||
843 (pthread_attr_setstack(th
->os_attr
,th
->alien_stack_start
,
844 ALIEN_STACK_SIZE
)) ||
847 (retcode
= pthread_create
848 (kid_tid
,th
->os_attr
,(void *(*)(void *))new_thread_trampoline
,th
))) {
849 FSHOW_SIGNAL((stderr
, "init = %d\n", initcode
));
850 FSHOW_SIGNAL((stderr
, "pthread_create returned %d, errno %d\n",
853 perror("create_os_thread");
858 retcode
= pthread_mutex_unlock(&create_thread_lock
);
859 gc_assert(retcode
== 0);
860 FSHOW_SIGNAL((stderr
,"/create_os_thread: released lock\n"));
862 thread_sigmask(SIG_SETMASK
,&oldset
,0);
/* Public entry for creating a lisp thread: must be called with lisp
 * interrupts disabled (asserted), allocates the thread struct, spawns
 * the OS thread, and frees the struct again if spawning fails.
 * Returns the new thread id, or (per the fragments) 0 on failure. */
866 os_thread_t
create_thread(lispobj initial_function
) {
867 struct thread
*th
, *thread
= arch_os_get_current_thread();
868 os_thread_t kid_tid
= 0;
870 /* Must defend against async unwinds. */
871 if (SymbolValue(INTERRUPTS_ENABLED
, thread
) != NIL
)
872 lose("create_thread is not safe when interrupts are enabled.\n");
874 /* Assuming that a fresh thread struct has no lisp objects in it,
875 * linking it to all_threads can be left to the thread itself
876 * without fear of gc lossage. initial_function violates this
877 * assumption and must stay pinned until the child starts up. */
878 th
= create_thread_struct(initial_function
);
879 if (th
&& !create_os_thread(th
,&kid_tid
)) {
880 free_thread_struct(th
);
886 /* stopping the world is a two-stage process. From this thread we signal
887 * all the others with SIG_STOP_FOR_GC. The handler for this signal does
888 * the usual pseudo-atomic checks (we don't want to stop a thread while
889 * it's in the middle of allocation) then waits for another SIG_STOP_FOR_GC.
892 * (With SB-SAFEPOINT, see the definitions in safepoint.c instead.)
894 #ifndef LISP_FEATURE_SB_SAFEPOINT
896 /* To avoid deadlocks when gc stops the world all clients of each
897 * mutex must enable or disable SIG_STOP_FOR_GC for the duration of
898 * holding the lock, but they must agree on which. */
/* Suspend every other lisp thread: take create_thread_lock then
 * all_threads_lock (both held until gc_start_the_world releases them),
 * send SIG_STOP_FOR_GC to each RUNNING thread, then wait for every
 * thread to leave the RUNNING state. */
899 void gc_stop_the_world()
901 struct thread
*p
,*th
=arch_os_get_current_thread();
902 int status
, lock_ret
;
903 /* KLUDGE: Stopping the thread during pthread_create() causes deadlock
905 FSHOW_SIGNAL((stderr
,"/gc_stop_the_world:waiting on create_thread_lock\n"));
906 lock_ret
= pthread_mutex_lock(&create_thread_lock
);
907 gc_assert(lock_ret
== 0);
908 FSHOW_SIGNAL((stderr
,"/gc_stop_the_world:got create_thread_lock\n"));
909 FSHOW_SIGNAL((stderr
,"/gc_stop_the_world:waiting on lock\n"));
910 /* keep threads from starting while the world is stopped. */
/* NOTE(review): the trailing backslash below looks like a stray
 * line-continuation left in the source — harmless before a blank-ish
 * next line but worth confirming against the full file. */
911 lock_ret
= pthread_mutex_lock(&all_threads_lock
); \
912 gc_assert(lock_ret
== 0);
914 FSHOW_SIGNAL((stderr
,"/gc_stop_the_world:got lock\n"));
915 /* stop all other threads by sending them SIG_STOP_FOR_GC */
916 for(p
=all_threads
; p
; p
=p
->next
) {
917 gc_assert(p
->os_thread
!= 0);
918 FSHOW_SIGNAL((stderr
,"/gc_stop_the_world: thread=%lu, state=%x\n",
919 p
->os_thread
, thread_state(p
)));
920 if((p
!=th
) && ((thread_state(p
)==STATE_RUNNING
))) {
921 FSHOW_SIGNAL((stderr
,"/gc_stop_the_world: suspending thread %lu\n",
923 /* We already hold all_thread_lock, P can become DEAD but
924 * cannot exit, ergo it's safe to use pthread_kill. */
925 status
=pthread_kill(p
->os_thread
,SIG_STOP_FOR_GC
);
/* ESRCH-style failure path (branch structure partially missing). */
927 /* This thread has exited. */
928 gc_assert(thread_state(p
)==STATE_DEAD
);
930 lose("cannot send suspend thread=%lu: %d, %s\n",
931 p
->os_thread
,status
,strerror(status
));
935 FSHOW_SIGNAL((stderr
,"/gc_stop_the_world:signals sent\n"));
/* Second pass: block until every thread has acknowledged the stop. */
936 for(p
=all_threads
;p
;p
=p
->next
) {
940 "/gc_stop_the_world: waiting for thread=%lu: state=%x\n",
941 p
->os_thread
, thread_state(p
)));
942 wait_for_thread_state_change(p
, STATE_RUNNING
);
943 if (p
->state
== STATE_RUNNING
)
944 lose("/gc_stop_the_world: unexpected state");
947 FSHOW_SIGNAL((stderr
,"/gc_stop_the_world:end\n"));
/* Resume the world after GC: flip every non-DEAD thread from STOPPED
 * back to RUNNING, then release the two locks gc_stop_the_world took
 * (all_threads_lock first, then create_thread_lock). */
950 void gc_start_the_world()
952 struct thread
*p
,*th
=arch_os_get_current_thread();
954 /* if a resumed thread creates a new thread before we're done with
955 * this loop, the new thread will get consed on the front of
956 * all_threads, but it won't have been stopped so won't need
958 FSHOW_SIGNAL((stderr
,"/gc_start_the_world:begin\n"));
959 for(p
=all_threads
;p
;p
=p
->next
) {
960 gc_assert(p
->os_thread
!=0);
962 lispobj state
= thread_state(p
);
963 if (state
!= STATE_DEAD
) {
/* Anything not STOPPED here means the stop protocol broke. */
964 if(state
!= STATE_STOPPED
) {
965 lose("gc_start_the_world: wrong thread state is %d\n",
966 fixnum_value(state
));
968 FSHOW_SIGNAL((stderr
, "/gc_start_the_world: resuming %lu\n",
970 set_thread_state(p
, STATE_RUNNING
);
975 lock_ret
= pthread_mutex_unlock(&all_threads_lock
);
976 gc_assert(lock_ret
== 0);
977 lock_ret
= pthread_mutex_unlock(&create_thread_lock
);
978 gc_assert(lock_ret
== 0);
981 FSHOW_SIGNAL((stderr
,"/gc_start_the_world:end\n"));
984 #endif /* !LISP_FEATURE_SB_SAFEPOINT */
985 #endif /* !LISP_FEATURE_SB_THREAD */
/* thread_yield body fragment: defer to the scheduler on threaded builds. */
990 #ifdef LISP_FEATURE_SB_THREAD
991 return sched_yield();
/* Poke OS_THREAD so it notices a pending thruption/interruption; the
 * mechanism differs per platform (win32 special signal, plain SIGPIPE,
 * or the POSIX thruption path). */
998 wake_thread(os_thread_t os_thread
)
1000 #if defined(LISP_FEATURE_WIN32)
1001 return kill_safely(os_thread
, 1);
1002 #elif !defined(LISP_FEATURE_SB_THRUPTION)
1003 return kill_safely(os_thread
, SIGPIPE
);
1005 return wake_thread_posix(os_thread
);
1009 /* If the thread id given does not belong to a running thread (it has
1010 * exited or never even existed) pthread_kill _may_ fail with ESRCH,
1011 * but it is also allowed to just segfault, see
1012 * <http://udrepper.livejournal.com/16844.html>.
1014 * Relying on thread ids can easily backfire since ids are recycled
1015 * (NPTL recycles them extremely fast) so a signal can be sent to
1016 * another process if the one it was sent to exited.
1018 * For these reasons, we must make sure that the thread is still alive
1019 * when the pthread_kill is called and return if the thread is
1022 * Note (DFL, 2011-06-22): At the time of writing, this function is only
1023 * used for INTERRUPT-THREAD, hence the wake_thread special-case for
/* Deliver SIGNAL to OS_THREAD only while it is provably alive: self-
 * signal directly, otherwise walk all_threads under the lock (with
 * deferrable signals blocked) and signal only a matching live entry. */
1026 kill_safely(os_thread_t os_thread
, int signal
)
1028 FSHOW_SIGNAL((stderr
,"/kill_safely: %lu, %d\n", os_thread
, signal
));
1030 #ifdef LISP_FEATURE_SB_THREAD
1032 struct thread
*thread
;
1033 /* Frequent special case: resignalling to self. The idea is
1034 * that leave_region safepoint will acknowledge the signal, so
1035 * there is no need to take locks, roll thread to safepoint
1037 /* Kludge (on safepoint builds): At the moment, this isn't just
1038 * an optimization; rather it masks the fact that
1039 * gc_stop_the_world() grabs the all_threads mutex without
1040 * releasing it, and since we're not using recursive pthread
1041 * mutexes, the pthread_mutex_lock() around the all_threads loop
1042 * would go wrong. Why are we running interruptions while
1043 * stopping the world though? Test case is (:ASYNC-UNWIND
1044 * :SPECIALS), especially with s/10/100/ in both loops. */
1045 if (os_thread
== pthread_self()) {
1046 pthread_kill(os_thread
, signal
);
1047 #ifdef LISP_FEATURE_WIN32
1048 check_pending_thruptions(NULL
);
1053 /* pthread_kill is not async signal safe and we don't want to be
1054 * interrupted while holding the lock. */
1055 block_deferrable_signals(&oldset
);
1056 pthread_mutex_lock(&all_threads_lock
);
1057 for (thread
= all_threads
; thread
; thread
= thread
->next
) {
1058 if (thread
->os_thread
== os_thread
) {
/* Thread verified alive (list + lock), so pthread_kill is safe. */
1059 int status
= pthread_kill(os_thread
, signal
);
1061 lose("kill_safely: pthread_kill failed with %d\n", status
);
1062 #if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THRUPTION)
1063 wake_thread_win32(thread
);
1068 pthread_mutex_unlock(&all_threads_lock
);
1069 thread_sigmask(SIG_SETMASK
,&oldset
,0);
1074 #elif defined(LISP_FEATURE_WIN32)
/* Unithread build: only self-signalling makes sense. */
1079 lose("kill_safely: who do you want to kill? %d?\n", os_thread
);
1080 /* Dubious (as in don't know why it works) workaround for the
1081 * signal sometimes not being generated on darwin. */
1082 #ifdef LISP_FEATURE_DARWIN
1085 sigprocmask(SIG_BLOCK
, &deferrable_sigset
, &oldset
);
1086 status
= raise(signal
);
1087 sigprocmask(SIG_SETMASK
,&oldset
,0);
1090 status
= raise(signal
);
1095 lose("cannot raise signal %d, %d %s\n",
1096 signal
, status
, strerror(errno
));