/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

#include "sbcl.h"

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifndef LISP_FEATURE_WIN32
#include <sched.h>
#endif
#include <signal.h>
#include <stddef.h>
#include <errno.h>
#include <sys/types.h>
#ifndef LISP_FEATURE_WIN32
#include <sys/wait.h>
#endif

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_types.h>
#endif

#include "runtime.h"
#include "validate.h"           /* for CONTROL_STACK_SIZE etc */
#include "alloc.h"
#include "thread.h"
#include "arch.h"
#include "target-arch-os.h"
#include "os.h"
#include "globals.h"
#include "dynbind.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"
#include "interr.h"             /* for lose() */
#include "gc-internal.h"

#ifdef LISP_FEATURE_WIN32
/*
 * Win32 doesn't have SIGSTKSZ, and we're not switching stacks anyway,
 * so define it arbitrarily
 */
#define SIGSTKSZ 1024
#endif

#if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_SB_THREAD)
#define QUEUE_FREEABLE_THREAD_STACKS
#define LOCK_CREATE_THREAD
#endif

#ifdef LISP_FEATURE_FREEBSD
#define CREATE_CLEANUP_THREAD
#define LOCK_CREATE_THREAD
#endif

#define ALIEN_STACK_SIZE (1*1024*1024) /* 1Mb size chosen at random */

struct freeable_stack {
#ifdef QUEUE_FREEABLE_THREAD_STACKS
    struct freeable_stack *next;
#endif
    os_thread_t os_thread;
    os_vm_address_t os_address;
};
77 #ifdef QUEUE_FREEABLE_THREAD_STACKS
78 static struct freeable_stack * volatile freeable_stack_queue = 0;
79 static int freeable_stack_count = 0;
80 pthread_mutex_t freeable_stack_lock = PTHREAD_MUTEX_INITIALIZER;
81 #else
82 static struct freeable_stack * volatile freeable_stack = 0;
83 #endif
85 int dynamic_values_bytes=4096*sizeof(lispobj); /* same for all threads */
86 struct thread * volatile all_threads;
87 extern struct interrupt_data * global_interrupt_data;
89 #ifdef LISP_FEATURE_SB_THREAD
90 pthread_mutex_t all_threads_lock = PTHREAD_MUTEX_INITIALIZER;
91 #ifdef LOCK_CREATE_THREAD
92 static pthread_mutex_t create_thread_lock = PTHREAD_MUTEX_INITIALIZER;
93 #endif
94 #ifdef LISP_FEATURE_GCC_TLS
95 __thread struct thread *current_thread;
96 #endif
97 #endif
99 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
100 extern lispobj call_into_lisp_first_time(lispobj fun, lispobj *args, int nargs);
101 #endif
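
/* On x86oid platforms the initial thread's first call into Lisp goes
 * through call_into_lisp_first_time() rather than funcall0(); see
 * initial_thread_trampoline() below. */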

static void
link_thread(struct thread *th)
{
    if (all_threads) all_threads->prev=th;
    th->next=all_threads;
    th->prev=0;
    all_threads=th;
}

#ifdef LISP_FEATURE_SB_THREAD
static void
unlink_thread(struct thread *th)
{
    if (th->prev)
        th->prev->next = th->next;
    else
        all_threads = th->next;
    if (th->next)
        th->next->prev = th->prev;
}
#endif

static int
initial_thread_trampoline(struct thread *th)
{
    lispobj function;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    lispobj *args = NULL;
#endif
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) return 1;
    link_thread(th);
    th->os_thread=thread_self();
#ifndef LISP_FEATURE_WIN32
    protect_control_stack_guard_page(1);
#endif

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    return call_into_lisp_first_time(function,args,0);
#else
    return funcall0(function);
#endif
}

#define THREAD_STRUCT_SIZE (THREAD_CONTROL_STACK_SIZE + BINDING_STACK_SIZE + \
                            ALIEN_STACK_SIZE + dynamic_values_bytes +        \
                            32 * SIGSTKSZ +                                  \
                            BACKEND_PAGE_SIZE)
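
/* Everything a thread needs lives in one os_validate()d block of
 * THREAD_STRUCT_SIZE bytes: up to BACKEND_PAGE_SIZE of alignment
 * padding, then the control stack, the binding stack, the alien
 * stack, and the per_thread data (the TLS value vector, whose first
 * slots struct thread overlays), with 32 * SIGSTKSZ of headroom at
 * the end; see create_thread_struct() for the layout arithmetic. */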

#ifdef LISP_FEATURE_SB_THREAD

#ifdef QUEUE_FREEABLE_THREAD_STACKS

static void
queue_freeable_thread_stack(struct thread *thread_to_be_cleaned_up)
{
    struct freeable_stack *new_freeable_stack = 0;
    if (thread_to_be_cleaned_up) {
        /* FIXME: os_validate is mmap -- for small things like these
         * malloc would probably perform better. */
        new_freeable_stack = (struct freeable_stack *)
            os_validate(0, sizeof(struct freeable_stack));
        new_freeable_stack->next = NULL;
        new_freeable_stack->os_thread = thread_to_be_cleaned_up->os_thread;
        new_freeable_stack->os_address = thread_to_be_cleaned_up->os_address;
        pthread_mutex_lock(&freeable_stack_lock);
        if (freeable_stack_queue) {
            struct freeable_stack *next;
            next = freeable_stack_queue;
            while (next->next) {
                next = next->next;
            }
            next->next = new_freeable_stack;
        } else {
            freeable_stack_queue = new_freeable_stack;
        }
        freeable_stack_count++;
        pthread_mutex_unlock(&freeable_stack_lock);
    }
}

#define FREEABLE_STACK_QUEUE_SIZE 4
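
/* New entries are appended at the tail by queue_freeable_thread_stack()
 * above; once more than FREEABLE_STACK_QUEUE_SIZE of them have piled
 * up, each call below reaps one stack from the head of the queue. */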

static void
free_freeable_stacks() {
    if (freeable_stack_queue && (freeable_stack_count > FREEABLE_STACK_QUEUE_SIZE)) {
        struct freeable_stack* old;
        pthread_mutex_lock(&freeable_stack_lock);
        old = freeable_stack_queue;
        freeable_stack_queue = old->next;
        freeable_stack_count--;
        gc_assert(pthread_join(old->os_thread, NULL) == 0);
        FSHOW((stderr, "freeing thread %x stack\n", old->os_thread));
        os_invalidate(old->os_address, THREAD_STRUCT_SIZE);
        os_invalidate((os_vm_address_t)old, sizeof(struct freeable_stack));
        pthread_mutex_unlock(&freeable_stack_lock);
    }
}

#elif defined(CREATE_CLEANUP_THREAD)
static void *
cleanup_thread(void *arg)
{
    struct freeable_stack *freeable = arg;
    pthread_t self = pthread_self();

    FSHOW((stderr, "/cleaner thread(%p): joining %p\n",
           self, freeable->os_thread));
    gc_assert(pthread_join(freeable->os_thread, NULL) == 0);
    FSHOW((stderr, "/cleaner thread(%p): free stack %p\n",
           self, freeable->os_address));
    os_invalidate(freeable->os_address, THREAD_STRUCT_SIZE);
    free(freeable);

    pthread_detach(self);

    return NULL;
}

static void
create_cleanup_thread(struct thread *thread_to_be_cleaned_up)
{
    pthread_t thread;
    int result;

    if (thread_to_be_cleaned_up) {
        struct freeable_stack *freeable =
            malloc(sizeof(struct freeable_stack));
        gc_assert(freeable != NULL);
        freeable->os_thread = thread_to_be_cleaned_up->os_thread;
        freeable->os_address =
            (os_vm_address_t) thread_to_be_cleaned_up->os_address;
        result = pthread_create(&thread, NULL, cleanup_thread, freeable);
        gc_assert(result == 0);
    }
}

#else
static void
free_thread_stack_later(struct thread *thread_to_be_cleaned_up)
{
    struct freeable_stack *new_freeable_stack = 0;
    if (thread_to_be_cleaned_up) {
        new_freeable_stack = (struct freeable_stack *)
            os_validate(0, sizeof(struct freeable_stack));
        new_freeable_stack->os_thread = thread_to_be_cleaned_up->os_thread;
        new_freeable_stack->os_address = (os_vm_address_t)
            thread_to_be_cleaned_up->os_address;
    }
    /* Atomically swap the new entry into the single freeable_stack
     * slot and take ownership of its previous occupant, so no lock is
     * needed and at most one stack is ever pending. */
    new_freeable_stack = (struct freeable_stack *)
        swap_lispobjs((lispobj *)(void *)&freeable_stack,
                      (lispobj)new_freeable_stack);
    if (new_freeable_stack) {
        FSHOW((stderr,"/reaping %p\n", (void*) new_freeable_stack->os_thread));
        /* Under NPTL pthread_join really waits until the thread
         * exits and the stack can be safely freed. This is sadly not
         * mandated by the pthread spec. */
        gc_assert(pthread_join(new_freeable_stack->os_thread, NULL) == 0);
        os_invalidate(new_freeable_stack->os_address, THREAD_STRUCT_SIZE);
        os_invalidate((os_vm_address_t) new_freeable_stack,
                      sizeof(struct freeable_stack));
    }
}

#endif
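
/* Exactly one of the three stack-reclamation strategies above is
 * compiled in: a queue of joinable stacks on Darwin
 * (QUEUE_FREEABLE_THREAD_STACKS), a dedicated cleanup thread on
 * FreeBSD (CREATE_CLEANUP_THREAD), and otherwise a single lock-free
 * slot whose previous occupant is reaped by the next dying thread. */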

/* this is the first thing that runs in the child (which is why the
 * silly calling convention). Basically it calls the user's requested
 * lisp function after doing arch_os_thread_init and whatever other
 * bookkeeping needs to be done
 */
static int
new_thread_trampoline(struct thread *th)
{
    lispobj function;
    int result, lock_ret;

    FSHOW((stderr,"/creating thread %lu\n", thread_self()));
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) {
        /* FIXME: handle error */
        lose("arch_os_thread_init failed\n");
    }

    th->os_thread=thread_self();
    protect_control_stack_guard_page(1);
    /* Since GC can only know about this thread from the all_threads
     * list and we're just adding this thread to it there is no danger
     * of deadlocking even with SIG_STOP_FOR_GC blocked (which it is
     * not). */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    link_thread(th);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    result = funcall0(function);

    /* Block GC */
    block_blockable_signals();
    th->state=STATE_DEAD;

    /* SIG_STOP_FOR_GC is blocked and GC might be waiting for this
     * thread, but since we are already dead it won't wait long. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    gc_alloc_update_page_tables(0, &th->alloc_region);
    unlink_thread(th);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    if(th->tls_cookie>=0) arch_os_thread_cleanup(th);
    os_invalidate((os_vm_address_t)th->interrupt_data,
                  (sizeof (struct interrupt_data)));

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
    FSHOW((stderr, "Deallocating mach port %x\n", THREAD_STRUCT_TO_EXCEPTION_PORT(th)));
    mach_port_move_member(mach_task_self(),
                          THREAD_STRUCT_TO_EXCEPTION_PORT(th),
                          MACH_PORT_NULL);
    mach_port_deallocate(mach_task_self(),
                         THREAD_STRUCT_TO_EXCEPTION_PORT(th));
    mach_port_destroy(mach_task_self(),
                      THREAD_STRUCT_TO_EXCEPTION_PORT(th));
#endif

#ifdef QUEUE_FREEABLE_THREAD_STACKS
    queue_freeable_thread_stack(th);
#elif defined(CREATE_CLEANUP_THREAD)
    create_cleanup_thread(th);
#else
    free_thread_stack_later(th);
#endif

    FSHOW((stderr,"/exiting thread %p\n", thread_self()));
    return result;
}

#endif /* LISP_FEATURE_SB_THREAD */

static void
free_thread_struct(struct thread *th)
{
    if (th->interrupt_data)
        os_invalidate((os_vm_address_t) th->interrupt_data,
                      (sizeof (struct interrupt_data)));
    os_invalidate((os_vm_address_t) th->os_address,
                  THREAD_STRUCT_SIZE);
}

/* this is called from any other thread to create the new one, and
 * initialize all parts of it that can be initialized from another
 * thread
 */
static struct thread *
create_thread_struct(lispobj initial_function) {
    union per_thread_data *per_thread;
    struct thread *th=0;        /* subdue gcc */
    void *spaces=0;
    void *aligned_spaces=0;
#ifdef LISP_FEATURE_SB_THREAD
    int i;
#endif

    /* May as well allocate all the spaces at once: it saves us from
     * having to decide what to do if only some of the allocations
     * succeed. SPACES must be page-aligned, since the GC expects the
     * control stack to start at a page boundary. We can't rely on the
     * alignment passed from os_validate, since that might assume the
     * current (e.g. 4k) pagesize, while we calculate with the biggest
     * (e.g. 64k) pagesize allowed by the ABI. */
    spaces=os_validate(0, THREAD_STRUCT_SIZE);
    if(!spaces)
        return NULL;
    /* Aligning up is safe as THREAD_STRUCT_SIZE has BACKEND_PAGE_SIZE
     * padding. */
    aligned_spaces = (void *)((((unsigned long)(char *)spaces)
                               + BACKEND_PAGE_SIZE - 1)
                              & ~(unsigned long)(BACKEND_PAGE_SIZE - 1));
    per_thread=(union per_thread_data *)
        (aligned_spaces+
         THREAD_CONTROL_STACK_SIZE+
         BINDING_STACK_SIZE+
         ALIEN_STACK_SIZE);

#ifdef LISP_FEATURE_SB_THREAD
    for(i = 0; i < (dynamic_values_bytes / sizeof(lispobj)); i++)
        per_thread->dynamic_values[i] = NO_TLS_VALUE_MARKER_WIDETAG;
    if (all_threads == 0) {
        if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG) {
            SetSymbolValue
                (FREE_TLS_INDEX,
                 /* FIXME: should be MAX_INTERRUPTS -1 ? */
                 make_fixnum(MAX_INTERRUPTS+
                             sizeof(struct thread)/sizeof(lispobj)),
                 0);
            SetSymbolValue(TLS_INDEX_LOCK,make_fixnum(0),0);
        }
#define STATIC_TLS_INIT(sym,field) \
  ((struct symbol *)(sym-OTHER_POINTER_LOWTAG))->tls_index= \
  make_fixnum(THREAD_SLOT_OFFSET_WORDS(field))

        STATIC_TLS_INIT(BINDING_STACK_START,binding_stack_start);
        STATIC_TLS_INIT(BINDING_STACK_POINTER,binding_stack_pointer);
        STATIC_TLS_INIT(CONTROL_STACK_START,control_stack_start);
        STATIC_TLS_INIT(CONTROL_STACK_END,control_stack_end);
        STATIC_TLS_INIT(ALIEN_STACK,alien_stack_pointer);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
        STATIC_TLS_INIT(PSEUDO_ATOMIC_BITS,pseudo_atomic_bits);
#endif
#undef STATIC_TLS_INIT
    }
#endif

    th=&per_thread->thread;
    th->os_address = spaces;
    th->control_stack_start = aligned_spaces;
    th->binding_stack_start=
        (lispobj*)((void*)th->control_stack_start+THREAD_CONTROL_STACK_SIZE);
    th->control_stack_end = th->binding_stack_start;
    th->alien_stack_start=
        (lispobj*)((void*)th->binding_stack_start+BINDING_STACK_SIZE);
    th->binding_stack_pointer=th->binding_stack_start;
    th->this=th;
    th->os_thread=0;
    th->state=STATE_RUNNING;
#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
    th->alien_stack_pointer=((void *)th->alien_stack_start
                             + ALIEN_STACK_SIZE-N_WORD_BYTES);
#else
    th->alien_stack_pointer=((void *)th->alien_stack_start);
#endif
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    th->pseudo_atomic_bits=0;
#endif
#ifdef LISP_FEATURE_GENCGC
    gc_set_region_empty(&th->alloc_region);
#endif

#ifndef LISP_FEATURE_SB_THREAD
    /* the tls-points-into-struct-thread trick is only good for threaded
     * sbcl, because unithread sbcl doesn't have tls. So, we copy the
     * appropriate values from struct thread here, and make sure that
     * we use the appropriate SymbolValue macros to access any of the
     * variable quantities from the C runtime. It's not quite OAOOM,
     * it just feels like it */
    SetSymbolValue(BINDING_STACK_START,(lispobj)th->binding_stack_start,th);
    SetSymbolValue(CONTROL_STACK_START,(lispobj)th->control_stack_start,th);
    SetSymbolValue(CONTROL_STACK_END,(lispobj)th->control_stack_end,th);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    SetSymbolValue(BINDING_STACK_POINTER,(lispobj)th->binding_stack_pointer,th);
    SetSymbolValue(ALIEN_STACK,(lispobj)th->alien_stack_pointer,th);
    SetSymbolValue(PSEUDO_ATOMIC_BITS,(lispobj)th->pseudo_atomic_bits,th);
#else
    current_binding_stack_pointer=th->binding_stack_pointer;
    current_control_stack_pointer=th->control_stack_start;
#endif
#endif
    bind_variable(CURRENT_CATCH_BLOCK,make_fixnum(0),th);
    bind_variable(CURRENT_UNWIND_PROTECT_BLOCK,make_fixnum(0),th);
    bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,make_fixnum(0),th);
    bind_variable(INTERRUPT_PENDING, NIL,th);
    bind_variable(INTERRUPTS_ENABLED,T,th);
    bind_variable(ALLOW_WITH_INTERRUPTS,T,th);
    bind_variable(GC_PENDING,NIL,th);
#ifdef LISP_FEATURE_SB_THREAD
    bind_variable(STOP_FOR_GC_PENDING,NIL,th);
#endif

    th->interrupt_data = (struct interrupt_data *)
        os_validate(0,(sizeof (struct interrupt_data)));
    if (!th->interrupt_data) {
        free_thread_struct(th);
        return 0;
    }
    th->interrupt_data->pending_handler = 0;
    th->no_tls_value_marker=initial_function;

    th->stepping = NIL;
    return th;
}

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
mach_port_t setup_mach_exception_handling_thread();
kern_return_t mach_thread_init(mach_port_t thread_exception_port);
#endif

void create_initial_thread(lispobj initial_function) {
    struct thread *th=create_thread_struct(initial_function);
    if(th) {
#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
        kern_return_t ret;

        setup_mach_exception_handling_thread();
#endif
        initial_thread_trampoline(th); /* no return */
    } else lose("can't create initial thread\n");
}

#ifdef LISP_FEATURE_SB_THREAD

#ifndef __USE_XOPEN2K
extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
                                  size_t __stacksize);
#endif

boolean create_os_thread(struct thread *th,os_thread_t *kid_tid)
{
    /* The new thread inherits the restrictive signal mask set here,
     * and enables signals again when it is set up properly. */
    pthread_attr_t attr;
    sigset_t newset,oldset;
    boolean r=1;
    int retcode = 0, initcode;

    FSHOW_SIGNAL((stderr,"/create_os_thread: creating new thread\n"));

#ifdef LOCK_CREATE_THREAD
    retcode = pthread_mutex_lock(&create_thread_lock);
    gc_assert(retcode == 0);
    FSHOW_SIGNAL((stderr,"/create_os_thread: got lock\n"));
#endif
    sigemptyset(&newset);
    /* Blocking deferrable signals is enough, no need to block
     * SIG_STOP_FOR_GC because the child process is not linked onto
     * all_threads until it's ready. */
    sigaddset_deferrable(&newset);
    thread_sigmask(SIG_BLOCK, &newset, &oldset);

#if defined(LISP_FEATURE_DARWIN)
#define CONTROL_STACK_ADJUST 8192 /* darwin wants page-aligned stacks */
#else
#define CONTROL_STACK_ADJUST 16
#endif

    if((initcode = pthread_attr_init(&attr)) ||
       /* FIXME: why do we even have this in the first place? */
       (pthread_attr_setstack(&attr,th->control_stack_start,
                              THREAD_CONTROL_STACK_SIZE-CONTROL_STACK_ADJUST)) ||
#undef CONTROL_STACK_ADJUST
       (retcode = pthread_create
        (kid_tid,&attr,(void *(*)(void *))new_thread_trampoline,th))) {
        FSHOW_SIGNAL((stderr, "init = %d\n", initcode));
        FSHOW_SIGNAL((stderr, "pthread_create returned %d, errno %d\n",
                      retcode, errno));
        FSHOW_SIGNAL((stderr, "wanted stack size %d, min stack size %d\n",
                      THREAD_CONTROL_STACK_SIZE-16, PTHREAD_STACK_MIN));
        if(retcode < 0) {
            perror("create_os_thread");
        }
        r=0;
    }

#ifdef QUEUE_FREEABLE_THREAD_STACKS
    free_freeable_stacks();
#endif
    thread_sigmask(SIG_SETMASK,&oldset,0);
#ifdef LOCK_CREATE_THREAD
    retcode = pthread_mutex_unlock(&create_thread_lock);
    gc_assert(retcode == 0);
    FSHOW_SIGNAL((stderr,"/create_os_thread: released lock\n"));
#endif
    return r;
}

os_thread_t create_thread(lispobj initial_function) {
    struct thread *th;
    os_thread_t kid_tid;

    /* Assuming that a fresh thread struct has no lisp objects in it,
     * linking it to all_threads can be left to the thread itself
     * without fear of gc lossage. initial_function violates this
     * assumption and must stay pinned until the child starts up. */
    th = create_thread_struct(initial_function);
    if(th==0) return 0;

    if (create_os_thread(th,&kid_tid)) {
        return kid_tid;
    } else {
        free_thread_struct(th);
        return 0;
    }
}

/* Send the signo to os_thread, retry if the rt signal queue is
 * full. */
int
kill_thread_safely(os_thread_t os_thread, int signo)
{
    int r;
    /* The man page does not mention EAGAIN as a valid return value
     * for either pthread_kill or kill. But that's theory, this is
     * practice. By waiting here we assume that the delivery of this
     * signal is not necessary for the delivery of the signals in the
     * queue. In other words, we _assume_ there are no deadlocks. */
    while ((r=pthread_kill(os_thread,signo))==EAGAIN) {
        /* wait a bit then try again in the hope of the rt signal
         * queue not being full */
        FSHOW_SIGNAL((stderr,"/rt signal queue full\n"));
        /* FIXME: some kind of backoff (random, exponential) would be
         * nice. */
        sleep(1);
    }
    return r;
}

int signal_interrupt_thread(os_thread_t os_thread)
{
    int status = kill_thread_safely(os_thread, SIG_INTERRUPT_THREAD);
    if (status == 0) {
        return 0;
    } else if (status == ESRCH) {
        return -1;
    } else {
        lose("cannot send SIG_INTERRUPT_THREAD to thread=%lu: %d, %s\n",
             os_thread, status, strerror(status));
    }
}

/* stopping the world is a two-stage process. From this thread we signal
 * all the others with SIG_STOP_FOR_GC. The handler for this signal does
 * the usual pseudo-atomic checks (we don't want to stop a thread while
 * it's in the middle of allocation) then waits for another SIG_STOP_FOR_GC.
 */

/* To avoid deadlocks when gc stops the world all clients of each
 * mutex must enable or disable SIG_STOP_FOR_GC for the duration of
 * holding the lock, but they must agree on which. */
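
/* In outline, for each victim thread: gc_stop_the_world() sends
 * SIG_STOP_FOR_GC and spins until the thread's state leaves
 * STATE_RUNNING; the handler (not in this file) parks the thread in
 * STATE_SUSPENDED until resumed. gc_start_the_world() flips the state
 * back to STATE_RUNNING and signals the thread again -- with
 * SIG_RESUME_FROM_GC where that is defined, otherwise with the second
 * SIG_STOP_FOR_GC mentioned above. */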
void gc_stop_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status, lock_ret;
#ifdef LOCK_CREATE_THREAD
    /* KLUDGE: Stopping the thread during pthread_create() causes deadlock
     * on FreeBSD. */
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on create_thread_lock, thread=%lu\n",
                  th->os_thread));
    lock_ret = pthread_mutex_lock(&create_thread_lock);
    gc_assert(lock_ret == 0);
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got create_thread_lock, thread=%lu\n",
                  th->os_thread));
#endif
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock, thread=%lu\n",
                  th->os_thread));
    /* keep threads from starting while the world is stopped. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got lock, thread=%lu\n",
                  th->os_thread));
    /* stop all other threads by sending them SIG_STOP_FOR_GC */
    for(p=all_threads; p; p=p->next) {
        gc_assert(p->os_thread != 0);
        FSHOW_SIGNAL((stderr,"/gc_stop_the_world: p->state: %x\n", p->state));
        if((p!=th) && ((p->state==STATE_RUNNING))) {
            FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending %x, os_thread %x\n",
                          p, p->os_thread));
            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
            if (status==ESRCH) {
                /* This thread has exited. */
                gc_assert(p->state==STATE_DEAD);
            } else if (status) {
                lose("cannot send suspend thread=%lu: %d, %s\n",
                     p->os_thread,status,strerror(status));
            }
        }
    }
673 FSHOW_SIGNAL((stderr,"/gc_stop_the_world:signals sent\n"));
674 /* wait for the running threads to stop or finish */
675 for(p=all_threads;p;) {
676 FSHOW_SIGNAL((stderr,"/gc_stop_the_world: th: %p, p: %p\n", th, p));
677 if((p!=th) && (p->state==STATE_RUNNING)) {
678 sched_yield();
679 } else {
680 p=p->next;
683 FSHOW_SIGNAL((stderr,"/gc_stop_the_world:end\n"));

void gc_start_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status, lock_ret;
    /* if a resumed thread creates a new thread before we're done with
     * this loop, the new thread will get consed on the front of
     * all_threads, but it won't have been stopped so won't need
     * restarting */
    FSHOW_SIGNAL((stderr,"/gc_start_the_world:begin\n"));
    for(p=all_threads;p;p=p->next) {
        gc_assert(p->os_thread!=0);
        if((p!=th) && (p->state!=STATE_DEAD)) {
            if(p->state!=STATE_SUSPENDED) {
                lose("gc_start_the_world: wrong thread state is %d\n",
                     fixnum_value(p->state));
            }
            FSHOW_SIGNAL((stderr, "/gc_start_the_world: resuming %lu\n",
                          p->os_thread));
            p->state=STATE_RUNNING;

#if defined(SIG_RESUME_FROM_GC)
            status=kill_thread_safely(p->os_thread,SIG_RESUME_FROM_GC);
#else
            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
#endif
            if (status) {
                lose("cannot resume thread=%lu: %d, %s\n",
                     p->os_thread,status,strerror(status));
            }
        }
    }
    /* If we waited here until all threads leave STATE_SUSPENDED, then
     * SIG_STOP_FOR_GC wouldn't need to be a rt signal. That has some
     * performance implications, but does away with the 'rt signal
     * queue full' problem. */

    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);
#ifdef LOCK_CREATE_THREAD
    lock_ret = pthread_mutex_unlock(&create_thread_lock);
    gc_assert(lock_ret == 0);
#endif

    FSHOW_SIGNAL((stderr,"/gc_start_the_world:end\n"));
}

#endif

int
thread_yield()
{
#ifdef LISP_FEATURE_SB_THREAD
    return sched_yield();
#else
    return 0;
#endif
}