/**********************************************************************

  thread.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
  YARV Thread Design

  model 1: Userlevel Thread
    Same as the traditional ruby thread.

  model 2: Native Thread with Global VM lock
    Using pthread (or Windows threads); Ruby threads run concurrently.

  model 3: Native Thread with fine grain lock
    Using pthread; Ruby threads run concurrently or in parallel.

  model 4: M:N User:Native threads with Global VM lock
    Combination of models 1 and 2.

  model 5: M:N User:Native threads with fine grain lock
    Combination of models 1 and 3.

------------------------------------------------------------------------

  model 2:
    Only a thread that holds the mutex (GVL: Global VM Lock, or Giant
    VM Lock) can run.  On thread scheduling, the running thread releases
    the GVL.  If the running thread attempts a blocking operation, it
    must release the GVL so that another thread can continue running.
    After the blocking operation, the thread must check for interrupts
    (RUBY_VM_CHECK_INTS).

    Every VM can run in parallel.

    Ruby threads are scheduled by the OS thread scheduler.

------------------------------------------------------------------------

  model 3:
    Every thread runs concurrently or in parallel, and exclusive access
    control is needed to access shared objects.  For example, a fine
    grain lock must be taken every time a String or Array object is
    accessed.
 */
/*
 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
 * 2.15 or later and setting _FORTIFY_SOURCE > 0.
 * However, the implementation is wrong. Even though Linux's select(2)
 * supports large fd sizes (> FD_SETSIZE), it wrongly assumes fd is always
 * less than FD_SETSIZE (i.e. 1024). And then, when HAVE_RB_FD_INIT is
 * enabled, it doesn't work correctly and makes the program abort. Therefore
 * we need to disable _FORTIFY_SOURCE until glibc fixes it.
 */
#undef _FORTIFY_SOURCE
#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0
/* for model 2 */

#include "ruby/internal/config.h"

#ifdef __linux__
// Normally, gcc(1) translates calls to alloca() with inlined code. This is
// not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option
// is given and the header <alloca.h> is not included.
# include <alloca.h>
#endif
#include "eval_intern.h"
#include "gc.h"
#include "hrtime.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/hash.h"
#include "internal/io.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "ruby/fiber/scheduler.h"
#include "internal/signal.h"
#include "internal/thread.h"
#include "internal/time.h"
#include "internal/warnings.h"
#include "iseq.h"
#include "mjit.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/thread.h"
#include "ruby/thread_native.h"
#include "timev.h"
#include "vm_core.h"
#include "ractor_core.h"
#include "vm_debug.h"
#include "vm_sync.h"
#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

#ifndef THREAD_DEBUG
#define THREAD_DEBUG 0
#endif

static VALUE rb_cThreadShield;

static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;

enum SLEEP_FLAGS {
    SLEEP_DEADLOCKABLE = 0x1,
    SLEEP_SPURIOUS_CHECK = 0x2
};

#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)

static inline VALUE
rb_thread_local_storage(VALUE thread)
{
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
        rb_ivar_set(thread, idLocals, rb_hash_new());
        RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
    }
    return rb_ivar_get(thread, idLocals);
}

static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
static void sleep_forever(rb_thread_t *th, unsigned int fl);
static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker);
static int rb_threadptr_dead(rb_thread_t *th);
static void rb_check_deadlock(rb_ractor_t *r);
static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
static int consume_communication_pipe(int fd);
static int check_signals_nogvl(rb_thread_t *, int sigwait_fd);
void rb_sigwait_fd_migrate(rb_vm_t *); /* process.c */

#define eKillSignal INT2FIX(0)
#define eTerminateSignal INT2FIX(1)
static volatile int system_working = 1;
struct waiting_fd {
    struct list_node wfd_node; /* <=> vm.waiting_fds */
    rb_thread_t *th;
    int fd;
};

/********************************************************************************/

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

struct rb_blocking_region_buffer {
    enum rb_thread_status prev_status;
};

static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
static void unblock_function_clear(rb_thread_t *th);

static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                                        rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);

#define GVL_UNLOCK_BEGIN(th) do { \
  RB_GC_SAVE_MACHINE_CONTEXT(th); \
  gvl_release(rb_ractor_gvl(th->ractor));

#define GVL_UNLOCK_END(th) \
  gvl_acquire(rb_ractor_gvl(th->ractor), th); \
  rb_ractor_thread_switch(th->ractor, th); \
} while(0)
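
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * GVL_UNLOCK_BEGIN opens a `do {` block that GVL_UNLOCK_END closes, so the
 * two macros bracket a region that runs with the GVL released.  The function
 * and do_native_work() below are hypothetical and only show the pairing:
 *
 *   static void
 *   example_without_gvl(rb_thread_t *th)
 *   {
 *       GVL_UNLOCK_BEGIN(th);
 *       do_native_work();  // GVL is released here; do not touch Ruby objects
 *       GVL_UNLOCK_END(th);
 *   }
 */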
#ifdef __GNUC__
#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#else
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#endif
#else
#define only_if_constant(expr, notconst) notconst
#endif
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        /* always return true unless fail_if_interrupted */ \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        exec; \
        blocking_region_end(th, &__region); \
    }; \
} while(0)
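
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * BLOCKING_REGION runs `exec` between blocking_region_begin() and
 * blocking_region_end(), i.e. with the GVL released.  rb_nogvl() and
 * rb_thread_io_blocking_region() below are the real callers; a minimal
 * hypothetical use (result, saved_errno, fd, buf and len are assumed
 * locals) looks like:
 *
 *   BLOCKING_REGION(th, {
 *       result = read(fd, buf, len);  // runs without the GVL
 *       saved_errno = errno;
 *   }, ubf_select, th, FALSE);
 */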
/*
 * returns true if this thread was spuriously interrupted, false otherwise
 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
 */
#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
static inline int
vm_check_ints_blocking(rb_execution_context_t *ec)
{
    rb_thread_t *th = rb_ec_thread_ptr(ec);

    if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
        if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
    }
    else {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(ec);
    }
    return rb_threadptr_execute_interrupts(th, 1);
}

int
rb_vm_check_ints_blocking(rb_execution_context_t *ec)
{
    return vm_check_ints_blocking(ec);
}
/*
 * poll() is supported by many OSes, but so far Linux is the only
 * one we know of that supports using poll() in all places select()
 * would work.
 */
#if defined(HAVE_POLL)
#  if defined(__linux__)
#    define USE_POLL
#  endif
#  if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
#    define USE_POLL
     /* FreeBSD does not set POLLOUT when POLLHUP happens */
#    define POLLERR_SET (POLLHUP | POLLERR)
#  endif
#endif

static void
timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
                const struct timeval *timeout)
{
    if (timeout) {
        *rel = rb_timeval2hrtime(timeout);
        *end = rb_hrtime_add(rb_hrtime_now(), *rel);
        *to = rel;
    }
    else {
        *to = 0;
    }
}
#if THREAD_DEBUG
#ifdef HAVE_VA_ARGS_MACRO
void rb_thread_debug(const char *file, int line, const char *fmt, ...);
#define thread_debug(...) rb_thread_debug(__FILE__, __LINE__, __VA_ARGS__)
#define POSITION_FORMAT "%s:%d:"
#define POSITION_ARGS ,file, line
#else
void rb_thread_debug(const char *fmt, ...);
#define thread_debug rb_thread_debug
#define POSITION_FORMAT
#define POSITION_ARGS
#endif

# ifdef NON_SCALAR_THREAD_ID
#define fill_thread_id_string ruby_fill_thread_id_string
const char *
ruby_fill_thread_id_string(rb_nativethread_id_t thid, rb_thread_id_string_t buf)
{
    extern const char ruby_digitmap[];
    size_t i;

    buf[0] = '0';
    buf[1] = 'x';
    for (i = 0; i < sizeof(thid); i++) {
# ifdef LITTLE_ENDIAN
        size_t j = sizeof(thid) - i - 1;
# else
        size_t j = i;
# endif
        unsigned char c = (unsigned char)((char *)&thid)[j];
        buf[2 + i * 2] = ruby_digitmap[(c >> 4) & 0xf];
        buf[3 + i * 2] = ruby_digitmap[c & 0xf];
    }
    buf[sizeof(rb_thread_id_string_t)-1] = '\0';
    return buf;
}
# define fill_thread_id_str(th) fill_thread_id_string((th)->thread_id, (th)->thread_id_string)
# define thread_id_str(th) ((th)->thread_id_string)
# define PRI_THREAD_ID "s"
# endif

# if THREAD_DEBUG < 0
static int rb_thread_debug_enabled;

/*
 *  call-seq:
 *     Thread.DEBUG     -> num
 *
 *  Returns the thread debug level.  Available only if compiled with
 *  THREAD_DEBUG=-1.
 */

static VALUE
rb_thread_s_debug(VALUE _)
{
    return INT2NUM(rb_thread_debug_enabled);
}

/*
 *  call-seq:
 *     Thread.DEBUG = num
 *
 *  Sets the thread debug level.  Available only if compiled with
 *  THREAD_DEBUG=-1.
 */

static VALUE
rb_thread_s_debug_set(VALUE self, VALUE val)
{
    rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
    return val;
}
# else
# define rb_thread_debug_enabled THREAD_DEBUG
# endif
#else
#define thread_debug if(0)printf
#endif

#ifndef fill_thread_id_str
# define fill_thread_id_string(thid, buf) ((void *)(uintptr_t)(thid))
# define fill_thread_id_str(th) (void)0
# define thread_id_str(th) ((void *)(uintptr_t)(th)->thread_id)
# define PRI_THREAD_ID "p"
#endif
NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start));
void ruby_sigchld_handler(rb_vm_t *); /* signal.c */

static void
ubf_sigwait(void *ignore)
{
    rb_thread_wakeup_timer_thread(0);
}

#include THREAD_IMPL_SRC

#if defined(_WIN32)

#define DEBUG_OUT() \
  WaitForSingleObject(&debug_mutex, INFINITE); \
  printf(POSITION_FORMAT"%#lx - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
  fflush(stdout); \
  ReleaseMutex(&debug_mutex);

#elif defined(HAVE_PTHREAD_H)

#define DEBUG_OUT() \
  pthread_mutex_lock(&debug_mutex); \
  printf(POSITION_FORMAT"%"PRI_THREAD_ID" - %s" POSITION_ARGS, \
         fill_thread_id_string(pthread_self(), thread_id_string), buf); \
  fflush(stdout); \
  pthread_mutex_unlock(&debug_mutex);

#endif

/*
 * TODO: somebody with win32 knowledge should be able to get rid of
 * timer-thread by busy-waiting on signals. And it should be possible
 * to make the GVL in thread_pthread.c be platform-independent.
 */
#ifndef BUSY_WAIT_SIGNALS
#  define BUSY_WAIT_SIGNALS (0)
#endif

#ifndef USE_EVENTFD
#  define USE_EVENTFD (0)
#endif

#if THREAD_DEBUG
static int debug_mutex_initialized = 1;
static rb_nativethread_lock_t debug_mutex;

void
rb_thread_debug(
#ifdef HAVE_VA_ARGS_MACRO
    const char *file, int line,
#endif
    const char *fmt, ...)
{
    va_list args;
    char buf[BUFSIZ];
#ifdef NON_SCALAR_THREAD_ID
    rb_thread_id_string_t thread_id_string;
#endif

    if (!rb_thread_debug_enabled) return;

    if (debug_mutex_initialized == 1) {
        debug_mutex_initialized = 0;
        rb_native_mutex_initialize(&debug_mutex);
    }

    va_start(args, fmt);
    vsnprintf(buf, BUFSIZ, fmt, args);
    va_end(args);

    DEBUG_OUT();
}
#endif
#include "thread_sync.c"

void
rb_vm_gvl_destroy(rb_global_vm_lock_t *gvl)
{
    gvl_release(gvl);
    gvl_destroy(gvl);
}

void
rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_initialize(lock);
}

void
rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_destroy(lock);
}

void
rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_lock(lock);
}

void
rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_unlock(lock);
}
static int
unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
{
    do {
        if (fail_if_interrupted) {
            if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
                return FALSE;
            }
        }
        else {
            RUBY_VM_CHECK_INTS(th->ec);
        }

        rb_native_mutex_lock(&th->interrupt_lock);
    } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
             (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));

    VM_ASSERT(th->unblock.func == NULL);

    th->unblock.func = func;
    th->unblock.arg = arg;
    rb_native_mutex_unlock(&th->interrupt_lock);

    return TRUE;
}

static void
unblock_function_clear(rb_thread_t *th)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    th->unblock.func = 0;
    rb_native_mutex_unlock(&th->interrupt_lock);
}

static void
rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
{
    rb_native_mutex_lock(&th->interrupt_lock);

    if (trap) {
        RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
    }
    else {
        RUBY_VM_SET_INTERRUPT(th->ec);
    }
    if (th->unblock.func != NULL) {
        (th->unblock.func)(th->unblock.arg);
    }
    else {
        /* none */
    }
    rb_native_mutex_unlock(&th->interrupt_lock);
}

void
rb_threadptr_interrupt(rb_thread_t *th)
{
    rb_threadptr_interrupt_common(th, 0);
}

static void
threadptr_trap_interrupt(rb_thread_t *th)
{
    rb_threadptr_interrupt_common(th, 1);
}

static void
terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
{
    rb_thread_t *th = 0;

    list_for_each(&r->threads.set, th, lt_node) {
        if (th != main_thread) {
            thread_debug("terminate_all: begin (thid: %"PRI_THREAD_ID", status: %s)\n",
                         thread_id_str(th), thread_status_name(th, TRUE));
            rb_threadptr_pending_interrupt_enque(th, eTerminateSignal);
            rb_threadptr_interrupt(th);
            thread_debug("terminate_all: end (thid: %"PRI_THREAD_ID", status: %s)\n",
                         thread_id_str(th), thread_status_name(th, TRUE));
        }
        else {
            thread_debug("terminate_all: main thread (%p)\n", (void *)th);
        }
    }
}
static void
rb_threadptr_join_list_wakeup(rb_thread_t *thread)
{
    while (thread->join_list) {
        struct rb_waiting_list *join_list = thread->join_list;

        // Consume the entry from the join list:
        thread->join_list = join_list->next;

        rb_thread_t *target_thread = join_list->thread;

        if (target_thread->scheduler != Qnil && rb_fiberptr_blocking(join_list->fiber) == 0) {
            rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
        }
        else {
            rb_threadptr_interrupt(target_thread);

            switch (target_thread->status) {
              case THREAD_STOPPED:
              case THREAD_STOPPED_FOREVER:
                target_thread->status = THREAD_RUNNABLE;
              default:
                break;
            }
        }
    }
}

void
rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
{
    while (th->keeping_mutexes) {
        rb_mutex_t *mutex = th->keeping_mutexes;
        th->keeping_mutexes = mutex->next_mutex;

        /* rb_warn("mutex #<%p> remains to be locked by terminated thread", (void *)mutexes); */

        const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
        if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
    }
}
void
rb_thread_terminate_all(rb_thread_t *th)
{
    rb_ractor_t *cr = th->ractor;
    rb_execution_context_t * volatile ec = th->ec;
    volatile int sleeping = 0;

    if (cr->threads.main != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)cr->threads.main, (void *)th);
    }

    /* unlock all locking mutexes */
    rb_threadptr_unlock_all_locking_mutexes(th);

    EC_PUSH_TAG(ec);
    if (EC_EXEC_TAG() == TAG_NONE) {
      retry:
        thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
        terminate_all(cr, th);

        while (rb_ractor_living_thread_num(cr) > 1) {
            rb_hrtime_t rel = RB_HRTIME_PER_SEC;
            /*
             * The thread-exit routine in thread_start_func_2 notifies
             * us when the last sub-thread exits.
             */
            sleeping = 1;
            native_sleep(th, &rel);
            RUBY_VM_CHECK_INTS_BLOCKING(ec);
            sleeping = 0;
        }
    }
    else {
        /*
         * When an exception is caught (e.g. Ctrl+C), broadcast the kill
         * request again to ensure all threads are killed even if they
         * are blocked on sleep, a mutex, etc.
         */
        if (sleeping) {
            sleeping = 0;
            goto retry;
        }
    }
    EC_POP_TAG();
}
void rb_threadptr_root_fiber_terminate(rb_thread_t *th);

static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;

    // The thread stack doesn't exist in the forked process:
    th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;

    rb_threadptr_root_fiber_terminate(th);
}

static void
thread_cleanup_func(void *th_ptr, int atfork)
{
    rb_thread_t *th = th_ptr;

    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);

    /*
     * Unfortunately, we can't release native threading resources at fork,
     * because libc may be in an unstable locking state, so touching
     * a threading resource may cause a deadlock.
     *
     * FIXME: Skipping native_mutex_destroy(pthread_mutex_destroy) is safe
     * with NPTL, but native_thread_destroy calls pthread_cond_destroy
     * which calls free(3), so there is a small memory leak at fork, here.
     */
    if (atfork)
        return;

    rb_native_mutex_destroy(&th->interrupt_lock);
    native_thread_destroy(th);
}
static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
static VALUE rb_thread_to_s(VALUE thread);

void
ruby_thread_init_stack(rb_thread_t *th)
{
    native_thread_init_stack(th);
}

const VALUE *
rb_vm_proc_local_ep(VALUE proc)
{
    const VALUE *ep = vm_proc_ep(proc);

    if (ep) {
        return rb_vm_ep_local_ep(ep);
    }
    else {
        return NULL;
    }
}

// for ractor, defined in vm.c
VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
                                  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);

static VALUE
thread_do_start_proc(rb_thread_t *th)
{
    VALUE args = th->invoke_arg.proc.args;
    const VALUE *args_ptr;
    int args_len;
    VALUE procval = th->invoke_arg.proc.proc;
    rb_proc_t *proc;
    GetProcPtr(procval, proc);

    th->ec->errinfo = Qnil;
    th->ec->root_lep = rb_vm_proc_local_ep(procval);
    th->ec->root_svar = Qfalse;

    vm_check_ints_blocking(th->ec);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        VALUE self = rb_ractor_self(th->ractor);
        VM_ASSERT(FIXNUM_P(args));
        args_len = FIX2INT(args);
        args_ptr = ALLOCA_N(VALUE, args_len);
        rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc_with_self(
            th->ec, proc, self,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
    else {
        args_len = RARRAY_LENINT(args);
        if (args_len < 8) {
            /* free proc.args if the length is small enough */
            args_ptr = ALLOCA_N(VALUE, args_len);
            MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR_TRANSIENT(args), VALUE, args_len);
            th->invoke_arg.proc.args = Qnil;
        }
        else {
            args_ptr = RARRAY_CONST_PTR(args);
        }

        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc(
            th->ec, proc,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
}
static void
thread_do_start(rb_thread_t *th)
{
    native_set_thread_name(th);
    VALUE result = Qundef;

    EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);

    switch (th->invoke_type) {
      case thread_invoke_type_proc:
        result = thread_do_start_proc(th);
        break;

      case thread_invoke_type_ractor_proc:
        result = thread_do_start_proc(th);
        rb_ractor_atexit(th->ec, result);
        break;

      case thread_invoke_type_func:
        result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
        break;

      case thread_invoke_type_none:
        rb_bug("unreachable");
    }

    rb_fiber_scheduler_set(Qnil);

    th->value = result;

    EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
}
void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);

static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
{
    STACK_GROW_DIR_DETECTION;
    enum ruby_tag_type state;
    VALUE errinfo = Qnil;
    size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
    rb_thread_t *ractor_main_th = th->ractor->threads.main;
    VALUE * vm_stack = NULL;

    VM_ASSERT(th != th->vm->ractor.main_thread);
    thread_debug("thread start: %p\n", (void *)th);

    // setup native thread
    gvl_acquire(rb_ractor_gvl(th->ractor), th);
    ruby_thread_set_native(th);

    // setup ractor
    if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
        RB_VM_LOCK();
        {
            rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
            rb_ractor_t *r = th->ractor;
            r->r_stdin = rb_io_prep_stdin();
            r->r_stdout = rb_io_prep_stdout();
            r->r_stderr = rb_io_prep_stderr();
        }
        RB_VM_UNLOCK();
    }

    // This assertion does not pass on win32 env. Check it later.
    // VM_ASSERT((size * sizeof(VALUE)) <= th->ec->machine.stack_maxsize);

    // setup VM and machine stack
    vm_stack = alloca(size * sizeof(VALUE));
    VM_ASSERT(vm_stack);

    rb_ec_initialize_vm_stack(th->ec, vm_stack, size);
    th->ec->machine.stack_start = STACK_DIR_UPPER(vm_stack + size, vm_stack);
    th->ec->machine.stack_maxsize -= size * sizeof(VALUE);

    thread_debug("thread start (get lock): %p\n", (void *)th);

    // Ensure that we are not joinable.
    VM_ASSERT(th->value == Qundef);

    EC_PUSH_TAG(th->ec);

    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        SAVE_ROOT_JMPBUF(th, thread_do_start(th));
    }
    else {
        errinfo = th->ec->errinfo;

        if (state == TAG_FATAL) {
            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit(th->ec, Qnil);
            }
            /* fatal error within this thread, need to stop whole script */
        }
        else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
            /* exit on main_thread. */
        }
        else {
            if (th->report_on_exception) {
                VALUE mesg = rb_thread_to_s(th->self);
                rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
                rb_write_error_str(mesg);
                rb_ec_error_print(th->ec, errinfo);
            }

            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit_exception(th->ec);
            }

            if (th->vm->thread_abort_on_exception ||
                th->abort_on_exception || RTEST(ruby_debug)) {
                /* exit on main_thread */
            }
            else {
                errinfo = Qnil;
            }
        }
        th->value = Qnil;
    }

    // The thread is effectively finished and can be joined.
    VM_ASSERT(th->value != Qundef);

    rb_threadptr_join_list_wakeup(th);
    rb_threadptr_unlock_all_locking_mutexes(th);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        rb_thread_terminate_all(th);
        rb_ractor_teardown(th->ec);
    }

    th->status = THREAD_KILLED;
    thread_debug("thread end: %p\n", (void *)th);

    if (th->vm->ractor.main_thread == th) {
        ruby_stop(0);
    }

    if (RB_TYPE_P(errinfo, T_OBJECT)) {
        /* treat with normal error object */
        rb_threadptr_raise(ractor_main_th, 1, &errinfo);
    }

    EC_POP_TAG();

    rb_ec_clear_current_thread_trace_func(th->ec);

    /* locking_mutex must be Qfalse */
    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);
    }

    if (ractor_main_th->status == THREAD_KILLED &&
        th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
        /* I'm the last thread; wake up the main thread from rb_thread_terminate_all */
        rb_threadptr_interrupt(ractor_main_th);
    }

    rb_check_deadlock(th->ractor);

    rb_fiber_close(th->ec->fiber_ptr);

    thread_cleanup_func(th, FALSE);
    VM_ASSERT(th->ec->vm_stack == NULL);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        // After rb_ractor_living_threads_remove(), GC can happen at any
        // time and this ractor can be collected (destroying the GVL),
        // so gvl_release() must come before it.
        gvl_release(rb_ractor_gvl(th->ractor));
        rb_ractor_living_threads_remove(th->ractor, th);
    }
    else {
        rb_ractor_living_threads_remove(th->ractor, th);
        gvl_release(rb_ractor_gvl(th->ractor));
    }

    return 0;
}
struct thread_create_params {
    enum thread_invoke_type type;

    // for normal proc thread
    VALUE args;
    VALUE proc;

    // for ractor
    rb_ractor_t *g;

    // for func
    VALUE (*fn)(void *);
};

static VALUE
thread_create_core(VALUE thval, struct thread_create_params *params)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
    int err;

    if (OBJ_FROZEN(current_th->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }

    switch (params->type) {
      case thread_invoke_type_proc:
        th->invoke_type = thread_invoke_type_proc;
        th->invoke_arg.proc.args = params->args;
        th->invoke_arg.proc.proc = params->proc;
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        break;

      case thread_invoke_type_ractor_proc:
#if RACTOR_CHECK_MODE > 0
        rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
#endif
        th->invoke_type = thread_invoke_type_ractor_proc;
        th->ractor = params->g;
        th->ractor->threads.main = th;
        th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
        th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        rb_ractor_send_parameters(ec, params->g, params->args);
        break;

      case thread_invoke_type_func:
        th->invoke_type = thread_invoke_type_func;
        th->invoke_arg.func.func = params->fn;
        th->invoke_arg.func.arg = (void *)params->args;
        break;

      default:
        rb_bug("unreachable");
    }

    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;

    th->pending_interrupt_queue = rb_ary_tmp_new(0);
    th->pending_interrupt_queue_checked = 0;
    th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
    RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);

    rb_native_mutex_initialize(&th->interrupt_lock);

    RUBY_DEBUG_LOG("r:%u th:%p", rb_ractor_id(th->ractor), (void *)th);

    rb_ractor_living_threads_insert(th->ractor, th);

    /* kick thread */
    err = native_thread_create(th);
    if (err) {
        th->status = THREAD_KILLED;
        rb_ractor_living_threads_remove(th->ractor, th);
        rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
    }
    return thval;
}

#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
/*
 * call-seq:
 *    Thread.new { ... } -> thread
 *    Thread.new(*args, &proc) -> thread
 *    Thread.new(*args) { |args| ... } -> thread
 *
 * Creates a new thread executing the given block.
 *
 * Any +args+ given to ::new will be passed to the block:
 *
 *     arr = []
 *     a, b, c = 1, 2, 3
 *     Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
 *     arr #=> [1, 2, 3]
 *
 * A ThreadError exception is raised if ::new is called without a block.
 *
 * If you're going to subclass Thread, be sure to call super in your
 * +initialize+ method, otherwise a ThreadError will be raised.
 */
static VALUE
thread_s_new(int argc, VALUE *argv, VALUE klass)
{
    rb_thread_t *th;
    VALUE thread = rb_thread_alloc(klass);

    if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
        rb_raise(rb_eThreadError, "can't alloc thread");
    }

    rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
    th = rb_thread_ptr(thread);
    if (!threadptr_initialized(th)) {
        rb_raise(rb_eThreadError, "uninitialized thread - check `%"PRIsVALUE"#initialize'",
                 klass);
    }
    return thread;
}
/*
 * call-seq:
 *    Thread.start([args]*) {|args| block }   -> thread
 *    Thread.fork([args]*) {|args| block }    -> thread
 *
 * Basically the same as ::new. However, if class Thread is subclassed, then
 * calling +start+ in that subclass will not invoke the subclass's
 * +initialize+ method.
 */

static VALUE
thread_start(VALUE klass, VALUE args)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_proc,
        .args = args,
        .proc = rb_block_proc(),
    };
    return thread_create_core(rb_thread_alloc(klass), &params);
}

static VALUE
threadptr_invoke_proc_location(rb_thread_t *th)
{
    if (th->invoke_type == thread_invoke_type_proc) {
        return rb_proc_location(th->invoke_arg.proc.proc);
    }
    else {
        return Qnil;
    }
}

/* :nodoc: */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th = rb_thread_ptr(thread);

    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    else if (th->invoke_type != thread_invoke_type_none) {
        VALUE loc = threadptr_invoke_proc_location(th);
        if (!NIL_P(loc)) {
            rb_raise(rb_eThreadError,
                     "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
                     RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
        }
        else {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
    }
    else {
        struct thread_create_params params = {
            .type = thread_invoke_type_proc,
            .args = args,
            .proc = rb_block_proc(),
        };
        return thread_create_core(thread, &params);
    }
}

VALUE
rb_thread_create(VALUE (*fn)(void *), void *arg)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_func,
        .fn = fn,
        .args = (VALUE)arg,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}
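
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * rb_thread_create() is the C-level counterpart of Thread.new.  The body
 * function below is hypothetical and only shows the expected signature;
 * it runs in the new Ruby thread with the GVL held:
 *
 *   static VALUE
 *   my_thread_body(void *arg)
 *   {
 *       // `arg` is whatever pointer was passed to rb_thread_create()
 *       return rb_funcall((VALUE)arg, rb_intern("call"), 0);
 *   }
 *
 *   // ... rb_thread_create(my_thread_body, (void *)callable); ...
 */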
VALUE
rb_thread_create_ractor(rb_ractor_t *g, VALUE args, VALUE proc)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_ractor_proc,
        .g = g,
        .args = args,
        .proc = proc,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}
struct join_arg {
    struct rb_waiting_list *waiter;
    rb_thread_t *target;
    VALUE timeout;
    rb_hrtime_t *limit;
};

static VALUE
remove_from_join_list(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_thread = p->target;

    if (target_thread->status != THREAD_KILLED) {
        struct rb_waiting_list **join_list = &target_thread->join_list;

        while (*join_list) {
            if (*join_list == p->waiter) {
                *join_list = (*join_list)->next;
                break;
            }

            join_list = &(*join_list)->next;
        }
    }

    return Qnil;
}

static rb_hrtime_t *double2hrtime(rb_hrtime_t *, double);

static int
thread_finished(rb_thread_t *th)
{
    return th->status == THREAD_KILLED || th->value != Qundef;
}

static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiter->thread;
    rb_hrtime_t end = 0, *limit = p->limit;

    if (limit) {
        end = rb_hrtime_add(*limit, rb_hrtime_now());
    }

    while (!thread_finished(target_th)) {
        VALUE scheduler = rb_fiber_scheduler_current();

        if (scheduler != Qnil) {
            rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
        }
        else if (!limit) {
            th->status = THREAD_STOPPED_FOREVER;
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);
            native_sleep(th, 0);
            rb_ractor_sleeper_threads_dec(th->ractor);
        }
        else {
            if (hrtime_update_expire(limit, end)) {
                thread_debug("thread_join: timeout (thid: %"PRI_THREAD_ID")\n",
                             thread_id_str(target_th));
                return Qfalse;
            }
            th->status = THREAD_STOPPED;
            native_sleep(th, limit);
        }
        RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
        th->status = THREAD_RUNNABLE;

        thread_debug("thread_join: interrupted (thid: %"PRI_THREAD_ID", status: %s)\n",
                     thread_id_str(target_th), thread_status_name(target_th, TRUE));
    }

    return Qtrue;
}
static VALUE
thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = ec->thread_ptr;
    rb_fiber_t *fiber = ec->fiber_ptr;

    if (th == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be current thread");
    }

    if (th->ractor->threads.main == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be main thread");
    }

    thread_debug("thread_join (thid: %"PRI_THREAD_ID", status: %s)\n",
                 thread_id_str(target_th), thread_status_name(target_th, TRUE));

    if (target_th->status != THREAD_KILLED) {
        struct rb_waiting_list waiter;
        waiter.next = target_th->join_list;
        waiter.thread = th;
        waiter.fiber = fiber;
        target_th->join_list = &waiter;

        struct join_arg arg;
        arg.waiter = &waiter;
        arg.target = target_th;
        arg.timeout = timeout;
        arg.limit = limit;

        if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    thread_debug("thread_join: success (thid: %"PRI_THREAD_ID", status: %s)\n",
                 thread_id_str(target_th), thread_status_name(target_th, TRUE));

    if (target_th->ec->errinfo != Qnil) {
        VALUE err = target_th->ec->errinfo;

        if (FIXNUM_P(err)) {
            switch (err) {
              case INT2FIX(TAG_FATAL):
                thread_debug("thread_join: terminated (thid: %"PRI_THREAD_ID", status: %s)\n",
                             thread_id_str(target_th), thread_status_name(target_th, TRUE));

                /* OK. killed. */
                break;
              default:
                rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
            }
        }
        else if (THROW_DATA_P(target_th->ec->errinfo)) {
            rb_bug("thread_join: THROW_DATA should not reach here.");
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}
/*
 *  call-seq:
 *     thr.join          -> thr
 *     thr.join(limit)   -> thr
 *
 *  The calling thread will suspend execution and run this +thr+.
 *
 *  Does not return until +thr+ exits or until the given +limit+ seconds have
 *  passed.
 *
 *  If the time limit expires, +nil+ will be returned, otherwise +thr+ is
 *  returned.
 *
 *  Any threads not joined will be killed when the main program exits.
 *
 *  If +thr+ had previously raised an exception and the ::abort_on_exception
 *  or $DEBUG flags are not set (so the exception has not yet been processed),
 *  it will be processed at this time.
 *
 *     a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
 *     x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
 *     x.join # Let thread x finish, thread a will be killed on exit.
 *     #=> "axyz"
 *
 *  The following example illustrates the +limit+ parameter.
 *
 *     y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
 *     puts "Waiting" until y.join(0.15)
 *
 *  This will produce:
 *
 *     tick...
 *     Waiting
 *     tick...
 *     Waiting
 *     tick...
 *     tick...
 */

static VALUE
thread_join_m(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout = Qnil;
    rb_hrtime_t rel = 0, *limit = 0;

    if (rb_check_arity(argc, 0, 1)) {
        timeout = argv[0];
    }

    // Convert the timeout eagerly, so it's always converted and deterministic
    /*
     * This supports INFINITY and negative values, so we can't use
     * rb_time_interval right now...
     */
    if (NIL_P(timeout)) {
        /* unlimited */
    }
    else if (FIXNUM_P(timeout)) {
        rel = rb_sec2hrtime(NUM2TIMET(timeout));
        limit = &rel;
    }
    else {
        limit = double2hrtime(&rel, rb_num2dbl(timeout));
    }

    return thread_join(rb_thread_ptr(self), timeout, limit);
}
/*
 *  call-seq:
 *     thr.value   -> obj
 *
 *  Waits for +thr+ to complete, using #join, and returns its value or raises
 *  the exception which terminated the thread.
 *
 *     a = Thread.new { 2 + 2 }
 *     a.value   #=> 4
 *
 *     b = Thread.new { raise 'something went wrong' }
 *     b.value   #=> RuntimeError: something went wrong
 */

static VALUE
thread_value(VALUE self)
{
    rb_thread_t *th = rb_thread_ptr(self);
    thread_join(th, Qnil, 0);
    return th->value;
}
/*
 * Thread Scheduling
 */

/*
 * Back when we used "struct timeval", not all platforms implemented
 * tv_sec as time_t.  Nowadays we use "struct timespec" and tv_sec
 * seems to be implemented more consistently across platforms.
 * At least other parts of our code haven't had to deal with non-time_t
 * tv_sec in timespec...
 */
#define TIMESPEC_SEC_MAX TIMET_MAX
#define TIMESPEC_SEC_MIN TIMET_MIN

COMPILER_WARNING_PUSH
#if __has_warning("-Wimplicit-int-float-conversion")
COMPILER_WARNING_IGNORED(-Wimplicit-int-float-conversion)
#elif defined(_MSC_VER)
/* C4305: 'initializing': truncation from '__int64' to 'const double' */
COMPILER_WARNING_IGNORED(4305)
#endif
static const double TIMESPEC_SEC_MAX_as_double = TIMESPEC_SEC_MAX;
COMPILER_WARNING_POP

static rb_hrtime_t *
double2hrtime(rb_hrtime_t *hrt, double d)
{
    /* assume timespec.tv_sec has same signedness as time_t */
    const double TIMESPEC_SEC_MAX_PLUS_ONE = 2.0 * (TIMESPEC_SEC_MAX_as_double / 2.0 + 1.0);

    if (TIMESPEC_SEC_MAX_PLUS_ONE <= d) {
        return NULL;
    }
    else if (d <= 0) {
        *hrt = 0;
    }
    else {
        *hrt = (rb_hrtime_t)(d * (double)RB_HRTIME_PER_SEC);
    }
    return hrt;
}
static void
getclockofday(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
        return;
#endif
    rb_timespec_now(ts);
}

/*
 * Don't inline this, since the library call is already time-consuming
 * and we don't want "struct timespec" on the stack too long for GC
 */
NOINLINE(rb_hrtime_t rb_hrtime_now(void));
rb_hrtime_t
rb_hrtime_now(void)
{
    struct timespec ts;

    getclockofday(&ts);
    return rb_timespec2hrtime(&ts);
}

static void
sleep_forever(rb_thread_t *th, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status;
    int woke;

    status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    th->status = status;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == status) {
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);
        }
        native_sleep(th, 0);
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_dec(th->ractor);
        }
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
    }
    th->status = prev_status;
}
/*
 * at least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
 * being uninitialized, maybe other versions, too.
 */
COMPILER_WARNING_PUSH
#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif
#ifndef PRIu64
#define PRIu64 PRI_64_PREFIX "u"
#endif
/*
 * @end is the absolute time when @timeout is set to expire.
 * Returns true if @end has passed;
 * updates @timeout and returns false otherwise.
 */
static int
hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
{
    rb_hrtime_t now = rb_hrtime_now();

    if (now > end) return 1;
    thread_debug("hrtime_update_expire: "
                 "%"PRIu64" > %"PRIu64"\n",
                 (uint64_t)end, (uint64_t)now);
    *timeout = end - now;
    return 0;
}
COMPILER_WARNING_POP
static int
sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
}

void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
}

void
rb_thread_sleep_interruptible(void)
{
    rb_thread_t *th = GET_THREAD();
    enum rb_thread_status prev_status = th->status;

    th->status = THREAD_STOPPED;
    native_sleep(th, 0);
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    th->status = prev_status;
}

static void
rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker)
{
    VALUE scheduler = rb_fiber_scheduler_current();
    if (scheduler != Qnil) {
        rb_fiber_scheduler_block(scheduler, blocker, Qnil);
    }
    else {
        thread_debug("rb_thread_sleep_deadly_allow_spurious_wakeup\n");
        sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
    }
}

void
rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();

    sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
}
/*
 * CAUTION: This function causes thread switching.
 *          rb_thread_check_ints() checks Ruby's interrupts;
 *          some interrupts require thread switching, invoking handlers,
 *          and so on.
 */

void
rb_thread_check_ints(void)
{
    RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
}

/*
 * Hidden API for the tcl/tk wrapper.
 * There is no guarantee that it will be kept.
 */
int
rb_thread_check_trap_pending(void)
{
    return rb_signal_buff_size() != 0;
}

/* This function can be called in blocking region. */
int
rb_thread_interrupted(VALUE thval)
{
    return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
}

void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}
static void
rb_thread_schedule_limits(uint32_t limits_us)
{
    thread_debug("rb_thread_schedule\n");
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();

        if (th->running_time_us >= limits_us) {
            thread_debug("rb_thread_schedule/switch start\n");
            RB_GC_SAVE_MACHINE_CONTEXT(th);
            gvl_yield(rb_ractor_gvl(th->ractor), th);
            rb_ractor_thread_switch(th->ractor, th);
            thread_debug("rb_thread_schedule/switch done\n");
        }
    }
}

void
rb_thread_schedule(void)
{
    rb_thread_schedule_limits(0);
    RUBY_VM_CHECK_INTS(GET_EC());
}
/* blocking region */

static inline int
blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                      rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
{
    region->prev_status = th->status;
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
        th->blocking_region_buffer = region;
        th->status = THREAD_STOPPED;
        rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
        thread_debug("enter blocking region (%p)\n", (void *)th);
        RB_GC_SAVE_MACHINE_CONTEXT(th);
        gvl_release(rb_ractor_gvl(th->ractor));
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    /* entry to ubf_list still permitted at this point, make it impossible: */
    unblock_function_clear(th);
    /* entry to ubf_list impossible at this point, so unregister is safe: */
    unregister_ubf_list(th);

    gvl_acquire(rb_ractor_gvl(th->ractor), th);
    rb_ractor_thread_switch(th->ractor, th);

    thread_debug("leave blocking region (%p)\n", (void *)th);
    th->blocking_region_buffer = 0;
    rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }
}
void *
rb_nogvl(void *(*func)(void *), void *data1,
         rb_unblock_function_t *ubf, void *data2,
         int flags)
{
    void *val = 0;
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_ec_thread_ptr(ec);
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    bool is_main_thread = vm->ractor.main_thread == th;
    int saved_errno = 0;
    VALUE ubf_th = Qfalse;

    if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
        ubf = ubf_select;
        data2 = th;
    }
    else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
        if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
            vm->ubf_async_safe = 1;
        }
        else {
            ubf_th = rb_thread_start_unblock_thread();
        }
    }

    BLOCKING_REGION(th, {
        val = func(data1);
        saved_errno = errno;
    }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);

    if (is_main_thread) vm->ubf_async_safe = 0;

    if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
        RUBY_VM_CHECK_INTS_BLOCKING(ec);
    }

    if (ubf_th != Qfalse) {
        thread_value(rb_thread_kill(ubf_th));
    }

    errno = saved_errno;

    return val;
}
/*
 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
 *                               without interrupt processing.
 *
 * rb_thread_call_without_gvl() does:
 *   (1) Check interrupts.
 *   (2) release GVL.
 *       Other Ruby threads may run in parallel.
 *   (3) call func with data1
 *   (4) acquire GVL.
 *       Other Ruby threads can not run in parallel any more.
 *   (5) Check interrupts.
 *
 * rb_thread_call_without_gvl2() does:
 *   (1) Check interrupt and return if interrupted.
 *   (2) release GVL.
 *   (3) call func with data1 and a pointer to the flags.
 *   (4) acquire GVL.
 *
 * If another thread interrupts this thread (Thread#kill, signal delivery,
 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
 * toggling a cancellation flag, canceling the invocation of a call inside
 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
 *
 * There are built-in ubfs and you can specify these ubfs:
 *
 * * RUBY_UBF_IO: ubf for IO operation
 * * RUBY_UBF_PROCESS: ubf for process operation
 *
 * However, we can not guarantee our built-in ubfs interrupt your `func()'
 * correctly. Be careful when using rb_thread_call_without_gvl(). If you
 * don't provide a proper ubf(), your program will not stop for Control+C
 * or other shutdown events.
 *
 * "Check interrupts" in the above list means checking asynchronous
 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
 * request, and so on) and calling corresponding procedures
 * (such as `trap' for signals, raising an exception for Thread#raise).
 * If `func()' finished and received interrupts, you may skip interrupt
 * checking.  For example, assume the following func() reads data from a
 * file.
 *
 *   read_func(...) {
 *                   // (a) before read
 *     read(buffer); // (b) reading
 *                   // (c) after read
 *   }
 *
 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
 * `read_func()' and interrupts are checked. However, if an interrupt occurs
 * at (c), after the *read* operation is completed, checking interrupts is
 * harmful because it causes an irrevocable side effect: the read data will
 * vanish.  To avoid such a problem, the `read_func()' should be used with
 * `rb_thread_call_without_gvl2()'.
 *
 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
 * immediately. This function does not show when the execution was
 * interrupted.  For example, there are 4 possible timings: (a), (b), (c)
 * and before calling read_func(). You need to record the progress of
 * read_func() and check it after `rb_thread_call_without_gvl2()'. You may
 * need to call `rb_thread_check_ints()' correctly, or your program can not
 * process procedures such as `trap' and so on.
 *
 * NOTE: You can not execute most of the Ruby C API or touch Ruby
 *       objects in `func()' and `ubf()', including raising an
 *       exception, because the current thread doesn't hold the GVL
 *       (it causes synchronization problems).  If you need to
 *       call Ruby functions, either use rb_thread_call_with_gvl()
 *       or read the source code of the C APIs and confirm their safety
 *       yourself.
 *
 * NOTE: In short, this API is difficult to use safely.  We recommend
 *       using other ways if you have them.  We lack experience using
 *       this API.  Please report any problem related to it.
 *
 * NOTE: Releasing the GVL and re-acquiring the GVL may be expensive
 *       operations for a short-running `func()'. Be sure to benchmark and
 *       use this mechanism only when `func()' consumes enough time.
 *
 * Safe C API:
 * * rb_thread_interrupted() - check interrupt flag
 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
 *   they will work without GVL, and may acquire GVL when GC is needed.
 */
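
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * a typical caller wraps a blocking system call and passes a ubf that can
 * wake it up.  The struct and helper below are hypothetical:
 *
 *   struct read_args { int fd; void *buf; size_t len; ssize_t result; };
 *
 *   static void *
 *   blocking_read(void *p)
 *   {
 *       struct read_args *a = p;
 *       a->result = read(a->fd, a->buf, a->len);  // runs without the GVL
 *       return NULL;
 *   }
 *
 *   // rb_thread_call_without_gvl(blocking_read, &args, RUBY_UBF_IO, NULL);
 */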
void *
rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
                            rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
}

void *
rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
                           rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, 0);
}
VALUE
rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
{
    volatile VALUE val = Qundef; /* shouldn't be used */
    rb_execution_context_t * volatile ec = GET_EC();
    volatile int saved_errno = 0;
    enum ruby_tag_type state;

    struct waiting_fd waiting_fd = {
        .fd = fd,
        .th = rb_ec_thread_ptr(ec)
    };

    RB_VM_LOCK_ENTER();
    {
        list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &waiting_fd.wfd_node);
    }
    RB_VM_LOCK_LEAVE();

    EC_PUSH_TAG(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        BLOCKING_REGION(waiting_fd.th, {
            val = func(data1);
            saved_errno = errno;
        }, ubf_select, waiting_fd.th, FALSE);
    }
    EC_POP_TAG();

    /*
     * must be deleted before jump
     * this will delete either from waiting_fds or on-stack LIST_HEAD(busy)
     */
    RB_VM_LOCK_ENTER();
    {
        list_del(&waiting_fd.wfd_node);
    }
    RB_VM_LOCK_LEAVE();

    if (state) {
        EC_JUMP_TAG(ec, state);
    }
    /* TODO: check func() */
    RUBY_VM_CHECK_INTS_BLOCKING(ec);

    errno = saved_errno;

    return val;
}
/*
 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
 *
 * After releasing the GVL using
 * rb_thread_call_without_gvl() you can not access Ruby values or invoke
 * methods. If you need to access Ruby you must use this function
 * rb_thread_call_with_gvl().
 *
 * This function rb_thread_call_with_gvl() does:
 * (1) acquire GVL.
 * (2) call passed function `func'.
 * (3) release GVL.
 * (4) return a value which is returned at (2).
 *
 * NOTE: You should not return a Ruby object at (2) because such an
 *       object will not be marked.
 *
 * NOTE: If an exception is raised in `func', this function DOES NOT
 *       protect (catch) the exception.  If you have any resources
 *       which should be freed before throwing an exception, you need
 *       to use rb_protect() in `func' and return a value which
 *       represents that an exception was raised.
 *
 * NOTE: This function should not be called by a thread which was not
 *       created as a Ruby thread (created by Thread.new or so). In other
 *       words, this function *DOES NOT* associate or convert a NON-Ruby
 *       thread to a Ruby thread.
 */
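
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * code already running without the GVL can hop back into the Ruby world
 * through this function.  The callback below is hypothetical:
 *
 *   static void *
 *   log_from_ruby(void *msg)
 *   {
 *       // safe here: the GVL is held for the duration of the callback
 *       rb_funcall(rb_stderr, rb_intern("puts"), 1,
 *                  rb_str_new_cstr((const char *)msg));
 *       return NULL;
 *   }
 *
 *   // inside a func() passed to rb_thread_call_without_gvl():
 *   // rb_thread_call_with_gvl(log_from_ruby, (void *)"progress report");
 */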
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
        /* Error has occurred, but we can't use rb_bug()
         * because this thread is not Ruby's thread.
         * What should we do?
         */
        bp();
        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(EXIT_FAILURE);
    }

    brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
    prev_unblock = th->unblock;

    if (brb == 0) {
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    blocking_region_end(th, brb);
    /* enter to Ruby world: You can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave from Ruby world: You can not access Ruby values, etc. */
    int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
    RUBY_ASSERT_ALWAYS(released);
    return r;
}
/*
 * ruby_thread_has_gvl_p - check if current native thread has GVL.
 *
 ***
 *** This API is EXPERIMENTAL!
 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
 ***
 */

int
ruby_thread_has_gvl_p(void)
{
    rb_thread_t *th = ruby_thread_from_native();

    if (th && th->blocking_region_buffer == 0) {
        return 1;
    }
    else {
        return 0;
    }
}

/*
 * call-seq:
 *    Thread.pass   -> nil
 *
 * Give the thread scheduler a hint to pass execution to another thread.
 * A running thread may or may not switch; it depends on the OS and
 * processor.
 */

static VALUE
thread_s_pass(VALUE klass)
{
    rb_thread_schedule();
    return Qnil;
}
/*****************************************************/

/*
 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
 *
 * Async events such as an exception thrown by Thread#raise,
 * Thread#kill and thread termination (after main thread termination)
 * will be queued to th->pending_interrupt_queue.
 * - clear: clear the queue.
 * - enque: enqueue an err object into the queue.
 * - deque: dequeue an err object from the queue.
 * - active_p: return 1 if the queue should be checked.
 *
 * All rb_threadptr_pending_interrupt_* functions are called by
 * a GVL-holding thread, of course.
 * Note that all "rb_" prefixed APIs need the GVL to call.
 */

void
rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
{
    rb_ary_clear(th->pending_interrupt_queue);
}

void
rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
{
    rb_ary_push(th->pending_interrupt_queue, v);
    th->pending_interrupt_queue_checked = 0;
}

static void
threadptr_check_pending_interrupt_queue(rb_thread_t *th)
{
    if (!th->pending_interrupt_queue) {
        rb_raise(rb_eThreadError, "uninitialized thread");
    }
}
enum handle_interrupt_timing {
    INTERRUPT_NONE,
    INTERRUPT_IMMEDIATE,
    INTERRUPT_ON_BLOCKING,
    INTERRUPT_NEVER
};

static enum handle_interrupt_timing
rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
{
    VALUE mask;
    long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
    const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
    VALUE mod;
    long i;

    for (i=0; i<mask_stack_len; i++) {
        mask = mask_stack[mask_stack_len-(i+1)];

        for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
            VALUE klass = mod;
            VALUE sym;

            if (BUILTIN_TYPE(mod) == T_ICLASS) {
                klass = RBASIC(mod)->klass;
            }
            else if (mod != RCLASS_ORIGIN(mod)) {
                continue;
            }

            if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
                if (sym == sym_immediate) {
                    return INTERRUPT_IMMEDIATE;
                }
                else if (sym == sym_on_blocking) {
                    return INTERRUPT_ON_BLOCKING;
                }
                else if (sym == sym_never) {
                    return INTERRUPT_NEVER;
                }
                else {
                    rb_raise(rb_eThreadError, "unknown mask signature");
                }
            }
        }
        /* try the next mask */
    }
    return INTERRUPT_NONE;
}
static int
rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
{
    return RARRAY_LEN(th->pending_interrupt_queue) == 0;
}

static int
rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
{
    int i;
    for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
        VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
        if (rb_class_inherited_p(e, err)) {
            return TRUE;
        }
    }
    return FALSE;
}

static VALUE
rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
{
#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
    int i;

    for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
        VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);

        enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));

        switch (mask_timing) {
          case INTERRUPT_ON_BLOCKING:
            if (timing != INTERRUPT_ON_BLOCKING) {
                break;
            }
            /* fall through */
          case INTERRUPT_NONE: /* default: IMMEDIATE */
          case INTERRUPT_IMMEDIATE:
            rb_ary_delete_at(th->pending_interrupt_queue, i);
            return err;
          case INTERRUPT_NEVER:
            break;
        }
    }

    th->pending_interrupt_queue_checked = 1;
    return Qundef;
#else
    VALUE err = rb_ary_shift(th->pending_interrupt_queue);
    if (rb_threadptr_pending_interrupt_empty_p(th)) {
        th->pending_interrupt_queue_checked = 1;
    }
    return err;
#endif
}
2096 static int
2097 threadptr_pending_interrupt_active_p(rb_thread_t *th)
2100 * For optimization, we don't check the async errinfo queue
2101 * if the queue and the thread interrupt mask have not been changed
2102 * since the last check.
2104 if (th->pending_interrupt_queue_checked) {
2105 return 0;
2108 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2109 return 0;
2112 return 1;
2115 static int
2116 handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2118 VALUE *maskp = (VALUE *)args;
2120 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2121 rb_raise(rb_eArgError, "unknown mask signature");
2124 if (!*maskp) {
2125 *maskp = rb_ident_hash_new();
2127 rb_hash_aset(*maskp, key, val);
2129 return ST_CONTINUE;
2133 * call-seq:
2134 * Thread.handle_interrupt(hash) { ... } -> result of the block
2136 * Changes asynchronous interrupt timing.
2138 * _interrupt_ means an asynchronous event and its corresponding procedure,
2139 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2140 * and main thread termination (if the main thread terminates, then all
2141 * other threads will be killed).
2143 * The given +hash+ has pairs like <code>ExceptionClass =>
2144 * :TimingSymbol</code>, where the ExceptionClass is the interrupt handled by
2145 * the given block. The TimingSymbol can be one of the following symbols:
2147 * [+:immediate+] Invoke interrupts immediately.
2148 * [+:on_blocking+] Invoke interrupts while in a _BlockingOperation_.
2149 * [+:never+] Never invoke interrupts.
2151 * _BlockingOperation_ means that the operation will block the calling thread,
2152 * such as read and write. On the CRuby implementation, _BlockingOperation_ is any
2153 * operation executed without the GVL.
2155 * Masked asynchronous interrupts are delayed until they are enabled.
2156 * This method is similar to sigprocmask(3).
2158 * === NOTE
2160 * Asynchronous interrupts are difficult to use.
2162 * If you need to communicate between threads, please consider another way, such as Queue.
2164 * Otherwise, use them only with a deep understanding of this method.
2166 * === Usage
2168 * In this example, we can guard against Thread#raise exceptions.
2170 * Using the +:never+ TimingSymbol, the RuntimeError exception will always be
2171 * ignored in the first block of the main thread. In the second
2172 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2174 * th = Thread.new do
2175 * Thread.handle_interrupt(RuntimeError => :never) {
2176 * begin
2177 * # You can write resource allocation code safely.
2178 * Thread.handle_interrupt(RuntimeError => :immediate) {
2179 * # ...
2181 * ensure
2182 * # You can write resource deallocation code safely.
2183 * end
2185 * end
2186 * Thread.pass
2187 * # ...
2188 * th.raise "stop"
2190 * While we are ignoring the RuntimeError exception, it's safe to write our
2191 * resource allocation code. Then, the ensure block is where we can safely
2192 * deallocate our resources.
2194 * ==== Guarding against Timeout::Error
2196 * In the next example, we will guard against the Timeout::Error exception. This
2197 * will help prevent leaking resources when Timeout::Error exceptions occur
2198 * during a normal ensure clause. For this example we use the help of the
2199 * standard library Timeout, from lib/timeout.rb
2201 * require 'timeout'
2202 * Thread.handle_interrupt(Timeout::Error => :never) {
2203 * timeout(10){
2204 * # Timeout::Error doesn't occur here
2205 * Thread.handle_interrupt(Timeout::Error => :on_blocking) {
2206 * # possible to be killed by Timeout::Error
2207 * # while blocking operation
2209 * # Timeout::Error doesn't occur here
2213 * In the first part of the +timeout+ block, we can rely on Timeout::Error being
2214 * ignored. Then in the <code>Timeout::Error => :on_blocking</code> block, any
2215 * operation that will block the calling thread is susceptible to a
2216 * Timeout::Error exception being raised.
2218 * ==== Stack control settings
2220 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2221 * to control more than one ExceptionClass and TimingSymbol at a time.
2223 * Thread.handle_interrupt(FooError => :never) {
2224 * Thread.handle_interrupt(BarError => :never) {
2225 * # FooError and BarError are prohibited.
2229 * ==== Inheritance with ExceptionClass
2231 * All exceptions inherited from the ExceptionClass parameter will be considered.
2233 * Thread.handle_interrupt(Exception => :never) {
2234 * # all exceptions inherited from Exception are prohibited.
2237 * For handling all interrupts, use +Object+ and not +Exception+
2238 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2240 static VALUE
2241 rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2243 VALUE mask;
2244 rb_execution_context_t * volatile ec = GET_EC();
2245 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2246 volatile VALUE r = Qnil;
2247 enum ruby_tag_type state;
2249 if (!rb_block_given_p()) {
2250 rb_raise(rb_eArgError, "block is needed.");
2253 mask = 0;
2254 mask_arg = rb_to_hash_type(mask_arg);
2255 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2256 if (!mask) {
2257 return rb_yield(Qnil);
2259 OBJ_FREEZE_RAW(mask);
2260 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2261 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2262 th->pending_interrupt_queue_checked = 0;
2263 RUBY_VM_SET_INTERRUPT(th->ec);
2266 EC_PUSH_TAG(th->ec);
2267 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2268 r = rb_yield(Qnil);
2270 EC_POP_TAG();
2272 rb_ary_pop(th->pending_interrupt_mask_stack);
2273 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2274 th->pending_interrupt_queue_checked = 0;
2275 RUBY_VM_SET_INTERRUPT(th->ec);
2278 RUBY_VM_CHECK_INTS(th->ec);
2280 if (state) {
2281 EC_JUMP_TAG(th->ec, state);
2284 return r;
2288 * call-seq:
2289 * target_thread.pending_interrupt?(error = nil) -> true/false
2291 * Returns +true+ if the asynchronous event queue is not empty for the target thread.
2293 * If +error+ is given, then check only for deferred events of the +error+ type.
2295 * See ::pending_interrupt? for more information.
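 *
 * For example (a timing-dependent sketch):
 *
 *    th = Thread.new do
 *      Thread.handle_interrupt(RuntimeError => :never) { sleep }
 *    end
 *    th.raise "stop"                       # queued, not delivered, while masked
 *    th.pending_interrupt?                 #=> true
 *    th.pending_interrupt?(RuntimeError)   #=> true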
2297 static VALUE
2298 rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2300 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2302 if (!target_th->pending_interrupt_queue) {
2303 return Qfalse;
2305 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2306 return Qfalse;
2308 if (rb_check_arity(argc, 0, 1)) {
2309 VALUE err = argv[0];
2310 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2311 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2313 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2315 else {
2316 return Qtrue;
2321 * call-seq:
2322 * Thread.pending_interrupt?(error = nil) -> true/false
2324 * Returns +true+ if the asynchronous event queue of the current thread is not empty.
2326 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2327 * this method can be used to determine if there are any deferred events.
2329 * If this method returns true, then you may finish the +:never+ block.
2331 * For example, the following method processes deferred asynchronous events
2332 * immediately.
2334 * def Thread.kick_interrupt_immediately
2335 * Thread.handle_interrupt(Object => :immediate) {
2336 * Thread.pass
2338 * end
2340 * If +error+ is given, then check only for deferred events of the +error+ type.
2342 * === Usage
2344 * th = Thread.new{
2345 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2346 * while true
2347 * ...
2348 * # reach safe point to invoke interrupt
2349 * if Thread.pending_interrupt?
2350 * Thread.handle_interrupt(Object => :immediate){}
2351 * end
2352 * ...
2353 * end
2356 * ...
2357 * th.raise # stop thread
2359 * This example can also be written as follows; prefer this style when you
2360 * want to avoid asynchronous interrupts altogether.
2362 * flag = true
2363 * th = Thread.new{
2364 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2365 * while true
2366 * ...
2367 * # reach safe point to invoke interrupt
2368 * break if flag == false
2369 * ...
2370 * end
2373 * ...
2374 * flag = false # stop thread
2377 static VALUE
2378 rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2380 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2383 NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2385 static void
2386 rb_threadptr_to_kill(rb_thread_t *th)
2388 rb_threadptr_pending_interrupt_clear(th);
2389 th->status = THREAD_RUNNABLE;
2390 th->to_kill = 1;
2391 th->ec->errinfo = INT2FIX(TAG_FATAL);
2392 EC_JUMP_TAG(th->ec, TAG_FATAL);
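/* Atomically fetch and clear the interrupt bits of ec->interrupt_flag that
 * are not blocked by ec->interrupt_mask; masked bits are left pending.
 * Implemented as a CAS retry loop. */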
2395 static inline rb_atomic_t
2396 threadptr_get_interrupts(rb_thread_t *th)
2398 rb_execution_context_t *ec = th->ec;
2399 rb_atomic_t interrupt;
2400 rb_atomic_t old;
2402 do {
2403 interrupt = ec->interrupt_flag;
2404 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2405 } while (old != interrupt);
2406 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2409 MJIT_FUNC_EXPORTED int
2410 rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2412 rb_atomic_t interrupt;
2413 int postponed_job_interrupt = 0;
2414 int ret = FALSE;
2416 if (th->ec->raised_flag) return ret;
2418 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2419 int sig;
2420 int timer_interrupt;
2421 int pending_interrupt;
2422 int trap_interrupt;
2423 int terminate_interrupt;
2425 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2426 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2427 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2428 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2429 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2431 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2432 RB_VM_LOCK_ENTER();
2433 RB_VM_LOCK_LEAVE();
2436 if (postponed_job_interrupt) {
2437 rb_postponed_job_flush(th->vm);
2440 /* signal handling */
2441 if (trap_interrupt && (th == th->vm->ractor.main_thread)) {
2442 enum rb_thread_status prev_status = th->status;
2443 int sigwait_fd = rb_sigwait_fd_get(th);
2445 if (sigwait_fd >= 0) {
2446 (void)consume_communication_pipe(sigwait_fd);
2447 ruby_sigchld_handler(th->vm);
2448 rb_sigwait_fd_put(th, sigwait_fd);
2449 rb_sigwait_fd_migrate(th->vm);
2451 th->status = THREAD_RUNNABLE;
2452 while ((sig = rb_get_next_signal()) != 0) {
2453 ret |= rb_signal_exec(th, sig);
2455 th->status = prev_status;
2458 /* exception from another thread */
2459 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2460 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2461 thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);
2462 ret = TRUE;
2464 if (err == Qundef) {
2465 /* no error */
2467 else if (err == eKillSignal /* Thread#kill received */ ||
2468 err == eTerminateSignal /* Terminate thread */ ||
2469 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2470 terminate_interrupt = 1;
2472 else {
2473 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2474 /* the only special exception to be queued across threads */
2475 err = ruby_vm_special_exception_copy(err);
2477 /* set runnable if th was sleeping. */
2478 if (th->status == THREAD_STOPPED ||
2479 th->status == THREAD_STOPPED_FOREVER)
2480 th->status = THREAD_RUNNABLE;
2481 rb_exc_raise(err);
2485 if (terminate_interrupt) {
2486 rb_threadptr_to_kill(th);
2489 if (timer_interrupt) {
2490 uint32_t limits_us = TIME_QUANTUM_USEC;
2492 if (th->priority > 0)
2493 limits_us <<= th->priority;
2494 else
2495 limits_us >>= -th->priority;
2497 if (th->status == THREAD_RUNNABLE)
2498 th->running_time_us += TIME_QUANTUM_USEC;
2500 VM_ASSERT(th->ec->cfp);
2501 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2502 0, 0, 0, Qundef);
2504 rb_thread_schedule_limits(limits_us);
2507 return ret;
2510 void
2511 rb_thread_execute_interrupts(VALUE thval)
2513 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2516 static void
2517 rb_threadptr_ready(rb_thread_t *th)
2519 rb_threadptr_interrupt(th);
2522 static VALUE
2523 rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2525 VALUE exc;
2527 if (rb_threadptr_dead(target_th)) {
2528 return Qnil;
2531 if (argc == 0) {
2532 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2534 else {
2535 exc = rb_make_exception(argc, argv);
2538 /* making an exception object can switch threads,
2539 so we need to check thread deadness again */
2540 if (rb_threadptr_dead(target_th)) {
2541 return Qnil;
2544 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2545 rb_threadptr_pending_interrupt_enque(target_th, exc);
2546 rb_threadptr_interrupt(target_th);
2547 return Qnil;
2550 void
2551 rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2553 VALUE argv[2];
2555 argv[0] = rb_eSignal;
2556 argv[1] = INT2FIX(sig);
2557 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2560 void
2561 rb_threadptr_signal_exit(rb_thread_t *th)
2563 VALUE argv[2];
2565 argv[0] = rb_eSystemExit;
2566 argv[1] = rb_str_new2("exit");
2568 // TODO: check signal raise delivery
2569 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2573 rb_ec_set_raised(rb_execution_context_t *ec)
2575 if (ec->raised_flag & RAISED_EXCEPTION) {
2576 return 1;
2578 ec->raised_flag |= RAISED_EXCEPTION;
2579 return 0;
2583 rb_ec_reset_raised(rb_execution_context_t *ec)
2585 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2586 return 0;
2588 ec->raised_flag &= ~RAISED_EXCEPTION;
2589 return 1;
2593 rb_notify_fd_close(int fd, struct list_head *busy)
2595 rb_vm_t *vm = GET_THREAD()->vm;
2596 struct waiting_fd *wfd = 0, *next;
2598 RB_VM_LOCK_ENTER();
2600 list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2601 if (wfd->fd == fd) {
2602 rb_thread_t *th = wfd->th;
2603 VALUE err;
2605 list_del(&wfd->wfd_node);
2606 list_add(busy, &wfd->wfd_node);
2608 err = th->vm->special_exceptions[ruby_error_stream_closed];
2609 rb_threadptr_pending_interrupt_enque(th, err);
2610 rb_threadptr_interrupt(th);
2614 RB_VM_LOCK_LEAVE();
2616 return !list_empty(busy);
2619 void
2620 rb_thread_fd_close(int fd)
2622 struct list_head busy;
2624 list_head_init(&busy);
2625 if (rb_notify_fd_close(fd, &busy)) {
2626 do rb_thread_schedule(); while (!list_empty(&busy));
2631 * call-seq:
2632 * thr.raise
2633 * thr.raise(string)
2634 * thr.raise(exception [, string [, array]])
2636 * Raises an exception from the given thread. The caller does not have to be
2637 * +thr+. See Kernel#raise for more information.
2639 * Thread.abort_on_exception = true
2640 * a = Thread.new { sleep(200) }
2641 * a.raise("Gotcha")
2643 * This will produce:
2645 * prog.rb:3: Gotcha (RuntimeError)
2646 * from prog.rb:2:in `initialize'
2647 * from prog.rb:2:in `new'
2648 * from prog.rb:2
2651 static VALUE
2652 thread_raise_m(int argc, VALUE *argv, VALUE self)
2654 rb_thread_t *target_th = rb_thread_ptr(self);
2655 const rb_thread_t *current_th = GET_THREAD();
2657 threadptr_check_pending_interrupt_queue(target_th);
2658 rb_threadptr_raise(target_th, argc, argv);
2660 /* To perform Thread.current.raise as Kernel.raise */
2661 if (current_th == target_th) {
2662 RUBY_VM_CHECK_INTS(target_th->ec);
2664 return Qnil;
2669 * call-seq:
2670 * thr.exit -> thr
2671 * thr.kill -> thr
2672 * thr.terminate -> thr
2674 * Terminates +thr+ and schedules another thread to be run, returning
2675 * the terminated Thread. If this is the main thread, or the last
2676 * thread, exits the process.
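 *
 * For example (illustrative):
 *
 *    th = Thread.new { sleep }
 *    th.kill.join
 *    th.alive?   #=> false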
2679 VALUE
2680 rb_thread_kill(VALUE thread)
2682 rb_thread_t *th = rb_thread_ptr(thread);
2684 if (th->to_kill || th->status == THREAD_KILLED) {
2685 return thread;
2687 if (th == th->vm->ractor.main_thread) {
2688 rb_exit(EXIT_SUCCESS);
2691 thread_debug("rb_thread_kill: %p (%"PRI_THREAD_ID")\n", (void *)th, thread_id_str(th));
2693 if (th == GET_THREAD()) {
2694 /* kill myself immediately */
2695 rb_threadptr_to_kill(th);
2697 else {
2698 threadptr_check_pending_interrupt_queue(th);
2699 rb_threadptr_pending_interrupt_enque(th, eKillSignal);
2700 rb_threadptr_interrupt(th);
2702 return thread;
2706 rb_thread_to_be_killed(VALUE thread)
2708 rb_thread_t *th = rb_thread_ptr(thread);
2710 if (th->to_kill || th->status == THREAD_KILLED) {
2711 return TRUE;
2713 return FALSE;
2717 * call-seq:
2718 * Thread.kill(thread) -> thread
2720 * Causes the given +thread+ to exit, see also Thread::exit.
2722 * count = 0
2723 * a = Thread.new { loop { count += 1 } }
2724 * sleep(0.1) #=> 0
2725 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2726 * count #=> 93947
2727 * a.alive? #=> false
2730 static VALUE
2731 rb_thread_s_kill(VALUE obj, VALUE th)
2733 return rb_thread_kill(th);
2738 * call-seq:
2739 * Thread.exit -> thread
2741 * Terminates the currently running thread and schedules another thread to be
2742 * run.
2744 * If this thread is already marked to be killed, ::exit returns the Thread.
2746 * If this is the main thread, or the last thread, exit the process.
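 *
 * For example (a minimal sketch):
 *
 *    th = Thread.new { Thread.exit; puts "never reached" }
 *    th.join
 *    th.status   #=> false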
2749 static VALUE
2750 rb_thread_exit(VALUE _)
2752 rb_thread_t *th = GET_THREAD();
2753 return rb_thread_kill(th->self);
2758 * call-seq:
2759 * thr.wakeup -> thr
2761 * Marks a given thread as eligible for scheduling; however, it may still
2762 * remain blocked on I/O.
2764 * *Note:* This does not invoke the scheduler; see #run for more information.
2766 * c = Thread.new { Thread.stop; puts "hey!" }
2767 * sleep 0.1 while c.status!='sleep'
2768 * c.wakeup
2769 * c.join
2770 * #=> "hey!"
2773 VALUE
2774 rb_thread_wakeup(VALUE thread)
2776 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2777 rb_raise(rb_eThreadError, "killed thread");
2779 return thread;
2782 VALUE
2783 rb_thread_wakeup_alive(VALUE thread)
2785 rb_thread_t *target_th = rb_thread_ptr(thread);
2786 if (target_th->status == THREAD_KILLED) return Qnil;
2788 rb_threadptr_ready(target_th);
2790 if (target_th->status == THREAD_STOPPED ||
2791 target_th->status == THREAD_STOPPED_FOREVER) {
2792 target_th->status = THREAD_RUNNABLE;
2795 return thread;
2800 * call-seq:
2801 * thr.run -> thr
2803 * Wakes up +thr+, making it eligible for scheduling.
2805 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2806 * sleep 0.1 while a.status!='sleep'
2807 * puts "Got here"
2808 * a.run
2809 * a.join
2811 * This will produce:
2814 * Got here
2817 * See also the instance method #wakeup.
2820 VALUE
2821 rb_thread_run(VALUE thread)
2823 rb_thread_wakeup(thread);
2824 rb_thread_schedule();
2825 return thread;
2829 VALUE
2830 rb_thread_stop(void)
2832 if (rb_thread_alone()) {
2833 rb_raise(rb_eThreadError,
2834 "stopping only thread\n\tnote: use sleep to stop forever");
2836 rb_thread_sleep_deadly();
2837 return Qnil;
2841 * call-seq:
2842 * Thread.stop -> nil
2844 * Stops execution of the current thread, putting it into a ``sleep'' state,
2845 * and schedules execution of another thread.
2847 * a = Thread.new { print "a"; Thread.stop; print "c" }
2848 * sleep 0.1 while a.status!='sleep'
2849 * print "b"
2850 * a.run
2851 * a.join
2852 * #=> "abc"
2855 static VALUE
2856 thread_stop(VALUE _)
2858 return rb_thread_stop();
2861 /********************************************************************/
2863 VALUE
2864 rb_thread_list(void)
2866 // TODO
2867 return rb_ractor_thread_list(GET_RACTOR());
2871 * call-seq:
2872 * Thread.list -> array
2874 * Returns an array of Thread objects for all threads that are either runnable
2875 * or stopped.
2877 * Thread.new { sleep(200) }
2878 * Thread.new { 1000000.times {|i| i*i } }
2879 * Thread.new { Thread.stop }
2880 * Thread.list.each {|t| p t}
2882 * This will produce:
2884 * #<Thread:0x401b3e84 sleep>
2885 * #<Thread:0x401b3f38 run>
2886 * #<Thread:0x401b3fb0 sleep>
2887 * #<Thread:0x401bdf4c run>
2890 static VALUE
2891 thread_list(VALUE _)
2893 return rb_thread_list();
2896 VALUE
2897 rb_thread_current(void)
2899 return GET_THREAD()->self;
2903 * call-seq:
2904 * Thread.current -> thread
2906 * Returns the currently executing thread.
2908 * Thread.current #=> #<Thread:0x401bdf4c run>
2911 static VALUE
2912 thread_s_current(VALUE klass)
2914 return rb_thread_current();
2917 VALUE
2918 rb_thread_main(void)
2920 return GET_RACTOR()->threads.main->self;
2924 * call-seq:
2925 * Thread.main -> thread
2927 * Returns the main thread.
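 *
 * For example:
 *
 *    Thread.main == Thread.current   #=> true, when called from the main thread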
2930 static VALUE
2931 rb_thread_s_main(VALUE klass)
2933 return rb_thread_main();
2938 * call-seq:
2939 * Thread.abort_on_exception -> true or false
2941 * Returns the status of the global ``abort on exception'' condition.
2943 * The default is +false+.
2945 * When set to +true+, if any thread is aborted by an exception, the
2946 * raised exception will be re-raised in the main thread.
2948 * Can also be specified by the global $DEBUG flag or command line option
2949 * +-d+.
2951 * See also ::abort_on_exception=.
2953 * There is also an instance level method to set this for a specific thread,
2954 * see #abort_on_exception.
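 *
 * For example:
 *
 *    Thread.abort_on_exception   #=> false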
2957 static VALUE
2958 rb_thread_s_abort_exc(VALUE _)
2960 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
2965 * call-seq:
2966 * Thread.abort_on_exception= boolean -> true or false
2968 * When set to +true+, if any thread is aborted by an exception, the
2969 * raised exception will be re-raised in the main thread.
2970 * Returns the new state.
2972 * Thread.abort_on_exception = true
2973 * t1 = Thread.new do
2974 * puts "In new thread"
2975 * raise "Exception from thread"
2976 * end
2977 * sleep(1)
2978 * puts "not reached"
2980 * This will produce:
2982 * In new thread
2983 * prog.rb:4: Exception from thread (RuntimeError)
2984 * from prog.rb:2:in `initialize'
2985 * from prog.rb:2:in `new'
2986 * from prog.rb:2
2988 * See also ::abort_on_exception.
2990 * There is also an instance level method to set this for a specific thread,
2991 * see #abort_on_exception=.
2994 static VALUE
2995 rb_thread_s_abort_exc_set(VALUE self, VALUE val)
2997 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
2998 return val;
3003 * call-seq:
3004 * thr.abort_on_exception -> true or false
3006 * Returns the status of the thread-local ``abort on exception'' condition for
3007 * this +thr+.
3009 * The default is +false+.
3011 * See also #abort_on_exception=.
3013 * There is also a class level method to set this for all threads, see
3014 * ::abort_on_exception.
3017 static VALUE
3018 rb_thread_abort_exc(VALUE thread)
3020 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3025 * call-seq:
3026 * thr.abort_on_exception= boolean -> true or false
3028 * When set to +true+, if this +thr+ is aborted by an exception, the
3029 * raised exception will be re-raised in the main thread.
3031 * See also #abort_on_exception.
3033 * There is also a class level method to set this for all threads, see
3034 * ::abort_on_exception=.
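 *
 * For example (a sketch; the sleep leaves time to set the flag before the
 * thread raises):
 *
 *    t = Thread.new { sleep 0.1; raise "re-raised in the main thread" }
 *    t.abort_on_exception = true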
3037 static VALUE
3038 rb_thread_abort_exc_set(VALUE thread, VALUE val)
3040 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3041 return val;
3046 * call-seq:
3047 * Thread.report_on_exception -> true or false
3049 * Returns the status of the global ``report on exception'' condition.
3051 * The default is +true+ since Ruby 2.5.
3053 * All threads created when this flag is true will report
3054 * a message on $stderr if an exception kills the thread.
3056 * Thread.new { 1.times { raise } }
3058 * will produce this output on $stderr:
3060 * #<Thread:...> terminated with exception (report_on_exception is true):
3061 * Traceback (most recent call last):
3062 * 2: from -e:1:in `block in <main>'
3063 * 1: from -e:1:in `times'
3065 * This is done to catch errors in threads early.
3066 * In some cases, you might not want this output.
3067 * There are multiple ways to avoid the extra output:
3069 * * If the exception is not intended, the best is to fix the cause of
3070 * the exception so it does not happen anymore.
3071 * * If the exception is intended, it might be better to rescue it closer to
3072 * where it is raised rather than let it kill the Thread.
3073 * * If it is guaranteed the Thread will be joined with Thread#join or
3074 * Thread#value, then it is safe to disable this report with
3075 * <code>Thread.current.report_on_exception = false</code>
3076 * when starting the Thread.
3077 * However, this might handle the exception much later, or not at all
3078 * if the Thread is never joined due to the parent thread being blocked, etc.
3080 * See also ::report_on_exception=.
3082 * There is also an instance level method to set this for a specific thread,
3083 * see #report_on_exception=.
3087 static VALUE
3088 rb_thread_s_report_exc(VALUE _)
3090 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3095 * call-seq:
3096 * Thread.report_on_exception= boolean -> true or false
3098 * Returns the new state.
3099 * When set to +true+, all threads created afterwards will inherit the
3100 * condition and report a message on $stderr if an exception kills a thread:
3102 * Thread.report_on_exception = true
3103 * t1 = Thread.new do
3104 * puts "In new thread"
3105 * raise "Exception from thread"
3106 * end
3107 * sleep(1)
3108 * puts "In the main thread"
3110 * This will produce:
3112 * In new thread
3113 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3114 * Traceback (most recent call last):
3115 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3116 * In the main thread
3118 * See also ::report_on_exception.
3120 * There is also an instance level method to set this for a specific thread,
3121 * see #report_on_exception=.
3124 static VALUE
3125 rb_thread_s_report_exc_set(VALUE self, VALUE val)
3127 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3128 return val;
3133 * call-seq:
3134 * Thread.ignore_deadlock -> true or false
3136 * Returns the status of the global ``ignore deadlock'' condition.
3137 * The default is +false+, so that deadlock conditions are not ignored.
3139 * See also ::ignore_deadlock=.
3143 static VALUE
3144 rb_thread_s_ignore_deadlock(VALUE _)
3146 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3151 * call-seq:
3152 * Thread.ignore_deadlock = boolean -> true or false
3154 * Returns the new state.
3155 * When set to +true+, the VM will not check for deadlock conditions.
3156 * It is only useful to set this if your application can break a
3157 * deadlock condition via some other means, such as a signal.
3159 * Thread.ignore_deadlock = true
3160 * queue = Thread::Queue.new
3162 * trap(:SIGUSR1){queue.push "Received signal"}
3164 * # raises fatal error unless ignoring deadlock
3165 * puts queue.pop
3167 * See also ::ignore_deadlock.
3170 static VALUE
3171 rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3173 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3174 return val;
3179 * call-seq:
3180 * thr.report_on_exception -> true or false
3182 * Returns the status of the thread-local ``report on exception'' condition for
3183 * this +thr+.
3185 * The default value when creating a Thread is the value of
3186 * the global flag Thread.report_on_exception.
3188 * See also #report_on_exception=.
3190 * There is also a class level method to set this for all new threads, see
3191 * ::report_on_exception=.
3194 static VALUE
3195 rb_thread_report_exc(VALUE thread)
3197 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3202 * call-seq:
3203 * thr.report_on_exception= boolean -> true or false
3205 * When set to +true+, a message is printed on $stderr if an exception
3206 * kills this +thr+. See ::report_on_exception for details.
3208 * See also #report_on_exception.
3210 * There is also a class level method to set this for all new threads, see
3211 * ::report_on_exception=.
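 *
 * For example (a sketch that silences the report from inside the thread):
 *
 *    t = Thread.new do
 *      Thread.current.report_on_exception = false
 *      raise "expected failure"   # not reported on $stderr
 *    end
 *    t.join rescue nil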
3214 static VALUE
3215 rb_thread_report_exc_set(VALUE thread, VALUE val)
3217 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3218 return val;
3223 * call-seq:
3224 * thr.group -> thgrp or nil
3226 * Returns the ThreadGroup which contains the given thread.
3228 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3231 VALUE
3232 rb_thread_group(VALUE thread)
3234 return rb_thread_ptr(thread)->thgroup;
3237 static const char *
3238 thread_status_name(rb_thread_t *th, int detail)
3240 switch (th->status) {
3241 case THREAD_RUNNABLE:
3242 return th->to_kill ? "aborting" : "run";
3243 case THREAD_STOPPED_FOREVER:
3244 if (detail) return "sleep_forever";
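/* fall through */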
3245 case THREAD_STOPPED:
3246 return "sleep";
3247 case THREAD_KILLED:
3248 return "dead";
3249 default:
3250 return "unknown";
3254 static int
3255 rb_threadptr_dead(rb_thread_t *th)
3257 return th->status == THREAD_KILLED;
3262 * call-seq:
3263 * thr.status -> string, false or nil
3265 * Returns the status of +thr+.
3267 * [<tt>"sleep"</tt>]
3268 * Returned if this thread is sleeping or waiting on I/O
3269 * [<tt>"run"</tt>]
3270 * When this thread is executing
3271 * [<tt>"aborting"</tt>]
3272 * If this thread is aborting
3273 * [+false+]
3274 * When this thread is terminated normally
3275 * [+nil+]
3276 * If terminated with an exception.
3278 * a = Thread.new { raise("die now") }
3279 * b = Thread.new { Thread.stop }
3280 * c = Thread.new { Thread.exit }
3281 * d = Thread.new { sleep }
3282 * d.kill #=> #<Thread:0x401b3678 aborting>
3283 * a.status #=> nil
3284 * b.status #=> "sleep"
3285 * c.status #=> false
3286 * d.status #=> "aborting"
3287 * Thread.current.status #=> "run"
3289 * See also the instance methods #alive? and #stop?
3292 static VALUE
3293 rb_thread_status(VALUE thread)
3295 rb_thread_t *target_th = rb_thread_ptr(thread);
3297 if (rb_threadptr_dead(target_th)) {
3298 if (!NIL_P(target_th->ec->errinfo) &&
3299 !FIXNUM_P(target_th->ec->errinfo)) {
3300 return Qnil;
3302 else {
3303 return Qfalse;
3306 else {
3307 return rb_str_new2(thread_status_name(target_th, FALSE));
3313 * call-seq:
3314 * thr.alive? -> true or false
3316 * Returns +true+ if +thr+ is running or sleeping.
3318 * thr = Thread.new { }
3319 * thr.join #=> #<Thread:0x401b3fb0 dead>
3320 * Thread.current.alive? #=> true
3321 * thr.alive? #=> false
3323 * See also #stop? and #status.
3326 static VALUE
3327 rb_thread_alive_p(VALUE thread)
3329 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3333 * call-seq:
3334 * thr.stop? -> true or false
3336 * Returns +true+ if +thr+ is dead or sleeping.
3338 * a = Thread.new { Thread.stop }
3339 * b = Thread.current
3340 * a.stop? #=> true
3341 * b.stop? #=> false
3343 * See also #alive? and #status.
3346 static VALUE
3347 rb_thread_stop_p(VALUE thread)
3349 rb_thread_t *th = rb_thread_ptr(thread);
3351 if (rb_threadptr_dead(th)) {
3352 return Qtrue;
3354 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3358 * call-seq:
3359 * thr.name -> string
3361 * Returns the name of the thread.
3364 static VALUE
3365 rb_thread_getname(VALUE thread)
3367 return rb_thread_ptr(thread)->name;
3371 * call-seq:
3372 * thr.name=(name) -> string
3374 * Sets the name of the Ruby thread to the given string.
3375 * On some platforms, the name may also be set for the underlying pthread and/or the kernel.
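 *
 * For example (illustrative):
 *
 *    th = Thread.new { sleep }
 *    th.name = "worker"
 *    th.name   #=> "worker"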
3378 static VALUE
3379 rb_thread_setname(VALUE thread, VALUE name)
3381 rb_thread_t *target_th = rb_thread_ptr(thread);
3383 if (!NIL_P(name)) {
3384 rb_encoding *enc;
3385 StringValueCStr(name);
3386 enc = rb_enc_get(name);
3387 if (!rb_enc_asciicompat(enc)) {
3388 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3389 rb_enc_name(enc));
3391 name = rb_str_new_frozen(name);
3393 target_th->name = name;
3394 if (threadptr_initialized(target_th)) {
3395 native_set_another_thread_name(target_th->thread_id, name);
3397 return name;
3400 #if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3402 * call-seq:
3403 * thr.native_thread_id -> integer
3405 * Returns the native thread ID used by the Ruby thread.
3407 * The ID depends on the OS (it is not the POSIX thread ID returned by pthread_self(3)):
3408 * * On Linux it is TID returned by gettid(2).
3409 * * On macOS it is the system-wide unique integral ID of the thread returned
3410 * by pthread_threadid_np(3).
3411 * * On FreeBSD it is the unique integral ID of the thread returned by
3412 * pthread_getthreadid_np(3).
3413 * * On Windows it is the thread identifier returned by GetThreadId().
3414 * * On other platforms, it raises NotImplementedError.
3416 * NOTE:
3417 * If the thread is not yet associated with, or has already been dissociated
3418 * from, a native thread, it returns _nil_.
3419 * If the Ruby implementation uses an M:N thread model, the ID may change
3420 * depending on the timing.
3423 static VALUE
3424 rb_thread_native_thread_id(VALUE thread)
3426 rb_thread_t *target_th = rb_thread_ptr(thread);
3427 if (rb_threadptr_dead(target_th)) return Qnil;
3428 return native_thread_native_thread_id(target_th);
3430 #else
3431 # define rb_thread_native_thread_id rb_f_notimplement
3432 #endif
3435 * call-seq:
3436 * thr.to_s -> string
3438 * Dump the name, id, and status of _thr_ to a string.
3441 static VALUE
3442 rb_thread_to_s(VALUE thread)
3444 VALUE cname = rb_class_path(rb_obj_class(thread));
3445 rb_thread_t *target_th = rb_thread_ptr(thread);
3446 const char *status;
3447 VALUE str, loc;
3449 status = thread_status_name(target_th, TRUE);
3450 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3451 if (!NIL_P(target_th->name)) {
3452 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3454 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3455 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3456 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3458 rb_str_catf(str, " %s>", status);
3460 return str;
3463 /* variables for recursive traversals */
3464 #define recursive_key id__recursive_key__
3466 static VALUE
3467 threadptr_local_aref(rb_thread_t *th, ID id)
3469 if (id == recursive_key) {
3470 return th->ec->local_storage_recursive_hash;
3472 else {
3473 VALUE val;
3474 struct rb_id_table *local_storage = th->ec->local_storage;
3476 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3477 return val;
3479 else {
3480 return Qnil;
3485 VALUE
3486 rb_thread_local_aref(VALUE thread, ID id)
3488 return threadptr_local_aref(rb_thread_ptr(thread), id);
3492 * call-seq:
3493 * thr[sym] -> obj or nil
3495 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3496 * if not explicitly inside a Fiber), using either a symbol or a string name.
3497 * If the specified variable does not exist, returns +nil+.
3500 * Thread.new { Thread.current["name"] = "A" },
3501 * Thread.new { Thread.current[:name] = "B" },
3502 * Thread.new { Thread.current["name"] = "C" }
3503 * ].each do |th|
3504 * th.join
3505 * puts "#{th.inspect}: #{th[:name]}"
3506 * end
3508 * This will produce:
3510 * #<Thread:0x00000002a54220 dead>: A
3511 * #<Thread:0x00000002a541a8 dead>: B
3512 * #<Thread:0x00000002a54130 dead>: C
3514 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3515 * This confusion did not exist in Ruby 1.8 because
3516 * fibers were only introduced in Ruby 1.9.
3517 * Ruby 1.9 made the methods behave fiber-local in order to preserve
3518 * the following idiom for dynamic scope.
3520 * def meth(newvalue)
3521 * begin
3522 * oldvalue = Thread.current[:name]
3523 * Thread.current[:name] = newvalue
3524 * yield
3525 * ensure
3526 * Thread.current[:name] = oldvalue
3527 * end
3528 * end
3530 * The idiom may not work as dynamic scope if the methods are thread-local
3531 * and a given block switches fibers.
3533 * f = Fiber.new {
3534 * meth(1) {
3535 * Fiber.yield
3538 * meth(2) {
3539 * f.resume
3541 * f.resume
3542 * p Thread.current[:name]
3543 * #=> nil if fiber-local
3544 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3546 * For thread-local variables, please see #thread_variable_get and
3547 * #thread_variable_set.
3551 static VALUE
3552 rb_thread_aref(VALUE thread, VALUE key)
3554 ID id = rb_check_id(&key);
3555 if (!id) return Qnil;
3556 return rb_thread_local_aref(thread, id);
3560 * call-seq:
3561 * thr.fetch(sym) -> obj
3562 * thr.fetch(sym) { } -> obj
3563 * thr.fetch(sym, default) -> obj
3565 * Returns a fiber-local for the given key. If the key can't be
3566 * found, there are several options: With no other arguments, it will
3567 * raise a KeyError exception; if <i>default</i> is given, then that
3568 * will be returned; if the optional code block is specified, then
3569 * that will be run and its result returned. See Thread#[] and
3570 * Hash#fetch.
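 *
 * For example:
 *
 *    Thread.current[:name] = "A"
 *    Thread.current.fetch(:name)         #=> "A"
 *    Thread.current.fetch(:other, "B")   #=> "B"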
3572 static VALUE
3573 rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3575 VALUE key, val;
3576 ID id;
3577 rb_thread_t *target_th = rb_thread_ptr(self);
3578 int block_given;
3580 rb_check_arity(argc, 1, 2);
3581 key = argv[0];
3583 block_given = rb_block_given_p();
3584 if (block_given && argc == 2) {
3585 rb_warn("block supersedes default value argument");
3588 id = rb_check_id(&key);
3590 if (id == recursive_key) {
3591 return target_th->ec->local_storage_recursive_hash;
3593 else if (id && target_th->ec->local_storage &&
3594 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3595 return val;
3597 else if (block_given) {
3598 return rb_yield(key);
3600 else if (argc == 1) {
3601 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3603 else {
3604 return argv[1];
3608 static VALUE
3609 threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3611 if (id == recursive_key) {
3612 th->ec->local_storage_recursive_hash = val;
3613 return val;
3615 else {
3616 struct rb_id_table *local_storage = th->ec->local_storage;
3618 if (NIL_P(val)) {
3619 if (!local_storage) return Qnil;
3620 rb_id_table_delete(local_storage, id);
3621 return Qnil;
3623 else {
3624 if (local_storage == NULL) {
3625 th->ec->local_storage = local_storage = rb_id_table_create(0);
3627 rb_id_table_insert(local_storage, id, val);
3628 return val;
3633 VALUE
3634 rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3636 if (OBJ_FROZEN(thread)) {
3637 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3640 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3644 * call-seq:
3645 * thr[sym] = obj -> obj
3647 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3648 * using either a symbol or a string.
3650 * See also Thread#[].
3652 * For thread-local variables, please see #thread_variable_set and
3653 * #thread_variable_get.
3656 static VALUE
3657 rb_thread_aset(VALUE self, VALUE id, VALUE val)
3659 return rb_thread_local_aset(self, rb_to_id(id), val);
3663 * call-seq:
3664 * thr.thread_variable_get(key) -> obj or nil
3666 * Returns the value of a thread local variable that has been set. Note that
3667 * these are different from fiber-local values. For fiber-local values,
3668 * please see Thread#[] and Thread#[]=.
3670 * Thread local values are carried along with threads, and do not respect
3671 * fibers. For example:
3673 * Thread.new {
3674 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3675 * Thread.current["foo"] = "bar" # set a fiber local
3677 * Fiber.new {
3678 * Fiber.yield [
3679 * Thread.current.thread_variable_get("foo"), # get the thread local
3680 * Thread.current["foo"], # get the fiber local
3682 * }.resume
3683 * }.join.value # => ['bar', nil]
3685 * The value "bar" is returned for the thread local, whereas nil is returned
3686 * for the fiber local. The fiber is executed in the same thread, so the
3687 * thread local values are available.
3690 static VALUE
3691 rb_thread_variable_get(VALUE thread, VALUE key)
3693 VALUE locals;
3695 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3696 return Qnil;
3698 locals = rb_thread_local_storage(thread);
3699 return rb_hash_aref(locals, rb_to_symbol(key));
3703 * call-seq:
3704 * thr.thread_variable_set(key, value)
3706 * Sets a thread local with +key+ to +value+. Note that these are local to
3707 * threads, and not to fibers. Please see Thread#thread_variable_get and
3708 * Thread#[] for more information.
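 *
 * For example:
 *
 *    Thread.current.thread_variable_set(:debug, true)
 *    Thread.current.thread_variable_get(:debug)   #=> true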
3711 static VALUE
3712 rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3714 VALUE locals;
3716 if (OBJ_FROZEN(thread)) {
3717 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3720 locals = rb_thread_local_storage(thread);
3721 return rb_hash_aset(locals, rb_to_symbol(key), val);
3725 * call-seq:
3726 * thr.key?(sym) -> true or false
3728 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3729 * variable.
3731 * me = Thread.current
3732 * me[:oliver] = "a"
3733 * me.key?(:oliver) #=> true
3734 * me.key?(:stanley) #=> false
3737 static VALUE
3738 rb_thread_key_p(VALUE self, VALUE key)
3740 VALUE val;
3741 ID id = rb_check_id(&key);
3742 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3744 if (!id || local_storage == NULL) {
3745 return Qfalse;
3747 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3750 static enum rb_id_table_iterator_result
3751 thread_keys_i(ID key, VALUE value, void *ary)
3753 rb_ary_push((VALUE)ary, ID2SYM(key));
3754 return ID_TABLE_CONTINUE;
3758 rb_thread_alone(void)
3760 // TODO
3761 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3765 * call-seq:
3766 * thr.keys -> array
3768 * Returns an array of the names of the fiber-local variables (as Symbols).
3770 * thr = Thread.new do
3771 * Thread.current[:cat] = 'meow'
3772 * Thread.current["dog"] = 'woof'
3773 * end
3774 * thr.join #=> #<Thread:0x401b3f10 dead>
3775 * thr.keys #=> [:dog, :cat]
3778 static VALUE
3779 rb_thread_keys(VALUE self)
3781 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3782 VALUE ary = rb_ary_new();
3784 if (local_storage) {
3785 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3787 return ary;
3790 static int
3791 keys_i(VALUE key, VALUE value, VALUE ary)
3793 rb_ary_push(ary, key);
3794 return ST_CONTINUE;
3798 * call-seq:
3799 * thr.thread_variables -> array
3801 * Returns an array of the names of the thread-local variables (as Symbols).
3803 * thr = Thread.new do
3804 * Thread.current.thread_variable_set(:cat, 'meow')
3805 * Thread.current.thread_variable_set("dog", 'woof')
3806 * end
3807 * thr.join #=> #<Thread:0x401b3f10 dead>
3808 * thr.thread_variables #=> [:dog, :cat]
3810 * Note that these are not fiber local variables. Please see Thread#[] and
3811 * Thread#thread_variable_get for more details.
3814 static VALUE
3815 rb_thread_variables(VALUE thread)
3817 VALUE locals;
3818 VALUE ary;
3820 ary = rb_ary_new();
3821 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3822 return ary;
3824 locals = rb_thread_local_storage(thread);
3825 rb_hash_foreach(locals, keys_i, ary);
3827 return ary;
3831 * call-seq:
3832 * thr.thread_variable?(key) -> true or false
3834 * Returns +true+ if the given string (or symbol) exists as a thread-local
3835 * variable.
3837 * me = Thread.current
3838 * me.thread_variable_set(:oliver, "a")
3839 * me.thread_variable?(:oliver) #=> true
3840 * me.thread_variable?(:stanley) #=> false
3842 * Note that these are not fiber local variables. Please see Thread#[] and
3843 * Thread#thread_variable_get for more details.
3846 static VALUE
3847 rb_thread_variable_p(VALUE thread, VALUE key)
3849 VALUE locals;
3851 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3852 return Qfalse;
3854 locals = rb_thread_local_storage(thread);
3856 return RBOOL(rb_hash_lookup(locals, rb_to_symbol(key)) != Qnil);
3860 * call-seq:
3861 * thr.priority -> integer
3863 * Returns the priority of <i>thr</i>. Default is inherited from the
3864 * current thread which created the new thread, or zero for the
3865 * initial main thread; higher-priority threads will run more frequently
3866 * than lower-priority threads (but lower-priority threads can also run).
3868 * This is just a hint for the Ruby thread scheduler. It may be ignored on some
3869 * platforms.
3871 * Thread.current.priority #=> 0
3874 static VALUE
3875 rb_thread_priority(VALUE thread)
3877 return INT2NUM(rb_thread_ptr(thread)->priority);
3882 * call-seq:
3883 * thr.priority= integer -> thr
3885 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3886 * will run more frequently than lower-priority threads (but lower-priority
3887 * threads can also run).
3889 * This is just a hint for the Ruby thread scheduler. It may be ignored on some
3890 * platforms.
3892 * count1 = count2 = 0
3893 * a = Thread.new do
3894 * loop { count1 += 1 }
3895 * end
3896 * a.priority = -1
3898 * b = Thread.new do
3899 * loop { count2 += 1 }
3900 * end
3901 * b.priority = -2
3902 * sleep 1 #=> 1
3903 * count1 #=> 622504
3904 * count2 #=> 5832
3907 static VALUE
3908 rb_thread_priority_set(VALUE thread, VALUE prio)
3910 rb_thread_t *target_th = rb_thread_ptr(thread);
3911 int priority;
3913 #if USE_NATIVE_THREAD_PRIORITY
3914 target_th->priority = NUM2INT(prio);
3915 native_thread_apply_priority(target_th);
3916 #else
3917 priority = NUM2INT(prio);
3918 if (priority > RUBY_THREAD_PRIORITY_MAX) {
3919 priority = RUBY_THREAD_PRIORITY_MAX;
3921 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3922 priority = RUBY_THREAD_PRIORITY_MIN;
3924 target_th->priority = (int8_t)priority;
3925 #endif
3926 return INT2NUM(target_th->priority);
3929 /* for IO */
3931 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3934 * several Unix platforms support file descriptors bigger than FD_SETSIZE
3935 * in the select(2) system call.
3937 * - Linux 2.2.12 (?)
3938 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3939 * select(2) documents how to allocate fd_set dynamically.
3940 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3941 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3942 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3943 * select(2) documents how to allocate fd_set dynamically.
3944 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3945 * - HP-UX documents how to allocate fd_set dynamically.
3946 * http://docs.hp.com/en/B2355-60105/select.2.html
3947 * - Solaris 8 has select_large_fdset
3948 * - Mac OS X 10.7 (Lion)
3949 * select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
3950 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3951 * http://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/_index.html
3953 * When fd_set is not big enough to hold big file descriptors,
3954 * it should be allocated dynamically.
3955 * Note that this assumes fd_set is structured as a bitmap.
3957 * rb_fd_init allocates the memory.
3958 * rb_fd_term frees the memory.
3959 * rb_fd_set may re-allocate the bitmap.
3961 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
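 *
 * A minimal lifecycle sketch using only the functions defined below
 * (fd stands for any open file descriptor, possibly >= FD_SETSIZE):
 *
 *   rb_fdset_t fds;
 *   rb_fd_init(&fds);
 *   rb_fd_set(fd, &fds);       // grows the bitmap as needed
 *   if (rb_fd_isset(fd, &fds)) { ... }
 *   rb_fd_term(&fds);          // releases the bitmap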
3964 void
3965 rb_fd_init(rb_fdset_t *fds)
3967 fds->maxfd = 0;
3968 fds->fdset = ALLOC(fd_set);
3969 FD_ZERO(fds->fdset);
3972 void
3973 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
3975 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3977 if (size < sizeof(fd_set))
3978 size = sizeof(fd_set);
3979 dst->maxfd = src->maxfd;
3980 dst->fdset = xmalloc(size);
3981 memcpy(dst->fdset, src->fdset, size);
3984 void
3985 rb_fd_term(rb_fdset_t *fds)
3987 if (fds->fdset) xfree(fds->fdset);
3988 fds->maxfd = 0;
3989 fds->fdset = 0;
3992 void
3993 rb_fd_zero(rb_fdset_t *fds)
3995 if (fds->fdset)
3996 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
3999 static void
4000 rb_fd_resize(int n, rb_fdset_t *fds)
4002 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4003 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4005 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4006 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4008 if (m > o) {
4009 fds->fdset = xrealloc(fds->fdset, m);
4010 memset((char *)fds->fdset + o, 0, m - o);
4012 if (n >= fds->maxfd) fds->maxfd = n + 1;
4015 void
4016 rb_fd_set(int n, rb_fdset_t *fds)
4018 rb_fd_resize(n, fds);
4019 FD_SET(n, fds->fdset);
4022 void
4023 rb_fd_clr(int n, rb_fdset_t *fds)
4025 if (n >= fds->maxfd) return;
4026 FD_CLR(n, fds->fdset);
4030 rb_fd_isset(int n, const rb_fdset_t *fds)
4032 if (n >= fds->maxfd) return 0;
4033 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4036 void
4037 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4039 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4041 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4042 dst->maxfd = max;
4043 dst->fdset = xrealloc(dst->fdset, size);
4044 memcpy(dst->fdset, src, size);
4047 void
4048 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4050 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4052 if (size < sizeof(fd_set))
4053 size = sizeof(fd_set);
4054 dst->maxfd = src->maxfd;
4055 dst->fdset = xrealloc(dst->fdset, size);
4056 memcpy(dst->fdset, src->fdset, size);
4060 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4062 fd_set *r = NULL, *w = NULL, *e = NULL;
4063 if (readfds) {
4064 rb_fd_resize(n - 1, readfds);
4065 r = rb_fd_ptr(readfds);
4067 if (writefds) {
4068 rb_fd_resize(n - 1, writefds);
4069 w = rb_fd_ptr(writefds);
4071 if (exceptfds) {
4072 rb_fd_resize(n - 1, exceptfds);
4073 e = rb_fd_ptr(exceptfds);
4075 return select(n, r, w, e, timeout);
4078 #define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4080 #undef FD_ZERO
4081 #undef FD_SET
4082 #undef FD_CLR
4083 #undef FD_ISSET
4085 #define FD_ZERO(f) rb_fd_zero(f)
4086 #define FD_SET(i, f) rb_fd_set((i), (f))
4087 #define FD_CLR(i, f) rb_fd_clr((i), (f))
4088 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
4090 #elif defined(_WIN32)
4092 void
4093 rb_fd_init(rb_fdset_t *set)
4095 set->capa = FD_SETSIZE;
4096 set->fdset = ALLOC(fd_set);
4097 FD_ZERO(set->fdset);
4100 void
4101 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4103 rb_fd_init(dst);
4104 rb_fd_dup(dst, src);
4107 void
4108 rb_fd_term(rb_fdset_t *set)
4110 xfree(set->fdset);
4111 set->fdset = NULL;
4112 set->capa = 0;
4115 void
4116 rb_fd_set(int fd, rb_fdset_t *set)
4118 unsigned int i;
4119 SOCKET s = rb_w32_get_osfhandle(fd);
4121 for (i = 0; i < set->fdset->fd_count; i++) {
4122 if (set->fdset->fd_array[i] == s) {
4123 return;
4126 if (set->fdset->fd_count >= (unsigned)set->capa) {
4127 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4128 set->fdset =
4129 rb_xrealloc_mul_add(
4130 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4132 set->fdset->fd_array[set->fdset->fd_count++] = s;
4135 #undef FD_ZERO
4136 #undef FD_SET
4137 #undef FD_CLR
4138 #undef FD_ISSET
4140 #define FD_ZERO(f) rb_fd_zero(f)
4141 #define FD_SET(i, f) rb_fd_set((i), (f))
4142 #define FD_CLR(i, f) rb_fd_clr((i), (f))
4143 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
4145 #define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4147 #endif
4149 #ifndef rb_fd_no_init
4150 #define rb_fd_no_init(fds) (void)(fds)
4151 #endif
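/* Decide whether an interrupted or prematurely-returned wait should be
 * retried: EINTR/ERESTART zeroes *result and retries (updating the remaining
 * relative timeout); a zero result (possibly a spurious wakeup) is retried
 * only while time remains. */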
4153 static int
4154 wait_retryable(int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4156 if (*result < 0) {
4157 switch (errnum) {
4158 case EINTR:
4159 #ifdef ERESTART
4160 case ERESTART:
4161 #endif
4162 *result = 0;
4163 if (rel && hrtime_update_expire(rel, end)) {
4164 *rel = 0;
4166 return TRUE;
4168 return FALSE;
4170 else if (*result == 0) {
4171 /* check for spurious wakeup */
4172 if (rel) {
4173 return !hrtime_update_expire(rel, end);
4175 return TRUE;
4177 return FALSE;
4180 struct select_set {
4181 int max;
4182 int sigwait_fd;
4183 rb_thread_t *th;
4184 rb_fdset_t *rset;
4185 rb_fdset_t *wset;
4186 rb_fdset_t *eset;
4187 rb_fdset_t orig_rset;
4188 rb_fdset_t orig_wset;
4189 rb_fdset_t orig_eset;
4190 struct timeval *timeout;
4193 static VALUE
4194 select_set_free(VALUE p)
4196 struct select_set *set = (struct select_set *)p;
4198 if (set->sigwait_fd >= 0) {
4199 rb_sigwait_fd_put(set->th, set->sigwait_fd);
4200 rb_sigwait_fd_migrate(set->th->vm);
4203 rb_fd_term(&set->orig_rset);
4204 rb_fd_term(&set->orig_wset);
4205 rb_fd_term(&set->orig_eset);
4207 return Qfalse;
4210 static const rb_hrtime_t *
4211 sigwait_timeout(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *orig,
4212 int *drained_p)
4214 static const rb_hrtime_t quantum = TIME_QUANTUM_USEC * 1000;
4216 if (sigwait_fd >= 0 && (!ubf_threads_empty() || BUSY_WAIT_SIGNALS)) {
4217 *drained_p = check_signals_nogvl(th, sigwait_fd);
4218 if (!orig || *orig > quantum)
4219 return &quantum;
4222 return orig;
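/* If the wait result includes the sigwait fd becoming readable, exclude it
 * from the caller-visible result count and return that fd (otherwise -1). */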
4225 #define sigwait_signals_fd(result, cond, sigwait_fd) \
4226 (result > 0 && (cond) ? (result--, (sigwait_fd)) : -1)
4228 static VALUE
4229 do_select(VALUE p)
4231 struct select_set *set = (struct select_set *)p;
4232 int result = 0;
4233 int lerrno;
4234 rb_hrtime_t *to, rel, end = 0;
4236 timeout_prepare(&to, &rel, &end, set->timeout);
4237 #define restore_fdset(dst, src) \
4238 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4239 #define do_select_update() \
4240 (restore_fdset(set->rset, &set->orig_rset), \
4241 restore_fdset(set->wset, &set->orig_wset), \
4242 restore_fdset(set->eset, &set->orig_eset), \
4243 TRUE)
4245 do {
4246 int drained;
4247 lerrno = 0;
4249 BLOCKING_REGION(set->th, {
4250 const rb_hrtime_t *sto;
4251 struct timeval tv;
4253 sto = sigwait_timeout(set->th, set->sigwait_fd, to, &drained);
4254 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4255 result = native_fd_select(set->max, set->rset, set->wset,
4256 set->eset,
4257 rb_hrtime2timeval(&tv, sto), set->th);
4258 if (result < 0) lerrno = errno;
4260 }, set->sigwait_fd >= 0 ? ubf_sigwait : ubf_select, set->th, TRUE);
4262 if (set->sigwait_fd >= 0) {
4263 int fd = sigwait_signals_fd(result,
4264 rb_fd_isset(set->sigwait_fd, set->rset),
4265 set->sigwait_fd);
4266 (void)check_signals_nogvl(set->th, fd);
4269 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4270 } while (wait_retryable(&result, lerrno, to, end) && do_select_update());
4272 if (result < 0) {
4273 errno = lerrno;
4276 return (VALUE)result;
4279 static rb_fdset_t *
4280 init_set_fd(int fd, rb_fdset_t *fds)
4282 if (fd < 0) {
4283 return 0;
4285 rb_fd_init(fds);
4286 rb_fd_set(fd, fds);
4288 return fds;
4292 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4293 struct timeval *timeout)
4295 struct select_set set;
4297 set.th = GET_THREAD();
4298 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4299 set.max = max;
4300 set.rset = read;
4301 set.wset = write;
4302 set.eset = except;
4303 set.timeout = timeout;
4305 if (!set.rset && !set.wset && !set.eset) {
4306 if (!timeout) {
4307 rb_thread_sleep_forever();
4308 return 0;
4310 rb_thread_wait_for(*timeout);
4311 return 0;
4314 set.sigwait_fd = rb_sigwait_fd_get(set.th);
4315 if (set.sigwait_fd >= 0) {
4316 if (set.rset)
4317 rb_fd_set(set.sigwait_fd, set.rset);
4318 else
4319 set.rset = init_set_fd(set.sigwait_fd, &set.orig_rset);
4320 if (set.sigwait_fd >= set.max) {
4321 set.max = set.sigwait_fd + 1;
4324 #define fd_init_copy(f) do { \
4325 if (set.f) { \
4326 rb_fd_resize(set.max - 1, set.f); \
4327 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4328 rb_fd_init_copy(&set.orig_##f, set.f); \
4331 else { \
4332 rb_fd_no_init(&set.orig_##f); \
4334 } while (0)
4335 fd_init_copy(rset);
4336 fd_init_copy(wset);
4337 fd_init_copy(eset);
4338 #undef fd_init_copy
4340 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4343 #ifdef USE_POLL
4345 /* The same as the Linux kernel. TODO: make a platform-independent definition. */
4346 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4347 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4348 #define POLLEX_SET (POLLPRI)
4350 #ifndef POLLERR_SET /* defined for FreeBSD for now */
4351 # define POLLERR_SET (0)
4352 #endif
4355 * returns a mask of events
4358 rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4360 struct pollfd fds[2];
4361 int result = 0;
4362 int drained;
4363 nfds_t nfds;
4364 rb_unblock_function_t *ubf;
4365 struct waiting_fd wfd;
4366 int state;
4367 volatile int lerrno;
4369 wfd.th = GET_THREAD();
4370 wfd.fd = fd;
4372 RB_VM_LOCK_ENTER();
4374 list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
4376 RB_VM_LOCK_LEAVE();
4378 EC_PUSH_TAG(wfd.th->ec);
4379 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4380 rb_hrtime_t *to, rel, end = 0;
4381 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4382 timeout_prepare(&to, &rel, &end, timeout);
4383 fds[0].fd = fd;
4384 fds[0].events = (short)events;
4385 fds[0].revents = 0;
4386 do {
4387 fds[1].fd = rb_sigwait_fd_get(wfd.th);
4389 if (fds[1].fd >= 0) {
4390 fds[1].events = POLLIN;
4391 fds[1].revents = 0;
4392 nfds = 2;
4393 ubf = ubf_sigwait;
4395 else {
4396 nfds = 1;
4397 ubf = ubf_select;
4400 lerrno = 0;
4401 BLOCKING_REGION(wfd.th, {
4402 const rb_hrtime_t *sto;
4403 struct timespec ts;
4405 sto = sigwait_timeout(wfd.th, fds[1].fd, to, &drained);
4406 if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
4407 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, sto), 0);
4408 if (result < 0) lerrno = errno;
4410 }, ubf, wfd.th, TRUE);
4412 if (fds[1].fd >= 0) {
4413 int fd1 = sigwait_signals_fd(result, fds[1].revents, fds[1].fd);
4414 (void)check_signals_nogvl(wfd.th, fd1);
4415 rb_sigwait_fd_put(wfd.th, fds[1].fd);
4416 rb_sigwait_fd_migrate(wfd.th->vm);
4418 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4419 } while (wait_retryable(&result, lerrno, to, end));
4421 EC_POP_TAG();
4423 RB_VM_LOCK_ENTER();
4425 list_del(&wfd.wfd_node);
4427 RB_VM_LOCK_LEAVE();
4429 if (state) {
4430 EC_JUMP_TAG(wfd.th->ec, state);
4433 if (result < 0) {
4434 errno = lerrno;
4435 return -1;
4438 if (fds[0].revents & POLLNVAL) {
4439 errno = EBADF;
4440 return -1;
4444      * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits,
4445      * so we need to fix up the result.
4447 result = 0;
4448 if (fds[0].revents & POLLIN_SET)
4449 result |= RB_WAITFD_IN;
4450 if (fds[0].revents & POLLOUT_SET)
4451 result |= RB_WAITFD_OUT;
4452 if (fds[0].revents & POLLEX_SET)
4453 result |= RB_WAITFD_PRI;
4455 /* all requested events are ready if there is an error */
4456 if (fds[0].revents & POLLERR_SET)
4457 result |= events;
4459 return result;
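/*
 * A usage sketch (hypothetical fd): wait up to 500ms for fd to become
 * readable, then decode the returned event mask:
 *
 *   struct timeval tv = { 0, 500000 };
 *   int revents = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *   if (revents < 0)                 { ... error, errno is set ... }
 *   else if (revents == 0)           { ... timed out ... }
 *   else if (revents & RB_WAITFD_IN) { ... readable ... }
 *
 * On error conditions (POLLERR etc.) all requested event bits are
 * reported at once, per the fix-up above.
 */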
4461 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4462 struct select_args {
4463 union {
4464 int fd;
4465 int error;
4466 } as;
4467 rb_fdset_t *read;
4468 rb_fdset_t *write;
4469 rb_fdset_t *except;
4470 struct waiting_fd wfd;
4471 struct timeval *tv;
4474 static VALUE
4475 select_single(VALUE ptr)
4477 struct select_args *args = (struct select_args *)ptr;
4478 int r;
4480 r = rb_thread_fd_select(args->as.fd + 1,
4481 args->read, args->write, args->except, args->tv);
4482 if (r == -1)
4483 args->as.error = errno;
4484 if (r > 0) {
4485 r = 0;
4486 if (args->read && rb_fd_isset(args->as.fd, args->read))
4487 r |= RB_WAITFD_IN;
4488 if (args->write && rb_fd_isset(args->as.fd, args->write))
4489 r |= RB_WAITFD_OUT;
4490 if (args->except && rb_fd_isset(args->as.fd, args->except))
4491 r |= RB_WAITFD_PRI;
4493 return (VALUE)r;
4496 static VALUE
4497 select_single_cleanup(VALUE ptr)
4499 struct select_args *args = (struct select_args *)ptr;
4501 list_del(&args->wfd.wfd_node);
4502 if (args->read) rb_fd_term(args->read);
4503 if (args->write) rb_fd_term(args->write);
4504 if (args->except) rb_fd_term(args->except);
4506 return (VALUE)-1;
4510 rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4512 rb_fdset_t rfds, wfds, efds;
4513 struct select_args args;
4514 int r;
4515 VALUE ptr = (VALUE)&args;
4517 args.as.fd = fd;
4518 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4519 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4520 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4521 args.tv = timeout;
4522 args.wfd.fd = fd;
4523 args.wfd.th = GET_THREAD();
4525 RB_VM_LOCK_ENTER();
4527 list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
4529 RB_VM_LOCK_LEAVE();
4531 r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4532 if (r == -1)
4533 errno = args.as.error;
4535 return r;
4537 #endif /* ! USE_POLL */
4540 * for GC
4543 #ifdef USE_CONSERVATIVE_STACK_END
4544 void
4545 rb_gc_set_stack_end(VALUE **stack_end_p)
4547 VALUE stack_end;
4548 *stack_end_p = &stack_end;
4550 #endif
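/*
 * Sketch of how the conservative GC consumes this: taking the address of
 * a local variable approximates the current native stack end, and the GC
 * then scans the machine stack between the recorded stack start and this
 * address for words that look like VALUEs (the marking step below is
 * illustrative, not a verbatim caller):
 *
 *   VALUE *stack_end;
 *   rb_gc_set_stack_end(&stack_end);
 *   // conservatively mark every VALUE-looking word in
 *   // [machine.stack_start, stack_end)
 */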
4556 void
4557 rb_threadptr_check_signal(rb_thread_t *mth)
4559 /* mth must be main_thread */
4560 if (rb_signal_buff_size() > 0) {
4561 /* wakeup main thread */
4562 threadptr_trap_interrupt(mth);
4566 static void
4567 async_bug_fd(const char *mesg, int errno_arg, int fd)
4569 char buff[64];
4570 size_t n = strlcpy(buff, mesg, sizeof(buff));
4571 if (n < sizeof(buff)-3) {
4572 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4574 rb_async_bug_errno(buff, errno_arg);
4577 /* VM-dependent API is not available for this function */
4578 static int
4579 consume_communication_pipe(int fd)
4581 #if USE_EVENTFD
4582 uint64_t buff[1];
4583 #else
4584     /* the buffer can be shared because no one else refers to it. */
4585 static char buff[1024];
4586 #endif
4587 ssize_t result;
4588 int ret = FALSE; /* for rb_sigwait_sleep */
4591      * Disarm UBF_TIMER before we read, because it can become
4592      * re-armed at any time via the signal handler and the pipe will refill.
4593      * We can disarm it because this thread is now processing signals,
4594      * and we do not want an unnecessary SIGVTALRM.
4596 ubf_timer_disarm();
4598 while (1) {
4599 result = read(fd, buff, sizeof(buff));
4600 if (result > 0) {
4601 ret = TRUE;
4602 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4603 return ret;
4606 else if (result == 0) {
4607 return ret;
4609 else if (result < 0) {
4610 int e = errno;
4611 switch (e) {
4612 case EINTR:
4613 continue; /* retry */
4614 case EAGAIN:
4615 #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4616 case EWOULDBLOCK:
4617 #endif
4618 return ret;
4619 default:
4620 async_bug_fd("consume_communication_pipe: read", e, fd);
4626 static int
4627 check_signals_nogvl(rb_thread_t *th, int sigwait_fd)
4629 rb_vm_t *vm = GET_VM(); /* th may be 0 */
4630 int ret = sigwait_fd >= 0 ? consume_communication_pipe(sigwait_fd) : FALSE;
4631 ubf_wakeup_all_threads();
4632 ruby_sigchld_handler(vm);
4633 if (rb_signal_buff_size()) {
4634 if (th == vm->ractor.main_thread) {
4635 /* no need to lock + wakeup if already in main thread */
4636 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
4638 else {
4639 threadptr_trap_interrupt(vm->ractor.main_thread);
4641 ret = TRUE; /* for SIGCHLD_LOSSY && rb_sigwait_sleep */
4643 return ret;
4646 void
4647 rb_thread_stop_timer_thread(void)
4649 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4650 native_reset_timer_thread();
4654 void
4655 rb_thread_reset_timer_thread(void)
4657 native_reset_timer_thread();
4660 void
4661 rb_thread_start_timer_thread(void)
4663 system_working = 1;
4664 rb_thread_create_timer_thread();
4667 static int
4668 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4670 int i;
4671 VALUE coverage = (VALUE)val;
4672 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4673 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4675 if (lines) {
4676 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4677 rb_ary_clear(lines);
4679 else {
4680 int i;
4681 for (i = 0; i < RARRAY_LEN(lines); i++) {
4682 if (RARRAY_AREF(lines, i) != Qnil)
4683 RARRAY_ASET(lines, i, INT2FIX(0));
4687 if (branches) {
4688 VALUE counters = RARRAY_AREF(branches, 1);
4689 for (i = 0; i < RARRAY_LEN(counters); i++) {
4690 RARRAY_ASET(counters, i, INT2FIX(0));
4694 return ST_CONTINUE;
4697 void
4698 rb_clear_coverages(void)
4700 VALUE coverages = rb_get_coverages();
4701 if (RTEST(coverages)) {
4702 rb_hash_foreach(coverages, clear_coverage_i, 0);
4706 #if defined(HAVE_WORKING_FORK)
4708 static void
4709 rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4711 rb_thread_t *i = 0;
4712 rb_vm_t *vm = th->vm;
4713 rb_ractor_t *r = th->ractor;
4714 vm->ractor.main_ractor = r;
4715 vm->ractor.main_thread = th;
4716 r->threads.main = th;
4717 r->status_ = ractor_created;
4719 gvl_atfork(rb_ractor_gvl(th->ractor));
4720 ubf_list_atfork();
4722     // OK: after fork only this thread is running, so it can safely iterate over:
4723 list_for_each(&vm->ractor.set, r, vmlr_node) {
4724 list_for_each(&r->threads.set, i, lt_node) {
4725 atfork(i, th);
4728 rb_vm_living_threads_init(vm);
4730 rb_ractor_atfork(vm, th);
4732 /* may be held by MJIT threads in parent */
4733 rb_native_mutex_initialize(&vm->waitpid_lock);
4734 rb_native_mutex_initialize(&vm->workqueue_lock);
4736 /* may be held by any thread in parent */
4737 rb_native_mutex_initialize(&th->interrupt_lock);
4739 vm->fork_gen++;
4740 rb_ractor_sleeper_threads_clear(th->ractor);
4741 rb_clear_coverages();
4743 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4744 VM_ASSERT(vm->ractor.cnt == 1);
4747 static void
4748 terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4750 if (th != current_th) {
4751 rb_mutex_abandon_keeping_mutexes(th);
4752 rb_mutex_abandon_locking_mutex(th);
4753 thread_cleanup_func(th, TRUE);
4757 void rb_fiber_atfork(rb_thread_t *);
4758 void
4759 rb_thread_atfork(void)
4761 rb_thread_t *th = GET_THREAD();
4762 rb_thread_atfork_internal(th, terminate_atfork_i);
4763 th->join_list = NULL;
4764 rb_fiber_atfork(th);
4766     /* We don't want to reproduce CVE-2003-0900. */
4767 rb_reset_random_seed();
4769     /* In the child, start the MJIT worker thread here; this is safer than doing so immediately after `after_fork_ruby`. */
4770 mjit_child_after_fork();
4773 static void
4774 terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4776 if (th != current_th) {
4777 thread_cleanup_func_before_exec(th);
4781 void
4782 rb_thread_atfork_before_exec(void)
4784 rb_thread_t *th = GET_THREAD();
4785 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4787 #else
4788 void
4789 rb_thread_atfork(void)
4793 void
4794 rb_thread_atfork_before_exec(void)
4797 #endif
4799 struct thgroup {
4800 int enclosed;
4801 VALUE group;
4804 static size_t
4805 thgroup_memsize(const void *ptr)
4807 return sizeof(struct thgroup);
4810 static const rb_data_type_t thgroup_data_type = {
4811 "thgroup",
4812 {0, RUBY_TYPED_DEFAULT_FREE, thgroup_memsize,},
4813 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4817 * Document-class: ThreadGroup
4819 * ThreadGroup provides a means of keeping track of a number of threads as a
4820 * group.
4822 * A given Thread object can only belong to one ThreadGroup at a time; adding
4823 * a thread to a new group will remove it from any previous group.
4825 * Newly created threads belong to the same group as the thread from which they
4826 * were created.
4830 * Document-const: Default
4832 * The default ThreadGroup created when Ruby starts; all Threads belong to it
4833 * by default.
4835 static VALUE
4836 thgroup_s_alloc(VALUE klass)
4838 VALUE group;
4839 struct thgroup *data;
4841 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4842 data->enclosed = 0;
4843 data->group = group;
4845 return group;
4849 * call-seq:
4850 * thgrp.list -> array
4852 * Returns an array of all existing Thread objects that belong to this group.
4854 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4857 static VALUE
4858 thgroup_list(VALUE group)
4860 VALUE ary = rb_ary_new();
4861 rb_thread_t *th = 0;
4862 rb_ractor_t *r = GET_RACTOR();
4864 list_for_each(&r->threads.set, th, lt_node) {
4865 if (th->thgroup == group) {
4866 rb_ary_push(ary, th->self);
4869 return ary;
4874 * call-seq:
4875 * thgrp.enclose -> thgrp
4877 * Prevents threads from being added to or removed from the receiving
4878 * ThreadGroup.
4880 * New threads can still be started in an enclosed ThreadGroup.
4882 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4883 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4884 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4885 * tg.add thr
4886 * #=> ThreadError: can't move from the enclosed thread group
4889 static VALUE
4890 thgroup_enclose(VALUE group)
4892 struct thgroup *data;
4894 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4895 data->enclosed = 1;
4897 return group;
4902 * call-seq:
4903 * thgrp.enclosed? -> true or false
4905 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4908 static VALUE
4909 thgroup_enclosed_p(VALUE group)
4911 struct thgroup *data;
4913 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4914 return RBOOL(data->enclosed);
4919 * call-seq:
4920 * thgrp.add(thread) -> thgrp
4922 * Adds the given +thread+ to this group, removing it from any other
4923 * group to which it may have previously been a member.
4925 * puts "Initial group is #{ThreadGroup::Default.list}"
4926 * tg = ThreadGroup.new
4927 * t1 = Thread.new { sleep }
4928 * t2 = Thread.new { sleep }
4929 * puts "t1 is #{t1}"
4930 * puts "t2 is #{t2}"
4931 * tg.add(t1)
4932 * puts "Initial group now #{ThreadGroup::Default.list}"
4933 * puts "tg group now #{tg.list}"
4935 * This will produce:
4937 * Initial group is #<Thread:0x401bdf4c>
4938 * t1 is #<Thread:0x401b3c90>
4939 * t2 is #<Thread:0x401b3c18>
4940 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4941 * tg group now #<Thread:0x401b3c90>
4944 static VALUE
4945 thgroup_add(VALUE group, VALUE thread)
4947 rb_thread_t *target_th = rb_thread_ptr(thread);
4948 struct thgroup *data;
4950 if (OBJ_FROZEN(group)) {
4951 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4953 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4954 if (data->enclosed) {
4955 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4958 if (OBJ_FROZEN(target_th->thgroup)) {
4959 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4961 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4962 if (data->enclosed) {
4963 rb_raise(rb_eThreadError,
4964 "can't move from the enclosed thread group");
4967 target_th->thgroup = group;
4968 return group;
4972 * Document-class: ThreadShield
4974 static void
4975 thread_shield_mark(void *ptr)
4977 rb_gc_mark((VALUE)ptr);
4980 static const rb_data_type_t thread_shield_data_type = {
4981 "thread_shield",
4982 {thread_shield_mark, 0, 0,},
4983 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4986 static VALUE
4987 thread_shield_alloc(VALUE klass)
4989 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4992 #define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4993 #define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
4994 #define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4995 #define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
4996 STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
4997 static inline unsigned int
4998 rb_thread_shield_waiting(VALUE b)
5000 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
5003 static inline void
5004 rb_thread_shield_waiting_inc(VALUE b)
5006 unsigned int w = rb_thread_shield_waiting(b);
5007 w++;
5008 if (w > THREAD_SHIELD_WAITING_MAX)
5009 rb_raise(rb_eRuntimeError, "waiting count overflow");
5010 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5011 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5014 static inline void
5015 rb_thread_shield_waiting_dec(VALUE b)
5017 unsigned int w = rb_thread_shield_waiting(b);
5018 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
5019 w--;
5020 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5021 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
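/*
 * Worked example of the waiting-count encoding, assuming FL_USHIFT == 12
 * as in current CRuby: THREAD_SHIELD_WAITING_MASK covers the 20 user flag
 * bits FL_USER0..FL_USER19 (bits 12..31 of the RBASIC flags word), so a
 * count of e.g. 3 is stored as 3 << 12 and the maximum representable
 * count is 2**20 - 1 == 1048575 (checked by the STATIC_ASSERT above).
 * Incrementing or decrementing is therefore a read-modify-write of just
 * that bit range, leaving the object's other flags untouched.
 */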
5024 VALUE
5025 rb_thread_shield_new(void)
5027 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
5028 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
5029 return thread_shield;
5033  * Wait on a thread shield.
5035  * Returns
5036  *  true:  acquired the thread shield
5037  *  false: the thread shield was destroyed and no other threads are waiting
5038  *  nil:   the thread shield was destroyed but it is still in use
5040 VALUE
5041 rb_thread_shield_wait(VALUE self)
5043 VALUE mutex = GetThreadShieldPtr(self);
5044 rb_mutex_t *m;
5046 if (!mutex) return Qfalse;
5047 m = mutex_ptr(mutex);
5048 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5049 rb_thread_shield_waiting_inc(self);
5050 rb_mutex_lock(mutex);
5051 rb_thread_shield_waiting_dec(self);
5052 if (DATA_PTR(self)) return Qtrue;
5053 rb_mutex_unlock(mutex);
5054 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5057 static VALUE
5058 thread_shield_get_mutex(VALUE self)
5060 VALUE mutex = GetThreadShieldPtr(self);
5061 if (!mutex)
5062 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5063 return mutex;
5067 * Release a thread shield, and return true if it has waiting threads.
5069 VALUE
5070 rb_thread_shield_release(VALUE self)
5072 VALUE mutex = thread_shield_get_mutex(self);
5073 rb_mutex_unlock(mutex);
5074 return RBOOL(rb_thread_shield_waiting(self) > 0);
5078 * Release and destroy a thread shield, and return true if it has waiting threads.
5080 VALUE
5081 rb_thread_shield_destroy(VALUE self)
5083 VALUE mutex = thread_shield_get_mutex(self);
5084 DATA_PTR(self) = 0;
5085 rb_mutex_unlock(mutex);
5086 return RBOOL(rb_thread_shield_waiting(self) > 0);
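/*
 * Lifecycle sketch of the ThreadShield C API above (the pairing shown is
 * the intended protocol, not a verbatim caller; ThreadShield is used this
 * way e.g. to serialize concurrent require):
 *
 *   VALUE shield = rb_thread_shield_new();   // creator now holds the shield
 *   // ... other threads call rb_thread_shield_wait(shield) and block,
 *   //     each wait returning Qtrue/Qfalse/Qnil as documented above ...
 *   rb_thread_shield_destroy(shield);        // creator releases; waiters wake
 */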
5089 static VALUE
5090 threadptr_recursive_hash(rb_thread_t *th)
5092 return th->ec->local_storage_recursive_hash;
5095 static void
5096 threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5098 th->ec->local_storage_recursive_hash = hash;
5101 ID rb_frame_last_func(void);
5104 * Returns the current "recursive list" used to detect recursion.
5105 * This list is a hash table, unique for the current thread and for
5106 * the current __callee__.
5109 static VALUE
5110 recursive_list_access(VALUE sym)
5112 rb_thread_t *th = GET_THREAD();
5113 VALUE hash = threadptr_recursive_hash(th);
5114 VALUE list;
5115 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5116 hash = rb_ident_hash_new();
5117 threadptr_recursive_hash_set(th, hash);
5118 list = Qnil;
5120 else {
5121 list = rb_hash_aref(hash, sym);
5123 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5124 list = rb_ident_hash_new();
5125 rb_hash_aset(hash, sym, list);
5127 return list;
5131 * Returns Qtrue if and only if obj (or the pair <obj, paired_obj>) is already
5132 * in the recursion list.
5133 * Assumes the recursion list is valid.
5136 static VALUE
5137 recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5139 #if SIZEOF_LONG == SIZEOF_VOIDP
5140 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5141 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5142 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5143 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5144 #endif
5146 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5147 if (pair_list == Qundef)
5148 return Qfalse;
5149 if (paired_obj_id) {
5150 if (!RB_TYPE_P(pair_list, T_HASH)) {
5151 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5152 return Qfalse;
5154 else {
5155 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5156 return Qfalse;
5159 return Qtrue;
5163 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5164 * For a single obj, it sets list[obj] to Qtrue.
5165 * For a pair, it sets list[obj] to paired_obj_id if possible,
5166 * otherwise list[obj] becomes a hash like:
5167 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5168 * Assumes the recursion list is valid.
5171 static void
5172 recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5174 VALUE pair_list;
5176 if (!paired_obj) {
5177 rb_hash_aset(list, obj, Qtrue);
5179 else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
5180 rb_hash_aset(list, obj, paired_obj);
5182 else {
5183 if (!RB_TYPE_P(pair_list, T_HASH)){
5184 VALUE other_paired_obj = pair_list;
5185 pair_list = rb_hash_new();
5186 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5187 rb_hash_aset(list, obj, pair_list);
5189 rb_hash_aset(pair_list, paired_obj, Qtrue);
5194 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5195 * For a pair, if list[obj] is a hash, then paired_obj_id is
5196 * removed from the hash and no attempt is made to simplify
5197  * list[obj] from {only_one_paired_id => true} to only_one_paired_id.
5198 * Assumes the recursion list is valid.
5201 static int
5202 recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5204 if (paired_obj) {
5205 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5206 if (pair_list == Qundef) {
5207 return 0;
5209 if (RB_TYPE_P(pair_list, T_HASH)) {
5210 rb_hash_delete_entry(pair_list, paired_obj);
5211 if (!RHASH_EMPTY_P(pair_list)) {
5212             return 1; /* keep the hash until it is empty */
5216 rb_hash_delete_entry(list, obj);
5217 return 1;
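/*
 * Putting the three helpers together, the per-thread recursion table can
 * look like this after nested inspect calls (contents are illustrative):
 *
 *   local_storage_recursive_hash = {
 *     :inspect => {                    one list per __callee__
 *       obj1 => true,                  single-object entry
 *       obj2 => paired_obj_id,         one pair
 *       obj3 => {id_a => true, ...}    several pairs for the same obj
 *     }
 *   }
 *
 * recursive_check() tests membership, recursive_push() adds an entry,
 * and recursive_pop() removes it again on the way out.
 */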
5220 struct exec_recursive_params {
5221 VALUE (*func) (VALUE, VALUE, int);
5222 VALUE list;
5223 VALUE obj;
5224 VALUE pairid;
5225 VALUE arg;
5228 static VALUE
5229 exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5231 struct exec_recursive_params *p = (void *)data;
5232 return (*p->func)(p->obj, p->arg, FALSE);
5236  * Calls func(obj, arg, recursive), where recursive is non-zero if the
5237  * current method is called recursively on obj, or on the pair <obj, pairid>.
5238  * If outer is 0, then the innermost func will be called with recursive set
5239  * to Qtrue; otherwise the outermost func will be called. In the latter case,
5240  * all inner calls to func are short-circuited by throw.
5241  * Implementation detail: the value thrown is the recursive list, which is
5242  * specific to the current method and unlikely to be caught anywhere else.
5243  * list[recursive_key] is used as a flag for the outermost call.
5246 static VALUE
5247 exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
5249 VALUE result = Qundef;
5250 const ID mid = rb_frame_last_func();
5251 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5252 struct exec_recursive_params p;
5253 int outermost;
5254 p.list = recursive_list_access(sym);
5255 p.obj = obj;
5256 p.pairid = pairid;
5257 p.arg = arg;
5258 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5260 if (recursive_check(p.list, p.obj, pairid)) {
5261 if (outer && !outermost) {
5262 rb_throw_obj(p.list, p.list);
5264 return (*func)(obj, arg, TRUE);
5266 else {
5267 enum ruby_tag_type state;
5269 p.func = func;
5271 if (outermost) {
5272 recursive_push(p.list, ID2SYM(recursive_key), 0);
5273 recursive_push(p.list, p.obj, p.pairid);
5274 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5275 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5276 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5277 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5278 if (result == p.list) {
5279 result = (*func)(obj, arg, TRUE);
5282 else {
5283 volatile VALUE ret = Qundef;
5284 recursive_push(p.list, p.obj, p.pairid);
5285 EC_PUSH_TAG(GET_EC());
5286 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5287 ret = (*func)(obj, arg, FALSE);
5289 EC_POP_TAG();
5290 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5291 goto invalid;
5293 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5294 result = ret;
5297 *(volatile struct exec_recursive_params *)&p;
5298 return result;
5300 invalid:
5301 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5302 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5303 sym, rb_thread_current());
5304 UNREACHABLE_RETURN(Qundef);
5308 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5309 * current method is called recursively on obj
5312 VALUE
5313 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5315 return exec_recursive(func, obj, 0, arg, 0);
5319 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5320 * current method is called recursively on the ordered pair <obj, paired_obj>
5323 VALUE
5324 rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5326 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0);
5330 * If recursion is detected on the current method and obj, the outermost
5331 * func will be called with (obj, arg, Qtrue). All inner func will be
5332 * short-circuited using throw.
5335 VALUE
5336 rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5338 return exec_recursive(func, obj, 0, arg, 1);
5342 * If recursion is detected on the current method, obj and paired_obj,
5343 * the outermost func will be called with (obj, arg, Qtrue). All inner
5344 * func will be short-circuited using throw.
5347 VALUE
5348 rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5350 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1);
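/*
 * A minimal, hypothetical caller of rb_exec_recursive(), in the style of
 * the recursion-safe inspect implementations built on this API
 * (my_inspect_i is illustrative):
 *
 *   static VALUE
 *   my_inspect_i(VALUE ary, VALUE arg, int recursive)
 *   {
 *       if (recursive) return rb_str_new_cstr("[...]");
 *       return rb_ary_join(ary, rb_str_new_cstr(", "));  // may recurse
 *   }
 *
 *   VALUE str = rb_exec_recursive(my_inspect_i, ary, 0);
 *
 * If my_inspect_i is re-entered on the same ary in the same thread, the
 * inner call receives recursive == TRUE and can cut the cycle.
 */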
5354 * call-seq:
5355 * thread.backtrace -> array or nil
5357 * Returns the current backtrace of the target thread.
5361 static VALUE
5362 rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5364 return rb_vm_thread_backtrace(argc, argv, thval);
5367 /* call-seq:
5368 * thread.backtrace_locations(*args) -> array or nil
5370 * Returns the execution stack for the target thread---an array containing
5371 * backtrace location objects.
5373 * See Thread::Backtrace::Location for more information.
5375 * This method behaves similarly to Kernel#caller_locations except it applies
5376 * to a specific thread.
5378 static VALUE
5379 rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5381 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5384 void
5385 Init_Thread_Mutex(void)
5387 rb_thread_t *th = GET_THREAD();
5389 rb_native_mutex_initialize(&th->vm->waitpid_lock);
5390 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5391 rb_native_mutex_initialize(&th->interrupt_lock);
5395 * Document-class: ThreadError
5397 * Raised when an invalid operation is attempted on a thread.
5399 * For example, when no other thread has been started:
5401 * Thread.stop
5403  * This will raise the following exception:
5405 * ThreadError: stopping only thread
5406 * note: use sleep to stop forever
5409 void
5410 Init_Thread(void)
5412 VALUE cThGroup;
5413 rb_thread_t *th = GET_THREAD();
5415 sym_never = ID2SYM(rb_intern_const("never"));
5416 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5417 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5419 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5420 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5421 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5422 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5423 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5424 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5425 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5426 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5427 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5428 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5429 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5430 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5431 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5432 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5433 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5434 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5435 #if THREAD_DEBUG < 0
5436 rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
5437 rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
5438 #endif
5439 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5440 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5441 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5443 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5444 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5445 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5446 rb_define_method(rb_cThread, "value", thread_value, 0);
5447 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5448 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5449 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5450 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5451 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5452 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5453 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5454 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5455 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5456 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5457 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5458 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5459 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5460 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5461 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5462 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5463 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5464 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5465 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5466 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5467 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5468 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5469 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5470 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5471 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5472 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5474 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5475 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5476 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5477 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5478 rb_define_alias(rb_cThread, "inspect", "to_s");
5480 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5481 "stream closed in another thread");
5483 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5484 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5485 rb_define_method(cThGroup, "list", thgroup_list, 0);
5486 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5487 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5488 rb_define_method(cThGroup, "add", thgroup_add, 1);
5491 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5492 rb_define_const(cThGroup, "Default", th->thgroup);
5495 rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
5497 /* init thread core */
5499 /* main thread setting */
5501 /* acquire global vm lock */
5502 rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor);
5503 gvl_acquire(gvl, th);
5505 th->pending_interrupt_queue = rb_ary_tmp_new(0);
5506 th->pending_interrupt_queue_checked = 0;
5507 th->pending_interrupt_mask_stack = rb_ary_tmp_new(0);
5511 rb_thread_create_timer_thread();
5513 Init_thread_sync();
5517 ruby_native_thread_p(void)
5519 rb_thread_t *th = ruby_thread_from_native();
5521 return th != 0;
5524 static void
5525 debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5527 rb_thread_t *th = 0;
5528 VALUE sep = rb_str_new_cstr("\n ");
5530 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5531 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5532 (void *)GET_THREAD(), (void *)r->threads.main);
5534 list_for_each(&r->threads.set, th, lt_node) {
5535 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5536 "native:%"PRI_THREAD_ID" int:%u",
5537 th->self, (void *)th, thread_id_str(th), th->ec->interrupt_flag);
5539 if (th->locking_mutex) {
5540 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5541 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5542 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5546 struct rb_waiting_list *list = th->join_list;
5547 while (list) {
5548             rb_str_catf(msg, "\n depended by: rb_thread_id:%p", (void *)list->thread);
5549 list = list->next;
5552 rb_str_catf(msg, "\n ");
5553 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5554 rb_str_catf(msg, "\n");
5558 static void
5559 rb_check_deadlock(rb_ractor_t *r)
5561 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5563 int found = 0;
5564 rb_thread_t *th = NULL;
5565 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5566 int ltnum = rb_ractor_living_thread_num(r);
5568 if (ltnum > sleeper_num) return;
5569 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5570 if (patrol_thread && patrol_thread != GET_THREAD()) return;
5572 list_for_each(&r->threads.set, th, lt_node) {
5573 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5574 found = 1;
5576 else if (th->locking_mutex) {
5577 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5578 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !list_empty(&mutex->waitq))) {
5579 found = 1;
5582 if (found)
5583 break;
5586 if (!found) {
5587 VALUE argv[2];
5588 argv[0] = rb_eFatal;
5589 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5590 debug_deadlock_check(r, argv[1]);
5591 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5592 rb_threadptr_raise(r->threads.main, 2, argv);
5596 static void
5597 update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5599 const rb_control_frame_t *cfp = GET_EC()->cfp;
5600 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5601 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5602 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5603 if (lines) {
5604 long line = rb_sourceline() - 1;
5605 long count;
5606 VALUE num;
5607 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5608 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5609 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - cfp->iseq->body->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5610 rb_ary_push(lines, LONG2FIX(line + 1));
5611 return;
5613 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5614 return;
5616 num = RARRAY_AREF(lines, line);
5617 if (!FIXNUM_P(num)) return;
5618 count = FIX2LONG(num) + 1;
5619 if (POSFIXABLE(count)) {
5620 RARRAY_ASET(lines, line, LONG2FIX(count));
5626 static void
5627 update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5629 const rb_control_frame_t *cfp = GET_EC()->cfp;
5630 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5631 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5632 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5633 if (branches) {
5634 long pc = cfp->pc - cfp->iseq->body->iseq_encoded - 1;
5635 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5636 VALUE counters = RARRAY_AREF(branches, 1);
5637 VALUE num = RARRAY_AREF(counters, idx);
5638 count = FIX2LONG(num) + 1;
5639 if (POSFIXABLE(count)) {
5640 RARRAY_ASET(counters, idx, LONG2FIX(count));
5646 const rb_method_entry_t *
5647 rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5649 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5651 if (!me->def) return NULL; // negative cme
5653 retry:
5654 switch (me->def->type) {
5655 case VM_METHOD_TYPE_ISEQ: {
5656 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5657 rb_iseq_location_t *loc = &iseq->body->location;
5658 path = rb_iseq_path(iseq);
5659 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5660 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5661 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5662 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5663 break;
5665 case VM_METHOD_TYPE_BMETHOD: {
5666 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5667 if (iseq) {
5668 rb_iseq_location_t *loc;
5669 rb_iseq_check(iseq);
5670 path = rb_iseq_path(iseq);
5671 loc = &iseq->body->location;
5672 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5673 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5674 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5675 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5676 break;
5678 return NULL;
5680 case VM_METHOD_TYPE_ALIAS:
5681 me = me->def->body.alias.original_me;
5682 goto retry;
5683 case VM_METHOD_TYPE_REFINED:
5684 me = me->def->body.refined.orig_me;
5685 if (!me) return NULL;
5686 goto retry;
5687 default:
5688 return NULL;
5691 /* found */
5692 if (RB_TYPE_P(path, T_ARRAY)) {
5693 path = rb_ary_entry(path, 1);
5694         if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just in case */
5696 if (resolved_location) {
5697 resolved_location[0] = path;
5698 resolved_location[1] = beg_pos_lineno;
5699 resolved_location[2] = beg_pos_column;
5700 resolved_location[3] = end_pos_lineno;
5701 resolved_location[4] = end_pos_column;
5703 return me;
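/*
 * On success, resolved_location[] is filled as follows (directly
 * mirroring the assignments above):
 *
 *   resolved_location[0]  path            source file (String)
 *   resolved_location[1]  beg_pos_lineno  first line   (Integer)
 *   resolved_location[2]  beg_pos_column  first column (Integer)
 *   resolved_location[3]  end_pos_lineno  last line    (Integer)
 *   resolved_location[4]  end_pos_column  last column  (Integer)
 */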
5706 static void
5707 update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5709 const rb_control_frame_t *cfp = GET_EC()->cfp;
5710 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5711 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5712 VALUE rcount;
5713 long count;
5715 me = rb_resolve_me_location(me, 0);
5716 if (!me) return;
5718 rcount = rb_hash_aref(me2counter, (VALUE) me);
5719 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5720 if (POSFIXABLE(count)) {
5721 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5725 VALUE
5726 rb_get_coverages(void)
5728 return GET_VM()->coverages;
5732 rb_get_coverage_mode(void)
5734 return GET_VM()->coverage_mode;
5737 void
5738 rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5740 GET_VM()->coverages = coverages;
5741 GET_VM()->me2counter = me2counter;
5742 GET_VM()->coverage_mode = mode;
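/*
 * A hypothetical enable/disable sequence, as a C-level counterpart of
 * what the coverage extension does (the mode flags and the Qnil
 * me2counter below are assumptions for the sketch):
 *
 *   rb_set_coverages(rb_hash_new(), COVERAGE_TARGET_LINES, Qnil);
 *   rb_resume_coverages();    // install the event hooks below
 *   // ... run Ruby code; per-iseq counter arrays fill in ...
 *   rb_suspend_coverages();   // remove the hooks again
 */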
5745 void
5746 rb_resume_coverages(void)
5748 int mode = GET_VM()->coverage_mode;
5749 VALUE me2counter = GET_VM()->me2counter;
5750 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5751 if (mode & COVERAGE_TARGET_BRANCHES) {
5752 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5754 if (mode & COVERAGE_TARGET_METHODS) {
5755 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5759 void
5760 rb_suspend_coverages(void)
5762 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5763 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5764 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5766 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5767 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5771 /* Make coverage arrays empty so old covered files are no longer tracked. */
5772 void
5773 rb_reset_coverages(void)
5775 rb_clear_coverages();
5776 rb_iseq_remove_coverage_all();
5777 GET_VM()->coverages = Qfalse;
5780 VALUE
5781 rb_default_coverage(int n)
5783 VALUE coverage = rb_ary_tmp_new_fill(3);
5784 VALUE lines = Qfalse, branches = Qfalse;
5785 int mode = GET_VM()->coverage_mode;
5787 if (mode & COVERAGE_TARGET_LINES) {
5788 lines = n > 0 ? rb_ary_tmp_new_fill(n) : rb_ary_tmp_new(0);
5790 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5792 if (mode & COVERAGE_TARGET_BRANCHES) {
5793 branches = rb_ary_tmp_new_fill(2);
5794 /* internal data structures for branch coverage:
5796 * { branch base node =>
5797 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5798 * branch target id =>
5799 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5800 * ...
5801 * }],
5802 * ...
5805 * Example:
5806 * { NODE_CASE =>
5807 * [1, 0, 4, 3, {
5808 * NODE_WHEN => [2, 8, 2, 9, 0],
5809 * NODE_WHEN => [3, 8, 3, 9, 1],
5810 * ...
5811 * }],
5812 * ...
5815 VALUE structure = rb_hash_new();
5816 rb_obj_hide(structure);
5817 RARRAY_ASET(branches, 0, structure);
5818 /* branch execution counters */
5819 RARRAY_ASET(branches, 1, rb_ary_tmp_new(0));
5821 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5823 return coverage;
5826 static VALUE
5827 uninterruptible_exit(VALUE v)
5829 rb_thread_t *cur_th = GET_THREAD();
5830 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5832 cur_th->pending_interrupt_queue_checked = 0;
5833 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5834 RUBY_VM_SET_INTERRUPT(cur_th->ec);
5836 return Qnil;
5839 VALUE
5840 rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5842 VALUE interrupt_mask = rb_ident_hash_new();
5843 rb_thread_t *cur_th = GET_THREAD();
5845 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5846 OBJ_FREEZE_RAW(interrupt_mask);
5847 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5849 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5851 RUBY_VM_CHECK_INTS(cur_th->ec);
5852 return ret;
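/*
 * A minimal sketch of wrapping a critical section with
 * rb_uninterruptible() (do_critical_work and its body are hypothetical):
 *
 *   static VALUE
 *   do_critical_work(VALUE arg)
 *   {
 *       // runs as if under Thread.handle_interrupt(Object => :never):
 *       // Thread#raise / Thread#kill are deferred until we return
 *       return some_state_mutation(arg);
 *   }
 *
 *   VALUE ret = rb_uninterruptible(do_critical_work, (VALUE)data);
 *   // pending interrupts are re-checked right after the body returns
 */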