1 /**********************************************************************
3 thread.c -
5 $Author$
7 Copyright (C) 2004-2007 Koichi Sasada
9 **********************************************************************/
12 YARV Thread Design
14      model 1: Userlevel Thread
15        Same as the traditional ruby thread.
17      model 2: Native Thread with Global VM lock
18        Using pthread (or Windows threads); Ruby threads run concurrently.
20      model 3: Native Thread with fine grain lock
21        Using pthread; Ruby threads run concurrently or in parallel.
23      model 4: M:N User:Native threads with Global VM lock
24        Combination of models 1 and 2.
26      model 5: M:N User:Native threads with fine grain lock
27        Combination of models 1 and 3.
29 ------------------------------------------------------------------------
31      model 2:
32        Only the thread that holds the mutex (GVL: Global VM Lock or Giant VM Lock) can run.
33        On thread scheduling, the running thread releases the GVL. If the running
34        thread attempts a blocking operation, it must release the GVL so another
35        thread can continue in its place. After the blocking operation, the thread
36        must check for interrupts (RUBY_VM_CHECK_INTS).
38        Each VM can run in parallel.
40        Ruby threads are scheduled by the OS thread scheduler.
42 ------------------------------------------------------------------------
44      model 3:
45        All threads run concurrently or in parallel, and exclusive access
46        control is needed to touch shared objects. For example, to access a
47        String or Array object, a fine grain lock must be taken every time.
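/*
 * A minimal illustrative sketch of the model 2 discipline described above,
 * using helpers defined later in this file (BLOCKING_REGION, ubf_select,
 * RUBY_VM_CHECK_INTS_BLOCKING). This is not a verbatim call site; `fd',
 * `buf' and `len' are hypothetical:
 *
 *     BLOCKING_REGION(th, {
 *         r = read(fd, buf, len);      // runs with the GVL released
 *     }, ubf_select, th, FALSE);
 *     RUBY_VM_CHECK_INTS_BLOCKING(ec); // re-check interrupts with the GVL
 */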
52  * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
53  * 2.15 or later with _FORTIFY_SOURCE > 0.
54  * However, the implementation is wrong. Even though Linux's select(2)
55  * supports large fd sizes (>FD_SETSIZE), it wrongly assumes fd is always
56  * less than FD_SETSIZE (i.e. 1024). So when HAVE_RB_FD_INIT is enabled,
57  * it doesn't work correctly and makes the program abort. Therefore we need
58  * to disable _FORTIFY_SOURCE until glibc fixes it.
60 #undef _FORTIFY_SOURCE
61 #undef __USE_FORTIFY_LEVEL
62 #define __USE_FORTIFY_LEVEL 0
64 /* for model 2 */
66 #include "ruby/internal/config.h"
68 #ifdef __linux__
69 // Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70 # include <alloca.h>
71 #endif
73 #define TH_SCHED(th) (&(th)->ractor->threads.sched)
75 #include "eval_intern.h"
76 #include "hrtime.h"
77 #include "internal.h"
78 #include "internal/class.h"
79 #include "internal/cont.h"
80 #include "internal/error.h"
81 #include "internal/gc.h"
82 #include "internal/hash.h"
83 #include "internal/io.h"
84 #include "internal/object.h"
85 #include "internal/proc.h"
86 #include "ruby/fiber/scheduler.h"
87 #include "internal/signal.h"
88 #include "internal/thread.h"
89 #include "internal/time.h"
90 #include "internal/warnings.h"
91 #include "iseq.h"
92 #include "rjit.h"
93 #include "ruby/debug.h"
94 #include "ruby/io.h"
95 #include "ruby/thread.h"
96 #include "ruby/thread_native.h"
97 #include "timev.h"
98 #include "vm_core.h"
99 #include "ractor_core.h"
100 #include "vm_debug.h"
101 #include "vm_sync.h"
103 #if USE_RJIT && defined(HAVE_SYS_WAIT_H)
104 #include <sys/wait.h>
105 #endif
107 #ifndef USE_NATIVE_THREAD_PRIORITY
108 #define USE_NATIVE_THREAD_PRIORITY 0
109 #define RUBY_THREAD_PRIORITY_MAX 3
110 #define RUBY_THREAD_PRIORITY_MIN -3
111 #endif
113 static VALUE rb_cThreadShield;
115 static VALUE sym_immediate;
116 static VALUE sym_on_blocking;
117 static VALUE sym_never;
119 #define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
120 #define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
122 static inline VALUE
123 rb_thread_local_storage(VALUE thread)
125 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
126 rb_ivar_set(thread, idLocals, rb_hash_new());
127 RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
129 return rb_ivar_get(thread, idLocals);
132 enum SLEEP_FLAGS {
133 SLEEP_DEADLOCKABLE = 0x01,
134 SLEEP_SPURIOUS_CHECK = 0x02,
135 SLEEP_ALLOW_SPURIOUS = 0x04,
136 SLEEP_NO_CHECKINTS = 0x08,
139 static void sleep_forever(rb_thread_t *th, unsigned int fl);
140 static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
142 static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
143 static int rb_threadptr_dead(rb_thread_t *th);
144 static void rb_check_deadlock(rb_ractor_t *r);
145 static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
146 static const char *thread_status_name(rb_thread_t *th, int detail);
147 static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
148 NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
149 MAYBE_UNUSED(static int consume_communication_pipe(int fd));
151 static volatile int system_working = 1;
152 static rb_internal_thread_specific_key_t specific_key_count;
154 struct waiting_fd {
155 struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
156 rb_thread_t *th;
157 int fd;
158 struct rb_io_close_wait_list *busy;
161 /********************************************************************************/
163 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
165 struct rb_blocking_region_buffer {
166 enum rb_thread_status prev_status;
169 static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
170 static void unblock_function_clear(rb_thread_t *th);
172 static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
173 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
174 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
176 #define THREAD_BLOCKING_BEGIN(th) do { \
177 struct rb_thread_sched * const sched = TH_SCHED(th); \
178 RB_VM_SAVE_MACHINE_CONTEXT(th); \
179 thread_sched_to_waiting((sched), (th));
181 #define THREAD_BLOCKING_END(th) \
182 thread_sched_to_running((sched), (th)); \
183 rb_ractor_thread_switch(th->ractor, th); \
184 } while(0)
186 #ifdef __GNUC__
187 #ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
188 #define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
189 #else
190 #define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
191 #endif
192 #else
193 #define only_if_constant(expr, notconst) notconst
194 #endif
195 #define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
196 struct rb_blocking_region_buffer __region; \
197 if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
198 /* always return true unless fail_if_interrupted */ \
199 !only_if_constant(fail_if_interrupted, TRUE)) { \
200 exec; \
201 blocking_region_end(th, &__region); \
202 }; \
203 } while(0)
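/*
 * Note on only_if_constant()/BLOCKING_REGION above: when fail_if_interrupted
 * is a literal FALSE, blocking_region_begin() can never fail, and
 * only_if_constant() lets the compiler see that and drop the failure branch
 * entirely. A hedged usage sketch (`do_io' and `arg' are hypothetical; see
 * rb_nogvl() below for a real call site):
 *
 *     BLOCKING_REGION(th, {
 *         val = do_io(arg);            // executed with the GVL released
 *     }, ubf_select, th, FALSE);       // FALSE: never fail on interrupt
 */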
206 * returns true if this thread was spuriously interrupted, false otherwise
207 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
209 #define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
210 static inline int
211 vm_check_ints_blocking(rb_execution_context_t *ec)
213 rb_thread_t *th = rb_ec_thread_ptr(ec);
215 if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
216 if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
218 else {
219 th->pending_interrupt_queue_checked = 0;
220 RUBY_VM_SET_INTERRUPT(ec);
222 return rb_threadptr_execute_interrupts(th, 1);
226 rb_vm_check_ints_blocking(rb_execution_context_t *ec)
228 return vm_check_ints_blocking(ec);
232 * poll() is supported by many OSes, but so far Linux is the only
233 * one we know of that supports using poll() in all places select()
234 * would work.
236 #if defined(HAVE_POLL)
237 # if defined(__linux__)
238 # define USE_POLL
239 # endif
240 # if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
241 # define USE_POLL
242 /* FreeBSD does not set POLLOUT when POLLHUP happens */
243 # define POLLERR_SET (POLLHUP | POLLERR)
244 # endif
245 #endif
247 static void
248 timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
249 const struct timeval *timeout)
251 if (timeout) {
252 *rel = rb_timeval2hrtime(timeout);
253 *end = rb_hrtime_add(rb_hrtime_now(), *rel);
254 *to = rel;
256 else {
257 *to = 0;
261 MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
262 MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
263 MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));
265 #include THREAD_IMPL_SRC
268 * TODO: somebody with win32 knowledge should be able to get rid of
269 * timer-thread by busy-waiting on signals. And it should be possible
270  * to make the GVL in thread_pthread.c platform-independent.
272 #ifndef BUSY_WAIT_SIGNALS
273 # define BUSY_WAIT_SIGNALS (0)
274 #endif
276 #ifndef USE_EVENTFD
277 # define USE_EVENTFD (0)
278 #endif
280 #include "thread_sync.c"
282 void
283 rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
285 rb_native_mutex_initialize(lock);
288 void
289 rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
291 rb_native_mutex_destroy(lock);
294 void
295 rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
297 rb_native_mutex_lock(lock);
300 void
301 rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
303 rb_native_mutex_unlock(lock);
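/*
 * The loop in unblock_function_set() below is subtle: before publishing
 * th->unblock it re-checks pending interrupts while taking interrupt_lock,
 * and if an interrupt raced in it drops the lock and retries (servicing the
 * interrupt, or failing when fail_if_interrupted is set), so that no
 * interrupt is missed while the new unblock function is being installed.
 */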
306 static int
307 unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
309 do {
310 if (fail_if_interrupted) {
311 if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
312 return FALSE;
315 else {
316 RUBY_VM_CHECK_INTS(th->ec);
319 rb_native_mutex_lock(&th->interrupt_lock);
320 } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
321 (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
323 VM_ASSERT(th->unblock.func == NULL);
325 th->unblock.func = func;
326 th->unblock.arg = arg;
327 rb_native_mutex_unlock(&th->interrupt_lock);
329 return TRUE;
332 static void
333 unblock_function_clear(rb_thread_t *th)
335 rb_native_mutex_lock(&th->interrupt_lock);
336 th->unblock.func = 0;
337 rb_native_mutex_unlock(&th->interrupt_lock);
340 static void
341 rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
343 RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);
345 rb_native_mutex_lock(&th->interrupt_lock);
347 if (trap) {
348 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
350 else {
351 RUBY_VM_SET_INTERRUPT(th->ec);
354 if (th->unblock.func != NULL) {
355 (th->unblock.func)(th->unblock.arg);
357 else {
358 /* none */
361 rb_native_mutex_unlock(&th->interrupt_lock);
364 void
365 rb_threadptr_interrupt(rb_thread_t *th)
367 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
368 rb_threadptr_interrupt_common(th, 0);
371 static void
372 threadptr_trap_interrupt(rb_thread_t *th)
374 rb_threadptr_interrupt_common(th, 1);
377 static void
378 terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
380 rb_thread_t *th = 0;
382 ccan_list_for_each(&r->threads.set, th, lt_node) {
383 if (th != main_thread) {
384 RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
386 rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
387 rb_threadptr_interrupt(th);
389 RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
391 else {
392 RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
397 static void
398 rb_threadptr_join_list_wakeup(rb_thread_t *thread)
400 while (thread->join_list) {
401 struct rb_waiting_list *join_list = thread->join_list;
403 // Consume the entry from the join list:
404 thread->join_list = join_list->next;
406 rb_thread_t *target_thread = join_list->thread;
408 if (target_thread->scheduler != Qnil && join_list->fiber) {
409 rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
411 else {
412 rb_threadptr_interrupt(target_thread);
414 switch (target_thread->status) {
415 case THREAD_STOPPED:
416 case THREAD_STOPPED_FOREVER:
417 target_thread->status = THREAD_RUNNABLE;
418 break;
419 default:
420 break;
426 void
427 rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
429 while (th->keeping_mutexes) {
430 rb_mutex_t *mutex = th->keeping_mutexes;
431 th->keeping_mutexes = mutex->next_mutex;
433 // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);
435 const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
436 if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
440 void
441 rb_thread_terminate_all(rb_thread_t *th)
443 rb_ractor_t *cr = th->ractor;
444 rb_execution_context_t * volatile ec = th->ec;
445 volatile int sleeping = 0;
447 if (cr->threads.main != th) {
448 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
449 (void *)cr->threads.main, (void *)th);
452 /* unlock all locking mutexes */
453 rb_threadptr_unlock_all_locking_mutexes(th);
455 EC_PUSH_TAG(ec);
456 if (EC_EXEC_TAG() == TAG_NONE) {
457 retry:
458 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
460 terminate_all(cr, th);
462 while (rb_ractor_living_thread_num(cr) > 1) {
463 rb_hrtime_t rel = RB_HRTIME_PER_SEC;
465  * The thread-exit routine in thread_start_func_2 notifies
466  * me when the last sub-thread exits.
468 sleeping = 1;
469 native_sleep(th, &rel);
470 RUBY_VM_CHECK_INTS_BLOCKING(ec);
471 sleeping = 0;
474 else {
476  * When an exception is caught (e.g. Ctrl+C), broadcast the
477  * kill request again to ensure all threads are killed, even
478  * if they are blocked on sleep, mutex, etc.
480 if (sleeping) {
481 sleeping = 0;
482 goto retry;
485 EC_POP_TAG();
488 void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
490 static void
491 thread_cleanup_func_before_exec(void *th_ptr)
493 rb_thread_t *th = th_ptr;
494 th->status = THREAD_KILLED;
496 // The thread stack doesn't exist in the forked process:
497 th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
499 rb_threadptr_root_fiber_terminate(th);
502 static void
503 thread_cleanup_func(void *th_ptr, int atfork)
505 rb_thread_t *th = th_ptr;
507 th->locking_mutex = Qfalse;
508 thread_cleanup_func_before_exec(th_ptr);
511  * Unfortunately, we can't release native threading resources at fork,
512  * because libc may be in an unstable locking state, so touching
513  * a threading resource may cause a deadlock.
515 if (atfork) {
516 th->nt = NULL;
517 return;
520 rb_native_mutex_destroy(&th->interrupt_lock);
523 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
524 static VALUE rb_thread_to_s(VALUE thread);
526 void
527 ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
529 native_thread_init_stack(th, local_in_parent_frame);
532 const VALUE *
533 rb_vm_proc_local_ep(VALUE proc)
535 const VALUE *ep = vm_proc_ep(proc);
537 if (ep) {
538 return rb_vm_ep_local_ep(ep);
540 else {
541 return NULL;
545 // for ractor, defined in vm.c
546 VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
547 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
549 static VALUE
550 thread_do_start_proc(rb_thread_t *th)
552 VALUE args = th->invoke_arg.proc.args;
553 const VALUE *args_ptr;
554 int args_len;
555 VALUE procval = th->invoke_arg.proc.proc;
556 rb_proc_t *proc;
557 GetProcPtr(procval, proc);
559 th->ec->errinfo = Qnil;
560 th->ec->root_lep = rb_vm_proc_local_ep(procval);
561 th->ec->root_svar = Qfalse;
563 vm_check_ints_blocking(th->ec);
565 if (th->invoke_type == thread_invoke_type_ractor_proc) {
566 VALUE self = rb_ractor_self(th->ractor);
567 VM_ASSERT(FIXNUM_P(args));
568 args_len = FIX2INT(args);
569 args_ptr = ALLOCA_N(VALUE, args_len);
570 rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
571 vm_check_ints_blocking(th->ec);
573 return rb_vm_invoke_proc_with_self(
574 th->ec, proc, self,
575 args_len, args_ptr,
576 th->invoke_arg.proc.kw_splat,
577 VM_BLOCK_HANDLER_NONE
580 else {
581 args_len = RARRAY_LENINT(args);
582 if (args_len < 8) {
583  /* free proc.args if the length is small enough */
584 args_ptr = ALLOCA_N(VALUE, args_len);
585 MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
586 th->invoke_arg.proc.args = Qnil;
588 else {
589 args_ptr = RARRAY_CONST_PTR(args);
592 vm_check_ints_blocking(th->ec);
594 return rb_vm_invoke_proc(
595 th->ec, proc,
596 args_len, args_ptr,
597 th->invoke_arg.proc.kw_splat,
598 VM_BLOCK_HANDLER_NONE
603 static VALUE
604 thread_do_start(rb_thread_t *th)
606 native_set_thread_name(th);
607 VALUE result = Qundef;
609 switch (th->invoke_type) {
610 case thread_invoke_type_proc:
611 result = thread_do_start_proc(th);
612 break;
614 case thread_invoke_type_ractor_proc:
615 result = thread_do_start_proc(th);
616 rb_ractor_atexit(th->ec, result);
617 break;
619 case thread_invoke_type_func:
620 result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
621 break;
623 case thread_invoke_type_none:
624 rb_bug("unreachable");
627 return result;
630 void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
632 static int
633 thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
635 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
636 VM_ASSERT(th != th->vm->ractor.main_thread);
638 enum ruby_tag_type state;
639 VALUE errinfo = Qnil;
640 rb_thread_t *ractor_main_th = th->ractor->threads.main;
642 // setup ractor
643 if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
644 RB_VM_LOCK();
646 rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
647 rb_ractor_t *r = th->ractor;
648 r->r_stdin = rb_io_prep_stdin();
649 r->r_stdout = rb_io_prep_stdout();
650 r->r_stderr = rb_io_prep_stderr();
652 RB_VM_UNLOCK();
655 // Ensure that we are not joinable.
656 VM_ASSERT(UNDEF_P(th->value));
658 int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
659 VALUE result = Qundef;
661 EC_PUSH_TAG(th->ec);
663 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
664 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
666 result = thread_do_start(th);
669 if (!fiber_scheduler_closed) {
670 fiber_scheduler_closed = 1;
671 rb_fiber_scheduler_set(Qnil);
674 if (!event_thread_end_hooked) {
675 event_thread_end_hooked = 1;
676 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
679 if (state == TAG_NONE) {
680 // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
681 th->value = result;
682 } else {
683 errinfo = th->ec->errinfo;
685 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
686 if (!NIL_P(exc)) errinfo = exc;
688 if (state == TAG_FATAL) {
689 if (th->invoke_type == thread_invoke_type_ractor_proc) {
690 rb_ractor_atexit(th->ec, Qnil);
692 /* fatal error within this thread, need to stop whole script */
694 else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
695 /* exit on main_thread. */
697 else {
698 if (th->report_on_exception) {
699 VALUE mesg = rb_thread_to_s(th->self);
700 rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
701 rb_write_error_str(mesg);
702 rb_ec_error_print(th->ec, errinfo);
705 if (th->invoke_type == thread_invoke_type_ractor_proc) {
706 rb_ractor_atexit_exception(th->ec);
709 if (th->vm->thread_abort_on_exception ||
710 th->abort_on_exception || RTEST(ruby_debug)) {
711 /* exit on main_thread */
713 else {
714 errinfo = Qnil;
717 th->value = Qnil;
720 // The thread is effectively finished and can be joined.
721 VM_ASSERT(!UNDEF_P(th->value));
723 rb_threadptr_join_list_wakeup(th);
724 rb_threadptr_unlock_all_locking_mutexes(th);
726 if (th->invoke_type == thread_invoke_type_ractor_proc) {
727 rb_thread_terminate_all(th);
728 rb_ractor_teardown(th->ec);
731 th->status = THREAD_KILLED;
732 RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));
734 if (th->vm->ractor.main_thread == th) {
735 ruby_stop(0);
738 if (RB_TYPE_P(errinfo, T_OBJECT)) {
739 /* treat with normal error object */
740 rb_threadptr_raise(ractor_main_th, 1, &errinfo);
743 EC_POP_TAG();
745 rb_ec_clear_current_thread_trace_func(th->ec);
747 /* locking_mutex must be Qfalse */
748 if (th->locking_mutex != Qfalse) {
749 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
750 (void *)th, th->locking_mutex);
753 if (ractor_main_th->status == THREAD_KILLED &&
754 th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
755  /* I'm the last thread; wake up the main thread from rb_thread_terminate_all */
756 rb_threadptr_interrupt(ractor_main_th);
759 rb_check_deadlock(th->ractor);
761 rb_fiber_close(th->ec->fiber_ptr);
763 thread_cleanup_func(th, FALSE);
764 VM_ASSERT(th->ec->vm_stack == NULL);
766 if (th->invoke_type == thread_invoke_type_ractor_proc) {
767  // After rb_ractor_living_threads_remove(),
768  // GC can happen at any time and this ractor can be collected (destroying the GVL),
769  // so gvl_release() should happen before it.
770 thread_sched_to_dead(TH_SCHED(th), th);
771 rb_ractor_living_threads_remove(th->ractor, th);
773 else {
774 rb_ractor_living_threads_remove(th->ractor, th);
775 thread_sched_to_dead(TH_SCHED(th), th);
778 return 0;
781 struct thread_create_params {
782 enum thread_invoke_type type;
784 // for normal proc thread
785 VALUE args;
786 VALUE proc;
788 // for ractor
789 rb_ractor_t *g;
791 // for func
792 VALUE (*fn)(void *);
795 static void thread_specific_storage_alloc(rb_thread_t *th);
797 static VALUE
798 thread_create_core(VALUE thval, struct thread_create_params *params)
800 rb_execution_context_t *ec = GET_EC();
801 rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
802 int err;
804 thread_specific_storage_alloc(th);
806 if (OBJ_FROZEN(current_th->thgroup)) {
807 rb_raise(rb_eThreadError,
808 "can't start a new thread (frozen ThreadGroup)");
811 rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);
813 switch (params->type) {
814 case thread_invoke_type_proc:
815 th->invoke_type = thread_invoke_type_proc;
816 th->invoke_arg.proc.args = params->args;
817 th->invoke_arg.proc.proc = params->proc;
818 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
819 break;
821 case thread_invoke_type_ractor_proc:
822 #if RACTOR_CHECK_MODE > 0
823 rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
824 #endif
825 th->invoke_type = thread_invoke_type_ractor_proc;
826 th->ractor = params->g;
827 th->ractor->threads.main = th;
828 th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
829 th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
830 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
831 rb_ractor_send_parameters(ec, params->g, params->args);
832 break;
834 case thread_invoke_type_func:
835 th->invoke_type = thread_invoke_type_func;
836 th->invoke_arg.func.func = params->fn;
837 th->invoke_arg.func.arg = (void *)params->args;
838 break;
840 default:
841 rb_bug("unreachable");
844 th->priority = current_th->priority;
845 th->thgroup = current_th->thgroup;
847 th->pending_interrupt_queue = rb_ary_hidden_new(0);
848 th->pending_interrupt_queue_checked = 0;
849 th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
850 RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
852 rb_native_mutex_initialize(&th->interrupt_lock);
854 RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));
856 rb_ractor_living_threads_insert(th->ractor, th);
858 /* kick thread */
859 err = native_thread_create(th);
860 if (err) {
861 th->status = THREAD_KILLED;
862 rb_ractor_living_threads_remove(th->ractor, th);
863 rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
865 return thval;
868 #define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
871 * call-seq:
872 * Thread.new { ... } -> thread
873 * Thread.new(*args, &proc) -> thread
874 * Thread.new(*args) { |args| ... } -> thread
876 * Creates a new thread executing the given block.
878 * Any +args+ given to ::new will be passed to the block:
880 * arr = []
881 * a, b, c = 1, 2, 3
882 * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
883 * arr #=> [1, 2, 3]
885 * A ThreadError exception is raised if ::new is called without a block.
887 * If you're going to subclass Thread, be sure to call super in your
888 * +initialize+ method, otherwise a ThreadError will be raised.
890 static VALUE
891 thread_s_new(int argc, VALUE *argv, VALUE klass)
893 rb_thread_t *th;
894 VALUE thread = rb_thread_alloc(klass);
896 if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
897 rb_raise(rb_eThreadError, "can't alloc thread");
900 rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
901 th = rb_thread_ptr(thread);
902 if (!threadptr_initialized(th)) {
903 rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
904 klass);
906 return thread;
910 * call-seq:
911 * Thread.start([args]*) {|args| block } -> thread
912 * Thread.fork([args]*) {|args| block } -> thread
914 * Basically the same as ::new. However, if class Thread is subclassed, then
915 * calling +start+ in that subclass will not invoke the subclass's
916 * +initialize+ method.
919 static VALUE
920 thread_start(VALUE klass, VALUE args)
922 struct thread_create_params params = {
923 .type = thread_invoke_type_proc,
924 .args = args,
925 .proc = rb_block_proc(),
927 return thread_create_core(rb_thread_alloc(klass), &params);
930 static VALUE
931 threadptr_invoke_proc_location(rb_thread_t *th)
933 if (th->invoke_type == thread_invoke_type_proc) {
934 return rb_proc_location(th->invoke_arg.proc.proc);
936 else {
937 return Qnil;
941 /* :nodoc: */
942 static VALUE
943 thread_initialize(VALUE thread, VALUE args)
945 rb_thread_t *th = rb_thread_ptr(thread);
947 if (!rb_block_given_p()) {
948 rb_raise(rb_eThreadError, "must be called with a block");
950 else if (th->invoke_type != thread_invoke_type_none) {
951 VALUE loc = threadptr_invoke_proc_location(th);
952 if (!NIL_P(loc)) {
953 rb_raise(rb_eThreadError,
954 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
955 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
957 else {
958 rb_raise(rb_eThreadError, "already initialized thread");
961 else {
962 struct thread_create_params params = {
963 .type = thread_invoke_type_proc,
964 .args = args,
965 .proc = rb_block_proc(),
967 return thread_create_core(thread, &params);
971 VALUE
972 rb_thread_create(VALUE (*fn)(void *), void *arg)
974 struct thread_create_params params = {
975 .type = thread_invoke_type_func,
976 .fn = fn,
977 .args = (VALUE)arg,
979 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
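/*
 * A hedged usage sketch for rb_thread_create(): spawn a Ruby thread that
 * runs a C function. `worker' and `input' are hypothetical:
 *
 *     static VALUE
 *     worker(void *arg)
 *     {
 *         int *n = arg;
 *         return INT2FIX(*n * 2);   // becomes the thread's value
 *     }
 *
 *     static int input = 21;
 *     VALUE th = rb_thread_create(worker, &input);
 *     VALUE v  = rb_funcall(th, rb_intern("value"), 0);  // => 42
 */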
982 VALUE
983 rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
985 struct thread_create_params params = {
986 .type = thread_invoke_type_ractor_proc,
987 .g = r,
988 .args = args,
989 .proc = proc,
991 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
995 struct join_arg {
996 struct rb_waiting_list *waiter;
997 rb_thread_t *target;
998 VALUE timeout;
999 rb_hrtime_t *limit;
1002 static VALUE
1003 remove_from_join_list(VALUE arg)
1005 struct join_arg *p = (struct join_arg *)arg;
1006 rb_thread_t *target_thread = p->target;
1008 if (target_thread->status != THREAD_KILLED) {
1009 struct rb_waiting_list **join_list = &target_thread->join_list;
1011 while (*join_list) {
1012 if (*join_list == p->waiter) {
1013 *join_list = (*join_list)->next;
1014 break;
1017 join_list = &(*join_list)->next;
1021 return Qnil;
1024 static int
1025 thread_finished(rb_thread_t *th)
1027 return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1030 static VALUE
1031 thread_join_sleep(VALUE arg)
1033 struct join_arg *p = (struct join_arg *)arg;
1034 rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1035 rb_hrtime_t end = 0, *limit = p->limit;
1037 if (limit) {
1038 end = rb_hrtime_add(*limit, rb_hrtime_now());
1041 while (!thread_finished(target_th)) {
1042 VALUE scheduler = rb_fiber_scheduler_current();
1044 if (scheduler != Qnil) {
1045 rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
1046 // Check if the target thread is finished after blocking:
1047 if (thread_finished(target_th)) break;
1048 // Otherwise, a timeout occurred:
1049 else return Qfalse;
1051 else if (!limit) {
1052 sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
1054 else {
1055 if (hrtime_update_expire(limit, end)) {
1056 RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
1057 return Qfalse;
1059 th->status = THREAD_STOPPED;
1060 native_sleep(th, limit);
1062 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1063 th->status = THREAD_RUNNABLE;
1065 RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1068 return Qtrue;
1071 static VALUE
1072 thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
1074 rb_execution_context_t *ec = GET_EC();
1075 rb_thread_t *th = ec->thread_ptr;
1076 rb_fiber_t *fiber = ec->fiber_ptr;
1078 if (th == target_th) {
1079 rb_raise(rb_eThreadError, "Target thread must not be current thread");
1082 if (th->ractor->threads.main == target_th) {
1083 rb_raise(rb_eThreadError, "Target thread must not be main thread");
1086 RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1088 if (target_th->status != THREAD_KILLED) {
1089 struct rb_waiting_list waiter;
1090 waiter.next = target_th->join_list;
1091 waiter.thread = th;
1092 waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
1093 target_th->join_list = &waiter;
1095 struct join_arg arg;
1096 arg.waiter = &waiter;
1097 arg.target = target_th;
1098 arg.timeout = timeout;
1099 arg.limit = limit;
1101 if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1102 return Qnil;
1106 RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1108 if (target_th->ec->errinfo != Qnil) {
1109 VALUE err = target_th->ec->errinfo;
1111 if (FIXNUM_P(err)) {
1112 switch (err) {
1113 case INT2FIX(TAG_FATAL):
1114 RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1116 /* OK. killed. */
1117 break;
1118 default:
1119 rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1122 else if (THROW_DATA_P(target_th->ec->errinfo)) {
1123 rb_bug("thread_join: THROW_DATA should not reach here.");
1125 else {
1126 /* normal exception */
1127 rb_exc_raise(err);
1130 return target_th->self;
1134 * call-seq:
1135 * thr.join -> thr
1136 * thr.join(limit) -> thr
1138 * The calling thread will suspend execution and run this +thr+.
1140 * Does not return until +thr+ exits or until the given +limit+ seconds have
1141 * passed.
1143 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1144 * returned.
1146 * Any threads not joined will be killed when the main program exits.
1148  * If +thr+ had previously raised an exception and the ::abort_on_exception or
1149  * $DEBUG flags are not set (so the exception has not yet been processed), it
1150  * will be processed at this time.
1152 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1153 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1154 * x.join # Let thread x finish, thread a will be killed on exit.
1155 * #=> "axyz"
1157 * The following example illustrates the +limit+ parameter.
1159 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1160 * puts "Waiting" until y.join(0.15)
1162 * This will produce:
1164 * tick...
1165 * Waiting
1166 * tick...
1167 * Waiting
1168 * tick...
1169 * tick...
1172 static VALUE
1173 thread_join_m(int argc, VALUE *argv, VALUE self)
1175 VALUE timeout = Qnil;
1176 rb_hrtime_t rel = 0, *limit = 0;
1178 if (rb_check_arity(argc, 0, 1)) {
1179 timeout = argv[0];
1182 // Convert the timeout eagerly, so it's always converted and deterministic
1184 * This supports INFINITY and negative values, so we can't use
1185 * rb_time_interval right now...
1187 if (NIL_P(timeout)) {
1188 /* unlimited */
1190 else if (FIXNUM_P(timeout)) {
1191 rel = rb_sec2hrtime(NUM2TIMET(timeout));
1192 limit = &rel;
1194 else {
1195 limit = double2hrtime(&rel, rb_num2dbl(timeout));
1198 return thread_join(rb_thread_ptr(self), timeout, limit);
1202 * call-seq:
1203 * thr.value -> obj
1205 * Waits for +thr+ to complete, using #join, and returns its value or raises
1206 * the exception which terminated the thread.
1208 * a = Thread.new { 2 + 2 }
1209 * a.value #=> 4
1211 * b = Thread.new { raise 'something went wrong' }
1212 * b.value #=> RuntimeError: something went wrong
1215 static VALUE
1216 thread_value(VALUE self)
1218 rb_thread_t *th = rb_thread_ptr(self);
1219 thread_join(th, Qnil, 0);
1220 if (UNDEF_P(th->value)) {
1221  // If the thread is dead because we forked, th->value is still Qundef.
1222 return Qnil;
1224 return th->value;
1228 * Thread Scheduling
1231 static void
1232 getclockofday(struct timespec *ts)
1234 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1235 if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1236 return;
1237 #endif
1238 rb_timespec_now(ts);
1242  * Don't inline this, since the library call is already time-consuming
1243  * and we don't want "struct timespec" on the stack too long for GC.
1245 NOINLINE(rb_hrtime_t rb_hrtime_now(void));
1246 rb_hrtime_t
1247 rb_hrtime_now(void)
1249 struct timespec ts;
1251 getclockofday(&ts);
1252 return rb_timespec2hrtime(&ts);
1256  * at least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
1257  * being uninitialized; maybe other versions, too.
1259 COMPILER_WARNING_PUSH
1260 #if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1261 COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1262 #endif
1263 #ifndef PRIu64
1264 #define PRIu64 PRI_64_PREFIX "u"
1265 #endif
1267  * @end is the absolute time when @timeout is set to expire.
1268  * Returns true if @end has passed;
1269  * updates @timeout and returns false otherwise.
1271 static int
1272 hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1274 rb_hrtime_t now = rb_hrtime_now();
1276 if (now > end) return 1;
1278 RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);
1280 *timeout = end - now;
1281 return 0;
1283 COMPILER_WARNING_POP
1285 static int
1286 sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1288 enum rb_thread_status prev_status = th->status;
1289 int woke;
1290 rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1292 th->status = THREAD_STOPPED;
1293 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1294 while (th->status == THREAD_STOPPED) {
1295 native_sleep(th, &rel);
1296 woke = vm_check_ints_blocking(th->ec);
1297 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1298 break;
1299 if (hrtime_update_expire(&rel, end))
1300 break;
1301 woke = 1;
1303 th->status = prev_status;
1304 return woke;
1307 static int
1308 sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
1310 enum rb_thread_status prev_status = th->status;
1311 int woke;
1312 rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
1314 th->status = THREAD_STOPPED;
1315 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1316 while (th->status == THREAD_STOPPED) {
1317 native_sleep(th, &rel);
1318 woke = vm_check_ints_blocking(th->ec);
1319 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1320 break;
1321 if (hrtime_update_expire(&rel, end))
1322 break;
1323 woke = 1;
1325 th->status = prev_status;
1326 return woke;
1329 static void
1330 sleep_forever(rb_thread_t *th, unsigned int fl)
1332 enum rb_thread_status prev_status = th->status;
1333 enum rb_thread_status status;
1334 int woke;
1336 status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1337 th->status = status;
1339 if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1341 while (th->status == status) {
1342 if (fl & SLEEP_DEADLOCKABLE) {
1343 rb_ractor_sleeper_threads_inc(th->ractor);
1344 rb_check_deadlock(th->ractor);
1347 native_sleep(th, 0);
1349 if (fl & SLEEP_DEADLOCKABLE) {
1350 rb_ractor_sleeper_threads_dec(th->ractor);
1352 if (fl & SLEEP_ALLOW_SPURIOUS) {
1353 break;
1356 woke = vm_check_ints_blocking(th->ec);
1358 if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
1359 break;
1362 th->status = prev_status;
1365 void
1366 rb_thread_sleep_forever(void)
1368 RUBY_DEBUG_LOG("forever");
1369 sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1372 void
1373 rb_thread_sleep_deadly(void)
1375 RUBY_DEBUG_LOG("deadly");
1376 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1379 static void
1380 rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
1382 VALUE scheduler = rb_fiber_scheduler_current();
1383 if (scheduler != Qnil) {
1384 rb_fiber_scheduler_block(scheduler, blocker, timeout);
1386 else {
1387 RUBY_DEBUG_LOG("...");
1388 if (end) {
1389 sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
1391 else {
1392 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1397 void
1398 rb_thread_wait_for(struct timeval time)
1400 rb_thread_t *th = GET_THREAD();
1402 sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1406  * CAUTION: This function causes thread switching.
1407  * rb_thread_check_ints() checks Ruby's interrupts;
1408  * some interrupts require thread switching, invoking handlers,
1409  * and so on.
1412 void
1413 rb_thread_check_ints(void)
1415 RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
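/*
 * A hedged example of the intended usage: a long-running C loop stays
 * responsive to Thread#kill, Thread#raise and signals by polling interrupts
 * periodically. `do_unit_of_work' is a hypothetical helper:
 *
 *     for (i = 0; i < n; i++) {
 *         do_unit_of_work(i);
 *         rb_thread_check_ints(); // may switch threads or raise
 *     }
 */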
1419  * Hidden API for the tcl/tk wrapper.
1420  * There is no guarantee that it will be kept.
1423 rb_thread_check_trap_pending(void)
1425 return rb_signal_buff_size() != 0;
1428  /* This function can be called in a blocking region. */
1430 rb_thread_interrupted(VALUE thval)
1432 return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1435 void
1436 rb_thread_sleep(int sec)
1438 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1441 static void
1442 rb_thread_schedule_limits(uint32_t limits_us)
1444 if (!rb_thread_alone()) {
1445 rb_thread_t *th = GET_THREAD();
1446 RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);
1448 if (th->running_time_us >= limits_us) {
1449 RUBY_DEBUG_LOG("switch %s", "start");
1451 RB_VM_SAVE_MACHINE_CONTEXT(th);
1452 thread_sched_yield(TH_SCHED(th), th);
1453 rb_ractor_thread_switch(th->ractor, th);
1455 RUBY_DEBUG_LOG("switch %s", "done");
1460 void
1461 rb_thread_schedule(void)
1463 rb_thread_schedule_limits(0);
1464 RUBY_VM_CHECK_INTS(GET_EC());
1467 /* blocking region */
1469 static inline int
1470 blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1471 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1473 #ifdef RUBY_VM_CRITICAL_SECTION
1474 VM_ASSERT(ruby_assert_critical_section_entered == 0);
1475 #endif
1476 VM_ASSERT(th == GET_THREAD());
1478 region->prev_status = th->status;
1479 if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1480 th->blocking_region_buffer = region;
1481 th->status = THREAD_STOPPED;
1482 rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1484 RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
1486 RB_VM_SAVE_MACHINE_CONTEXT(th);
1487 thread_sched_to_waiting(TH_SCHED(th), th);
1488 return TRUE;
1490 else {
1491 return FALSE;
1495 static inline void
1496 blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1498 /* entry to ubf_list still permitted at this point, make it impossible: */
1499 unblock_function_clear(th);
1500 /* entry to ubf_list impossible at this point, so unregister is safe: */
1501 unregister_ubf_list(th);
1503 thread_sched_to_running(TH_SCHED(th), th);
1504 rb_ractor_thread_switch(th->ractor, th);
1506 th->blocking_region_buffer = 0;
1507 rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1508 if (th->status == THREAD_STOPPED) {
1509 th->status = region->prev_status;
1512 RUBY_DEBUG_LOG("end");
1514 #ifndef _WIN32
1515 // GET_THREAD() clears WSAGetLastError()
1516 VM_ASSERT(th == GET_THREAD());
1517 #endif
1520 void *
1521 rb_nogvl(void *(*func)(void *), void *data1,
1522 rb_unblock_function_t *ubf, void *data2,
1523 int flags)
1525 void *val = 0;
1526 rb_execution_context_t *ec = GET_EC();
1527 rb_thread_t *th = rb_ec_thread_ptr(ec);
1528 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1529 bool is_main_thread = vm->ractor.main_thread == th;
1530 int saved_errno = 0;
1531 VALUE ubf_th = Qfalse;
1533 if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1534 ubf = ubf_select;
1535 data2 = th;
1537 else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1538 if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1539 vm->ubf_async_safe = 1;
1543 BLOCKING_REGION(th, {
1544 val = func(data1);
1545 saved_errno = rb_errno();
1546 }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1548 if (is_main_thread) vm->ubf_async_safe = 0;
1550 if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1551 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1554 if (ubf_th != Qfalse) {
1555 thread_value(rb_thread_kill(ubf_th));
1558 rb_errno_set(saved_errno);
1560 return val;
1564 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1565 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1566  *                               without interrupt processing.
1568 * rb_thread_call_without_gvl() does:
1569 * (1) Check interrupts.
1570 * (2) release GVL.
1571 * Other Ruby threads may run in parallel.
1572 * (3) call func with data1
1573 * (4) acquire GVL.
1574  *       Other Ruby threads cannot run in parallel any more.
1575 * (5) Check interrupts.
1577 * rb_thread_call_without_gvl2() does:
1578 * (1) Check interrupt and return if interrupted.
1579 * (2) release GVL.
1580 * (3) call func with data1 and a pointer to the flags.
1581 * (4) acquire GVL.
1583 * If another thread interrupts this thread (Thread#kill, signal delivery,
1584 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1585 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1586 * toggling a cancellation flag, canceling the invocation of a call inside
1587 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1589 * There are built-in ubfs and you can specify these ubfs:
1591 * * RUBY_UBF_IO: ubf for IO operation
1592 * * RUBY_UBF_PROCESS: ubf for process operation
1594  * However, we cannot guarantee that our built-in ubfs interrupt your `func()'
1595  * correctly. Be careful when using rb_thread_call_without_gvl(). If you
1596  * don't provide a proper ubf(), your program will not stop on Control+C or
1597  * other shutdown events.
1599 * "Check interrupts" on above list means checking asynchronous
1600 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1601 * request, and so on) and calling corresponding procedures
1602 * (such as `trap' for signals, raise an exception for Thread#raise).
1603 * If `func()' finished and received interrupts, you may skip interrupt
1604 * checking. For example, assume the following func() it reads data from file.
1606 * read_func(...) {
1607 * // (a) before read
1608 * read(buffer); // (b) reading
1609 * // (c) after read
1612  * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1613  * `read_func()' and interrupts are checked. However, if an interrupt occurs
1614  * at (c), after the *read* operation is completed, checking interrupts is
1615  * harmful because it causes an irrevocable side effect: the read data will
1616  * vanish. To avoid such a problem, `read_func()' should be used with
1617  * `rb_thread_call_without_gvl2()'.
1619  * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
1620  * immediately. This function does not tell you when the execution was
1621  * interrupted; there are 4 possible timings: (a), (b), (c), and before calling
1622  * read_func(). You need to record the progress of read_func() and check
1623  * that progress after `rb_thread_call_without_gvl2()' returns. You may need to
1624  * call `rb_thread_check_ints()' at the right places, or your program cannot
1625  * properly process events such as `trap'.
1627  * NOTE: You cannot execute most of the Ruby C API or touch Ruby
1628  *       objects in `func()' and `ubf()', including raising an
1629  *       exception, because the current thread doesn't hold the GVL
1630  *       (doing so causes synchronization problems). If you need to
1631  *       call Ruby functions, either use rb_thread_call_with_gvl()
1632  *       or read the source code of the C APIs and confirm their safety
1633  *       yourself.
1635  * NOTE: In short, this API is difficult to use safely. I recommend
1636  *       other approaches if you have them. We lack experience using
1637  *       this API. Please report any problems related to it.
1639  * NOTE: Releasing and re-acquiring the GVL may be expensive operations
1640  *       for a short-running `func()'. Be sure to benchmark, and use this
1641  *       mechanism only when `func()' consumes enough time.
1643 * Safe C API:
1644 * * rb_thread_interrupted() - check interrupt flag
1645 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1646 * they will work without GVL, and may acquire GVL when GC is needed.
1648 void *
1649 rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1650 rb_unblock_function_t *ubf, void *data2)
1652 return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1655 void *
1656 rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1657 rb_unblock_function_t *ubf, void *data2)
1659 return rb_nogvl(func, data1, ubf, data2, 0);
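/*
 * A hedged usage sketch for rb_thread_call_without_gvl(). The names below
 * (struct read_args, blocking_read, cancel_read) are hypothetical; the
 * pattern is to run the slow call with the GVL released and supply a ubf
 * that can wake it up when Ruby wants to interrupt the thread:
 *
 *     struct read_args { int fd; char *buf; size_t len; ssize_t ret; };
 *
 *     static void *
 *     blocking_read(void *p)
 *     {
 *         struct read_args *a = p;
 *         a->ret = read(a->fd, a->buf, a->len);  // no GVL held here
 *         return NULL;
 *     }
 *
 *     static void
 *     cancel_read(void *p)
 *     {
 *         // called on interrupt, possibly without the GVL; unblock the
 *         // read(2) above, e.g. via a self-pipe or by shutting down the fd
 *     }
 *
 *     rb_thread_call_without_gvl(blocking_read, &args, cancel_read, &args);
 */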
1662 static int
1663 waitfd_to_waiting_flag(int wfd_event)
1665 return wfd_event << 1;
1668 static void
1669 thread_io_setup_wfd(rb_thread_t *th, int fd, struct waiting_fd *wfd)
1671 wfd->fd = fd;
1672 wfd->th = th;
1673 wfd->busy = NULL;
1675 RB_VM_LOCK_ENTER();
1677 ccan_list_add(&th->vm->waiting_fds, &wfd->wfd_node);
1679 RB_VM_LOCK_LEAVE();
1682 static void
1683 thread_io_wake_pending_closer(struct waiting_fd *wfd)
1685 bool has_waiter = wfd->busy && RB_TEST(wfd->busy->wakeup_mutex);
1686 if (has_waiter) {
1687 rb_mutex_lock(wfd->busy->wakeup_mutex);
1690 /* Needs to be protected with RB_VM_LOCK because we don't know if
1691 wfd is on the global list of pending FD ops or if it's on a
1692 struct rb_io_close_wait_list close-waiter. */
1693 RB_VM_LOCK_ENTER();
1694 ccan_list_del(&wfd->wfd_node);
1695 RB_VM_LOCK_LEAVE();
1697 if (has_waiter) {
1698 rb_thread_wakeup(wfd->busy->closing_thread);
1699 rb_mutex_unlock(wfd->busy->wakeup_mutex);
1703 static bool
1704 thread_io_mn_schedulable(rb_thread_t *th, int events, const struct timeval *timeout)
1706 #if defined(USE_MN_THREADS) && USE_MN_THREADS
1707 return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
1708 #else
1709 return false;
1710 #endif
1713  // true if a retry is needed
1714 static bool
1715 thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
1717 #if defined(USE_MN_THREADS) && USE_MN_THREADS
1718 if (thread_io_mn_schedulable(th, events, timeout)) {
1719 rb_hrtime_t rel, *prel;
1721 if (timeout) {
1722 rel = rb_timeval2hrtime(timeout);
1723 prel = &rel;
1725 else {
1726 prel = NULL;
1729 VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));
1731 if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
1732 // timeout
1733 return false;
1735 else {
1736 return true;
1739 #endif // defined(USE_MN_THREADS) && USE_MN_THREADS
1740 return false;
1743 // assume read/write
1744 static bool
1745 blocking_call_retryable_p(int r, int eno)
1747 if (r != -1) return false;
1749 switch (eno) {
1750 case EAGAIN:
1751 #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
1752 case EWOULDBLOCK:
1753 #endif
1754 return true;
1755 default:
1756 return false;
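/*
 * A hedged illustration of how blocking_call_retryable_p() is used (see
 * rb_thread_io_blocking_call() below): EAGAIN/EWOULDBLOCK from a
 * nonblocking descriptor is not a real error, so the caller waits for
 * readiness and retries instead of surfacing the errno:
 *
 *     r = (int)read(fd, buf, len);            // fd has O_NONBLOCK set
 *     if (blocking_call_retryable_p(r, errno)) {
 *         // wait for the fd to become readable, then retry the read
 *     }
 */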
1760 bool
1761 rb_thread_mn_schedulable(VALUE thval)
1763 rb_thread_t *th = rb_thread_ptr(thval);
1764 return th->mn_schedulable;
1767 VALUE
1768 rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, int events)
1770 rb_execution_context_t * volatile ec = GET_EC();
1771 rb_thread_t *th = rb_ec_thread_ptr(ec);
1773 RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), fd, events);
1775 struct waiting_fd waiting_fd;
1776 volatile VALUE val = Qundef; /* shouldn't be used */
1777 volatile int saved_errno = 0;
1778 enum ruby_tag_type state;
1779 bool prev_mn_schedulable = th->mn_schedulable;
1780 th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);
1782 // `errno` is only valid when there is an actual error - but we can't
1783 // extract that from the return value of `func` alone, so we clear any
1784 // prior `errno` value here so that we can later check if it was set by
1785 // `func` or not (as opposed to some previously set value).
1786 errno = 0;
1788 thread_io_setup_wfd(th, fd, &waiting_fd);
1790 EC_PUSH_TAG(ec);
1791 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1792 retry:
1793 BLOCKING_REGION(waiting_fd.th, {
1794 val = func(data1);
1795 saved_errno = errno;
1796 }, ubf_select, waiting_fd.th, FALSE);
1798 if (events &&
1799 blocking_call_retryable_p((int)val, saved_errno) &&
1800 thread_io_wait_events(th, fd, events, NULL)) {
1801 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1802 goto retry;
1805 EC_POP_TAG();
1807 th->mn_schedulable = prev_mn_schedulable;
1810 * must be deleted before jump
1811 * this will delete either from waiting_fds or on-stack struct rb_io_close_wait_list
1813 thread_io_wake_pending_closer(&waiting_fd);
1815 if (state) {
1816 EC_JUMP_TAG(ec, state);
1818 /* TODO: check func() */
1819 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1821 // If the error was a timeout, we raise a specific exception for that:
1822 if (saved_errno == ETIMEDOUT) {
1823 rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
1826 errno = saved_errno;
1828 return val;
1831 VALUE
1832 rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
1834 return rb_thread_io_blocking_call(func, data1, fd, 0);
1838 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1840  * After releasing the GVL using
1841  * rb_thread_call_without_gvl() you cannot access Ruby values or invoke
1842  * methods. If you need to access Ruby, you must use this function,
1843  * rb_thread_call_with_gvl().
1845 * This function rb_thread_call_with_gvl() does:
1846 * (1) acquire GVL.
1847 * (2) call passed function `func'.
1848 * (3) release GVL.
1849 * (4) return a value which is returned at (2).
1851  * NOTE: You should not return a Ruby object at (2) because such an object
1852  *       will not be marked.
1854  * NOTE: If an exception is raised in `func', this function DOES NOT
1855  *       protect (catch) the exception. If you have any resources
1856  *       which should be freed before the exception propagates, use
1857  *       rb_protect() in `func' and return a value which indicates that
1858  *       an exception was raised.
1860  * NOTE: This function should not be called by a thread which was not
1861  *       created as a Ruby thread (i.e. by Thread.new or the like). In
1862  *       other words, this function *DOES NOT* associate or convert a
1863  *       NON-Ruby thread to a Ruby thread.
1865 void *
1866 rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1868 rb_thread_t *th = ruby_thread_from_native();
1869 struct rb_blocking_region_buffer *brb;
1870 struct rb_unblock_callback prev_unblock;
1871 void *r;
1873 if (th == 0) {
1874  /* An error has occurred, but we can't use rb_bug()
1875  * because this thread is not Ruby's thread.
1876 * What should we do?
1878 bp();
1879 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1880 exit(EXIT_FAILURE);
1883 brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
1884 prev_unblock = th->unblock;
1886 if (brb == 0) {
1887 rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1890 blocking_region_end(th, brb);
1891  /* enter the Ruby world: you can access Ruby values, methods and so on. */
1892  r = (*func)(data1);
1893  /* leave the Ruby world: you can no longer access Ruby values, etc. */
1894 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1895 RUBY_ASSERT_ALWAYS(released);
1896 return r;
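/*
 * A hedged sketch of calling back into Ruby from code running without the
 * GVL (e.g. inside a rb_thread_call_without_gvl() callback). `with_gvl_warn'
 * and the message are hypothetical:
 *
 *     static void *
 *     with_gvl_warn(void *msg)
 *     {
 *         rb_warn("%s", (const char *)msg);  // GVL held: Ruby API is safe
 *         return NULL;
 *     }
 *
 *     // ... inside func() running without the GVL ...
 *     rb_thread_call_with_gvl(with_gvl_warn, (void *)"slow operation done");
 */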
1900 * ruby_thread_has_gvl_p - check if current native thread has GVL.
1903 *** This API is EXPERIMENTAL!
1904 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1909 ruby_thread_has_gvl_p(void)
1911 rb_thread_t *th = ruby_thread_from_native();
1913 if (th && th->blocking_region_buffer == 0) {
1914 return 1;
1916 else {
1917 return 0;
1922 * call-seq:
1923 * Thread.pass -> nil
1925 * Give the thread scheduler a hint to pass execution to another thread.
1926  * A running thread may or may not switch; it depends on the OS and processor.
1929 static VALUE
1930 thread_s_pass(VALUE klass)
1932 rb_thread_schedule();
1933 return Qnil;
1936 /*****************************************************/
1939 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1941 * Async events such as an exception thrown by Thread#raise,
1942 * Thread#kill and thread termination (after main thread termination)
1943 * will be queued to th->pending_interrupt_queue.
1944 * - clear: clear the queue.
1945 * - enque: enqueue err object into queue.
1946 * - deque: dequeue err object from queue.
1947 * - active_p: return 1 if the queue should be checked.
1949  * All rb_threadptr_pending_interrupt_* functions are called by
1950  * a thread which has acquired the GVL, of course.
1951  * Note that all "rb_"-prefixed APIs need the GVL to be called.
1954 void
1955 rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1957 rb_ary_clear(th->pending_interrupt_queue);
1960 void
1961 rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
1963 rb_ary_push(th->pending_interrupt_queue, v);
1964 th->pending_interrupt_queue_checked = 0;
1967 static void
1968 threadptr_check_pending_interrupt_queue(rb_thread_t *th)
1970 if (!th->pending_interrupt_queue) {
1971 rb_raise(rb_eThreadError, "uninitialized thread");
1975 enum handle_interrupt_timing {
1976 INTERRUPT_NONE,
1977 INTERRUPT_IMMEDIATE,
1978 INTERRUPT_ON_BLOCKING,
1979 INTERRUPT_NEVER
1982 static enum handle_interrupt_timing
1983 rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
1985 if (sym == sym_immediate) {
1986 return INTERRUPT_IMMEDIATE;
1988 else if (sym == sym_on_blocking) {
1989 return INTERRUPT_ON_BLOCKING;
1991 else if (sym == sym_never) {
1992 return INTERRUPT_NEVER;
1994 else {
1995 rb_raise(rb_eThreadError, "unknown mask signature");
1999 static enum handle_interrupt_timing
2000 rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2002 VALUE mask;
2003 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2004 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2005 VALUE mod;
2006 long i;
2008 for (i=0; i<mask_stack_len; i++) {
2009 mask = mask_stack[mask_stack_len-(i+1)];
2011 if (SYMBOL_P(mask)) {
2012 /* do not match RUBY_FATAL_THREAD_KILLED etc */
2013 if (err != rb_cInteger) {
2014 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
2016 else {
2017 continue;
2021 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2022 VALUE klass = mod;
2023 VALUE sym;
2025 if (BUILTIN_TYPE(mod) == T_ICLASS) {
2026 klass = RBASIC(mod)->klass;
2028 else if (mod != RCLASS_ORIGIN(mod)) {
2029 continue;
2032 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2033 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2036 /* try to next mask */
2038 return INTERRUPT_NONE;
2041 static int
2042 rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2044 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2047 static int
2048 rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2050 int i;
2051 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2052 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2053 if (rb_obj_is_kind_of(e, err)) {
2054 return TRUE;
2057 return FALSE;
2060 static VALUE
2061 rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2063 #if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2064 int i;
2066 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2067 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2069 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2071 switch (mask_timing) {
2072 case INTERRUPT_ON_BLOCKING:
2073 if (timing != INTERRUPT_ON_BLOCKING) {
2074 break;
2076 /* fall through */
2077 case INTERRUPT_NONE: /* default: IMMEDIATE */
2078 case INTERRUPT_IMMEDIATE:
2079 rb_ary_delete_at(th->pending_interrupt_queue, i);
2080 return err;
2081 case INTERRUPT_NEVER:
2082 break;
2086 th->pending_interrupt_queue_checked = 1;
2087 return Qundef;
2088 #else
2089 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2090 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2091 th->pending_interrupt_queue_checked = 1;
2093 return err;
2094 #endif
2097 static int
2098 threadptr_pending_interrupt_active_p(rb_thread_t *th)
2101 * For optimization, we don't check the async errinfo queue
2102 * if the queue and the thread interrupt mask were not changed
2103 * since the last check.
2105 if (th->pending_interrupt_queue_checked) {
2106 return 0;
2109 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2110 return 0;
2113 return 1;
2116 static int
2117 handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2119 VALUE *maskp = (VALUE *)args;
2121 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2122 rb_raise(rb_eArgError, "unknown mask signature");
2125 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2126 *maskp = val;
2127 return ST_CONTINUE;
2130 if (RTEST(*maskp)) {
2131 if (!RB_TYPE_P(*maskp, T_HASH)) {
2132 VALUE prev = *maskp;
2133 *maskp = rb_ident_hash_new();
2134 if (SYMBOL_P(prev)) {
2135 rb_hash_aset(*maskp, rb_eException, prev);
2138 rb_hash_aset(*maskp, key, val);
2140 else {
2141 *maskp = Qfalse;
2144 return ST_CONTINUE;
2148 * call-seq:
2149 * Thread.handle_interrupt(hash) { ... } -> result of the block
2151 * Changes asynchronous interrupt timing.
2153 * _interrupt_ means an asynchronous event and its corresponding procedure,
2154 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2155 * and main thread termination (if the main thread terminates, then all
2156 * other threads will be killed).
2158 * The given +hash+ has pairs like <code>ExceptionClass =>
2159 * :TimingSymbol</code>, where ExceptionClass is the interrupt handled by
2160 * the given block. The TimingSymbol can be one of the following symbols:
2162 * [+:immediate+] Invoke interrupts immediately.
2163 * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
2164 * [+:never+] Never invoke interrupts.
2166 * _BlockingOperation_ means an operation that will block the calling thread,
2167 * such as read and write. On the CRuby implementation, _BlockingOperation_ is
2168 * any operation executed without the GVL.
2170 * Masked asynchronous interrupts are delayed until they are enabled.
2171 * This method is similar to sigprocmask(3).
2173 * === NOTE
2175 * Asynchronous interrupts are difficult to use.
2177 * If you need to communicate between threads, please consider using another
2178 * mechanism such as Queue, or use asynchronous interrupts only with a deep
2179 * understanding of this method.
2181 * === Usage
2183 * In this example, we can guard against Thread#raise exceptions.
2185 * Using the +:never+ TimingSymbol, the RuntimeError exception will always be
2186 * ignored in the first block of the spawned thread. In the second
2187 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2189 * th = Thread.new do
2190 * Thread.handle_interrupt(RuntimeError => :never) {
2191 * begin
2192 * # You can write resource allocation code safely.
2193 * Thread.handle_interrupt(RuntimeError => :immediate) {
2194 * # ...
2195 * }
2196 * ensure
2197 * # You can write resource deallocation code safely.
2198 * end
2199 * }
2200 * end
2201 * Thread.pass
2202 * # ...
2203 * th.raise "stop"
2205 * While we are ignoring the RuntimeError exception, it's safe to write our
2206 * resource allocation code. Then, the ensure block is where we can safely
2207 * deallocate our resources.
2209 * ==== Guarding against Timeout::Error
2211 * In the next example, we will guard against the Timeout::Error exception.
2212 * This helps prevent resources from leaking when a Timeout::Error exception
2213 * occurs during a normal ensure clause. For this example we use the help of
2214 * the standard library Timeout, from lib/timeout.rb
2216 * require 'timeout'
2217 * Thread.handle_interrupt(Timeout::Error => :never) {
2218 * Timeout.timeout(10) {
2219 * # Timeout::Error doesn't occur here
2220 * Thread.handle_interrupt(Timeout::Error => :on_blocking) {
2221 * # possible to be killed by Timeout::Error
2222 * # while blocking operation
2223 * }
2224 * # Timeout::Error doesn't occur here
2225 * }
2226 * }
2228 * In the first part of the +timeout+ block, we can rely on Timeout::Error being
2229 * ignored. Then in the <code>Timeout::Error => :on_blocking</code> block, any
2230 * operation that will block the calling thread is susceptible to a
2231 * Timeout::Error exception being raised.
2233 * ==== Stack control settings
2235 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2236 * to control more than one ExceptionClass and TimingSymbol at a time.
2238 * Thread.handle_interrupt(FooError => :never) {
2239 * Thread.handle_interrupt(BarError => :never) {
2240 * # FooError and BarError are prohibited.
2241 * }
2242 * }
2244 * ==== Inheritance with ExceptionClass
2246 * All exceptions inherited from the ExceptionClass parameter will be considered.
2248 * Thread.handle_interrupt(Exception => :never) {
2249 * # all exceptions inherited from Exception are prohibited.
2250 * }
2252 * For handling all interrupts, use +Object+ and not +Exception+
2253 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
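*
* For instance, a minimal sketch (illustrative only; +do_critical_work+ is a
* placeholder for application code) that defers every interrupt, including
* kill/terminate, around a critical section:
*
* Thread.handle_interrupt(Object => :never) {
* # Thread#kill and Thread#raise are both deferred until the block exits.
* do_critical_work
* }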
2255 static VALUE
2256 rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2258 VALUE mask = Qundef;
2259 rb_execution_context_t * volatile ec = GET_EC();
2260 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2261 volatile VALUE r = Qnil;
2262 enum ruby_tag_type state;
2264 if (!rb_block_given_p()) {
2265 rb_raise(rb_eArgError, "block is needed.");
2268 mask_arg = rb_to_hash_type(mask_arg);
2270 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2271 mask = Qnil;
2274 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2276 if (UNDEF_P(mask)) {
2277 return rb_yield(Qnil);
2280 if (!RTEST(mask)) {
2281 mask = mask_arg;
2283 else if (RB_TYPE_P(mask, T_HASH)) {
2284 OBJ_FREEZE(mask);
2287 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2288 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2289 th->pending_interrupt_queue_checked = 0;
2290 RUBY_VM_SET_INTERRUPT(th->ec);
2293 EC_PUSH_TAG(th->ec);
2294 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2295 r = rb_yield(Qnil);
2297 EC_POP_TAG();
2299 rb_ary_pop(th->pending_interrupt_mask_stack);
2300 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2301 th->pending_interrupt_queue_checked = 0;
2302 RUBY_VM_SET_INTERRUPT(th->ec);
2305 RUBY_VM_CHECK_INTS(th->ec);
2307 if (state) {
2308 EC_JUMP_TAG(th->ec, state);
2311 return r;
2315 * call-seq:
2316 * target_thread.pending_interrupt?(error = nil) -> true/false
2318 * Returns +true+ if the asynchronous queue of the target thread is not empty, i.e. it has deferred events pending.
2320 * If +error+ is given, then check only for +error+ type deferred events.
2322 * See ::pending_interrupt? for more information.
2324 static VALUE
2325 rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2327 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2329 if (!target_th->pending_interrupt_queue) {
2330 return Qfalse;
2332 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2333 return Qfalse;
2335 if (rb_check_arity(argc, 0, 1)) {
2336 VALUE err = argv[0];
2337 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2338 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2340 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2342 else {
2343 return Qtrue;
2348 * call-seq:
2349 * Thread.pending_interrupt?(error = nil) -> true/false
2351 * Returns +true+ if the asynchronous queue of the current thread is not empty.
2353 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2354 * this method can be used to determine if there are any deferred events.
2356 * If this method returns +true+, you may want to finish the +:never+ block.
2358 * For example, the following method processes deferred asynchronous events
2359 * immediately.
2361 * def Thread.kick_interrupt_immediately
2362 * Thread.handle_interrupt(Object => :immediate) {
2363 * Thread.pass
2364 * }
2365 * end
2367 * If +error+ is given, then check only for +error+ type deferred events.
2369 * === Usage
2371 * th = Thread.new{
2372 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2373 * while true
2374 * ...
2375 * # reach safe point to invoke interrupt
2376 * if Thread.pending_interrupt?
2377 * Thread.handle_interrupt(Object => :immediate){}
2378 * end
2379 * ...
2380 * end
2381 * }
2382 * }
2383 * ...
2384 * th.raise # stop thread
2386 * This example can also be written as follows, which you should use if you
2387 * want to avoid asynchronous interrupts entirely.
2389 * flag = true
2390 * th = Thread.new{
2391 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2392 * while true
2393 * ...
2394 * # reach safe point to invoke interrupt
2395 * break if flag == false
2396 * ...
2397 * end
2398 * }
2399 * }
2400 * ...
2401 * flag = false # stop thread
2404 static VALUE
2405 rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2407 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2410 NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2412 static void
2413 rb_threadptr_to_kill(rb_thread_t *th)
2415 rb_threadptr_pending_interrupt_clear(th);
2416 th->status = THREAD_RUNNABLE;
2417 th->to_kill = 1;
2418 th->ec->errinfo = INT2FIX(TAG_FATAL);
2419 EC_JUMP_TAG(th->ec, TAG_FATAL);
2422 static inline rb_atomic_t
2423 threadptr_get_interrupts(rb_thread_t *th)
2425 rb_execution_context_t *ec = th->ec;
2426 rb_atomic_t interrupt;
2427 rb_atomic_t old;
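/* Atomically clear the deliverable (unmasked) bits from ec->interrupt_flag
* and return them; retry the CAS if another thread updated the flag
* concurrently. */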
2429 do {
2430 interrupt = ec->interrupt_flag;
2431 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2432 } while (old != interrupt);
2433 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2437 rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2439 rb_atomic_t interrupt;
2440 int postponed_job_interrupt = 0;
2441 int ret = FALSE;
2443 if (th->ec->raised_flag) return ret;
2445 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2446 int sig;
2447 int timer_interrupt;
2448 int pending_interrupt;
2449 int trap_interrupt;
2450 int terminate_interrupt;
2452 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2453 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2454 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2455 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2456 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2458 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
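/* Acquiring and immediately releasing the VM lock makes this thread
* participate in, and wait out, a VM-wide barrier. */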
2459 RB_VM_LOCK_ENTER();
2460 RB_VM_LOCK_LEAVE();
2463 if (postponed_job_interrupt) {
2464 rb_postponed_job_flush(th->vm);
2467 /* signal handling */
2468 if (trap_interrupt && (th == th->vm->ractor.main_thread)) {
2469 enum rb_thread_status prev_status = th->status;
2471 th->status = THREAD_RUNNABLE;
2473 while ((sig = rb_get_next_signal()) != 0) {
2474 ret |= rb_signal_exec(th, sig);
2477 th->status = prev_status;
2480 /* exception from another thread */
2481 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2482 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2483 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2484 ret = TRUE;
2486 if (UNDEF_P(err)) {
2487 /* no error */
2489 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2490 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2491 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2492 terminate_interrupt = 1;
2494 else {
2495 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2496 /* the only special exception to be queued across threads */
2497 err = ruby_vm_special_exception_copy(err);
2499 /* set runnable if th was slept. */
2500 if (th->status == THREAD_STOPPED ||
2501 th->status == THREAD_STOPPED_FOREVER)
2502 th->status = THREAD_RUNNABLE;
2503 rb_exc_raise(err);
2507 if (terminate_interrupt) {
2508 rb_threadptr_to_kill(th);
2511 if (timer_interrupt) {
2512 uint32_t limits_us = TIME_QUANTUM_USEC;
2514 if (th->priority > 0)
2515 limits_us <<= th->priority;
2516 else
2517 limits_us >>= -th->priority;
2519 if (th->status == THREAD_RUNNABLE)
2520 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2522 VM_ASSERT(th->ec->cfp);
2523 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2524 0, 0, 0, Qundef);
2526 rb_thread_schedule_limits(limits_us);
2529 return ret;
2532 void
2533 rb_thread_execute_interrupts(VALUE thval)
2535 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2538 static void
2539 rb_threadptr_ready(rb_thread_t *th)
2541 rb_threadptr_interrupt(th);
2544 static VALUE
2545 rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2547 VALUE exc;
2549 if (rb_threadptr_dead(target_th)) {
2550 return Qnil;
2553 if (argc == 0) {
2554 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2556 else {
2557 exc = rb_make_exception(argc, argv);
2560 /* making an exception object can switch threads,
2561 so we need to check thread deadness again */
2562 if (rb_threadptr_dead(target_th)) {
2563 return Qnil;
2566 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2567 rb_threadptr_pending_interrupt_enque(target_th, exc);
2568 rb_threadptr_interrupt(target_th);
2569 return Qnil;
2572 void
2573 rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2575 VALUE argv[2];
2577 argv[0] = rb_eSignal;
2578 argv[1] = INT2FIX(sig);
2579 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2582 void
2583 rb_threadptr_signal_exit(rb_thread_t *th)
2585 VALUE argv[2];
2587 argv[0] = rb_eSystemExit;
2588 argv[1] = rb_str_new2("exit");
2590 // TODO: check signal raise delivery
2591 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2595 rb_ec_set_raised(rb_execution_context_t *ec)
2597 if (ec->raised_flag & RAISED_EXCEPTION) {
2598 return 1;
2600 ec->raised_flag |= RAISED_EXCEPTION;
2601 return 0;
2605 rb_ec_reset_raised(rb_execution_context_t *ec)
2607 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2608 return 0;
2610 ec->raised_flag &= ~RAISED_EXCEPTION;
2611 return 1;
2615 rb_notify_fd_close(int fd, struct rb_io_close_wait_list *busy)
2617 rb_vm_t *vm = GET_THREAD()->vm;
2618 struct waiting_fd *wfd = 0, *next;
2619 ccan_list_head_init(&busy->pending_fd_users);
2620 int has_any;
2621 VALUE wakeup_mutex;
2623 RB_VM_LOCK_ENTER();
2625 ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2626 if (wfd->fd == fd) {
2627 rb_thread_t *th = wfd->th;
2628 VALUE err;
2630 ccan_list_del(&wfd->wfd_node);
2631 ccan_list_add(&busy->pending_fd_users, &wfd->wfd_node);
2633 wfd->busy = busy;
2634 err = th->vm->special_exceptions[ruby_error_stream_closed];
2635 rb_threadptr_pending_interrupt_enque(th, err);
2636 rb_threadptr_interrupt(th);
2641 has_any = !ccan_list_empty(&busy->pending_fd_users);
2642 busy->closing_thread = rb_thread_current();
2643 wakeup_mutex = Qnil;
2644 if (has_any) {
2645 wakeup_mutex = rb_mutex_new();
2646 RBASIC_CLEAR_CLASS(wakeup_mutex); /* hide from ObjectSpace */
2648 busy->wakeup_mutex = wakeup_mutex;
2650 RB_VM_LOCK_LEAVE();
2652 /* If the caller didn't pass *busy as a pointer to something on the stack,
2653 we need to guard this mutex object on _our_ C stack for the duration
2654 of this function. */
2655 RB_GC_GUARD(wakeup_mutex);
2656 return has_any;
2659 void
2660 rb_notify_fd_close_wait(struct rb_io_close_wait_list *busy)
2662 if (!RB_TEST(busy->wakeup_mutex)) {
2663 /* There was nobody else using this file when we closed it, so we
2664 never bothered to allocate a mutex */
2665 return;
2668 rb_mutex_lock(busy->wakeup_mutex);
2669 while (!ccan_list_empty(&busy->pending_fd_users)) {
2670 rb_mutex_sleep(busy->wakeup_mutex, Qnil);
2672 rb_mutex_unlock(busy->wakeup_mutex);
2675 void
2676 rb_thread_fd_close(int fd)
2678 struct rb_io_close_wait_list busy;
2680 if (rb_notify_fd_close(fd, &busy)) {
2681 rb_notify_fd_close_wait(&busy);
2686 * call-seq:
2687 * thr.raise
2688 * thr.raise(string)
2689 * thr.raise(exception [, string [, array]])
2691 * Raises an exception from the given thread. The caller does not have to be
2692 * +thr+. See Kernel#raise for more information.
2694 * Thread.abort_on_exception = true
2695 * a = Thread.new { sleep(200) }
2696 * a.raise("Gotcha")
2698 * This will produce:
2700 * prog.rb:3: Gotcha (RuntimeError)
2701 * from prog.rb:2:in `initialize'
2702 * from prog.rb:2:in `new'
2703 * from prog.rb:2
2706 static VALUE
2707 thread_raise_m(int argc, VALUE *argv, VALUE self)
2709 rb_thread_t *target_th = rb_thread_ptr(self);
2710 const rb_thread_t *current_th = GET_THREAD();
2712 threadptr_check_pending_interrupt_queue(target_th);
2713 rb_threadptr_raise(target_th, argc, argv);
2715 /* To perform Thread.current.raise as Kernel.raise */
2716 if (current_th == target_th) {
2717 RUBY_VM_CHECK_INTS(target_th->ec);
2719 return Qnil;
2724 * call-seq:
2725 * thr.exit -> thr
2726 * thr.kill -> thr
2727 * thr.terminate -> thr
2729 * Terminates +thr+ and schedules another thread to be run, returning
2730 * the terminated Thread. If this is the main thread, or the last
2731 * thread, exits the process.
2734 VALUE
2735 rb_thread_kill(VALUE thread)
2737 rb_thread_t *target_th = rb_thread_ptr(thread);
2739 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2740 return thread;
2742 if (target_th == target_th->vm->ractor.main_thread) {
2743 rb_exit(EXIT_SUCCESS);
2746 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2748 if (target_th == GET_THREAD()) {
2749 /* kill myself immediately */
2750 rb_threadptr_to_kill(target_th);
2752 else {
2753 threadptr_check_pending_interrupt_queue(target_th);
2754 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2755 rb_threadptr_interrupt(target_th);
2758 return thread;
2762 rb_thread_to_be_killed(VALUE thread)
2764 rb_thread_t *target_th = rb_thread_ptr(thread);
2766 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2767 return TRUE;
2769 return FALSE;
2773 * call-seq:
2774 * Thread.kill(thread) -> thread
2776 * Causes the given +thread+ to exit, see also Thread::exit.
2778 * count = 0
2779 * a = Thread.new { loop { count += 1 } }
2780 * sleep(0.1) #=> 0
2781 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2782 * count #=> 93947
2783 * a.alive? #=> false
2786 static VALUE
2787 rb_thread_s_kill(VALUE obj, VALUE th)
2789 return rb_thread_kill(th);
2794 * call-seq:
2795 * Thread.exit -> thread
2797 * Terminates the currently running thread and schedules another thread to be
2798 * run.
2800 * If this thread is already marked to be killed, ::exit returns the Thread.
2802 * If this is the main thread, or the last thread, exits the process.
2805 static VALUE
2806 rb_thread_exit(VALUE _)
2808 rb_thread_t *th = GET_THREAD();
2809 return rb_thread_kill(th->self);
2814 * call-seq:
2815 * thr.wakeup -> thr
2817 * Marks a given thread as eligible for scheduling; however, it may still
2818 * remain blocked on I/O.
2820 * *Note:* This does not invoke the scheduler; see #run for more information.
2822 * c = Thread.new { Thread.stop; puts "hey!" }
2823 * sleep 0.1 while c.status!='sleep'
2824 * c.wakeup
2825 * c.join
2826 * #=> "hey!"
2829 VALUE
2830 rb_thread_wakeup(VALUE thread)
2832 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2833 rb_raise(rb_eThreadError, "killed thread");
2835 return thread;
2838 VALUE
2839 rb_thread_wakeup_alive(VALUE thread)
2841 rb_thread_t *target_th = rb_thread_ptr(thread);
2842 if (target_th->status == THREAD_KILLED) return Qnil;
2844 rb_threadptr_ready(target_th);
2846 if (target_th->status == THREAD_STOPPED ||
2847 target_th->status == THREAD_STOPPED_FOREVER) {
2848 target_th->status = THREAD_RUNNABLE;
2851 return thread;
2856 * call-seq:
2857 * thr.run -> thr
2859 * Wakes up +thr+, making it eligible for scheduling.
2861 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2862 * sleep 0.1 while a.status!='sleep'
2863 * puts "Got here"
2864 * a.run
2865 * a.join
2867 * This will produce:
2870 * Got here
2873 * See also the instance method #wakeup.
2876 VALUE
2877 rb_thread_run(VALUE thread)
2879 rb_thread_wakeup(thread);
2880 rb_thread_schedule();
2881 return thread;
2885 VALUE
2886 rb_thread_stop(void)
2888 if (rb_thread_alone()) {
2889 rb_raise(rb_eThreadError,
2890 "stopping only thread\n\tnote: use sleep to stop forever");
2892 rb_thread_sleep_deadly();
2893 return Qnil;
2897 * call-seq:
2898 * Thread.stop -> nil
2900 * Stops execution of the current thread, putting it into a ``sleep'' state,
2901 * and schedules execution of another thread.
2903 * a = Thread.new { print "a"; Thread.stop; print "c" }
2904 * sleep 0.1 while a.status!='sleep'
2905 * print "b"
2906 * a.run
2907 * a.join
2908 * #=> "abc"
2911 static VALUE
2912 thread_stop(VALUE _)
2914 return rb_thread_stop();
2917 /********************************************************************/
2919 VALUE
2920 rb_thread_list(void)
2922 // TODO
2923 return rb_ractor_thread_list();
2927 * call-seq:
2928 * Thread.list -> array
2930 * Returns an array of Thread objects for all threads that are either runnable
2931 * or stopped.
2933 * Thread.new { sleep(200) }
2934 * Thread.new { 1000000.times {|i| i*i } }
2935 * Thread.new { Thread.stop }
2936 * Thread.list.each {|t| p t}
2938 * This will produce:
2940 * #<Thread:0x401b3e84 sleep>
2941 * #<Thread:0x401b3f38 run>
2942 * #<Thread:0x401b3fb0 sleep>
2943 * #<Thread:0x401bdf4c run>
2946 static VALUE
2947 thread_list(VALUE _)
2949 return rb_thread_list();
2952 VALUE
2953 rb_thread_current(void)
2955 return GET_THREAD()->self;
2959 * call-seq:
2960 * Thread.current -> thread
2962 * Returns the currently executing thread.
2964 * Thread.current #=> #<Thread:0x401bdf4c run>
2967 static VALUE
2968 thread_s_current(VALUE klass)
2970 return rb_thread_current();
2973 VALUE
2974 rb_thread_main(void)
2976 return GET_RACTOR()->threads.main->self;
2980 * call-seq:
2981 * Thread.main -> thread
2983 * Returns the main thread.
2986 static VALUE
2987 rb_thread_s_main(VALUE klass)
2989 return rb_thread_main();
2994 * call-seq:
2995 * Thread.abort_on_exception -> true or false
2997 * Returns the status of the global ``abort on exception'' condition.
2999 * The default is +false+.
3001 * When set to +true+, if any thread is aborted by an exception, the
3002 * raised exception will be re-raised in the main thread.
3004 * Can also be specified by the global $DEBUG flag or command line option
3005 * +-d+.
3007 * See also ::abort_on_exception=.
3009 * There is also an instance level method to set this for a specific thread,
3010 * see #abort_on_exception.
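*
* For example (assuming the default setting):
*
* Thread.abort_on_exception #=> false
* Thread.abort_on_exception = true
* Thread.abort_on_exception #=> true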
3013 static VALUE
3014 rb_thread_s_abort_exc(VALUE _)
3016 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
3021 * call-seq:
3022 * Thread.abort_on_exception= boolean -> true or false
3024 * When set to +true+, if any thread is aborted by an exception, the
3025 * raised exception will be re-raised in the main thread.
3026 * Returns the new state.
3028 * Thread.abort_on_exception = true
3029 * t1 = Thread.new do
3030 * puts "In new thread"
3031 * raise "Exception from thread"
3032 * end
3033 * sleep(1)
3034 * puts "not reached"
3036 * This will produce:
3038 * In new thread
3039 * prog.rb:4: Exception from thread (RuntimeError)
3040 * from prog.rb:2:in `initialize'
3041 * from prog.rb:2:in `new'
3042 * from prog.rb:2
3044 * See also ::abort_on_exception.
3046 * There is also an instance level method to set this for a specific thread,
3047 * see #abort_on_exception=.
3050 static VALUE
3051 rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3053 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3054 return val;
3059 * call-seq:
3060 * thr.abort_on_exception -> true or false
3062 * Returns the status of the thread-local ``abort on exception'' condition for
3063 * this +thr+.
3065 * The default is +false+.
3067 * See also #abort_on_exception=.
3069 * There is also a class level method to set this for all threads, see
3070 * ::abort_on_exception.
3073 static VALUE
3074 rb_thread_abort_exc(VALUE thread)
3076 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3081 * call-seq:
3082 * thr.abort_on_exception= boolean -> true or false
3084 * When set to +true+, if this +thr+ is aborted by an exception, the
3085 * raised exception will be re-raised in the main thread.
3087 * See also #abort_on_exception.
3089 * There is also a class level method to set this for all threads, see
3090 * ::abort_on_exception=.
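*
* A short sketch (assuming the default setting):
*
* t = Thread.new { sleep }
* t.abort_on_exception #=> false
* t.abort_on_exception = true
* t.abort_on_exception #=> true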
3093 static VALUE
3094 rb_thread_abort_exc_set(VALUE thread, VALUE val)
3096 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3097 return val;
3102 * call-seq:
3103 * Thread.report_on_exception -> true or false
3105 * Returns the status of the global ``report on exception'' condition.
3107 * The default is +true+ since Ruby 2.5.
3109 * All threads created when this flag is true will report
3110 * a message on $stderr if an exception kills the thread.
3112 * Thread.new { 1.times { raise } }
3114 * will produce this output on $stderr:
3116 * #<Thread:...> terminated with exception (report_on_exception is true):
3117 * Traceback (most recent call last):
3118 * 2: from -e:1:in `block in <main>'
3119 * 1: from -e:1:in `times'
3121 * This is done to catch errors in threads early.
3122 * In some cases, you might not want this output.
3123 * There are multiple ways to avoid the extra output:
3125 * * If the exception is not intended, the best is to fix the cause of
3126 * the exception so it does not happen anymore.
3127 * * If the exception is intended, it might be better to rescue it closer to
3128 * where it is raised rather than let it kill the Thread.
3129 * * If it is guaranteed the Thread will be joined with Thread#join or
3130 * Thread#value, then it is safe to disable this report with
3131 * <code>Thread.current.report_on_exception = false</code>
3132 * when starting the Thread.
3133 * However, this might handle the exception much later, or not at all
3134 * if the Thread is never joined due to the parent thread being blocked, etc.
3136 * See also ::report_on_exception=.
3138 * There is also an instance level method to set this for a specific thread,
3139 * see #report_on_exception=.
3143 static VALUE
3144 rb_thread_s_report_exc(VALUE _)
3146 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3151 * call-seq:
3152 * Thread.report_on_exception= boolean -> true or false
3154 * Returns the new state.
3155 * When set to +true+, all threads created afterwards will inherit the
3156 * condition and report a message on $stderr if an exception kills a thread:
3158 * Thread.report_on_exception = true
3159 * t1 = Thread.new do
3160 * puts "In new thread"
3161 * raise "Exception from thread"
3162 * end
3163 * sleep(1)
3164 * puts "In the main thread"
3166 * This will produce:
3168 * In new thread
3169 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3170 * Traceback (most recent call last):
3171 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3172 * In the main thread
3174 * See also ::report_on_exception.
3176 * There is also an instance level method to set this for a specific thread,
3177 * see #report_on_exception=.
3180 static VALUE
3181 rb_thread_s_report_exc_set(VALUE self, VALUE val)
3183 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3184 return val;
3189 * call-seq:
3190 * Thread.ignore_deadlock -> true or false
3192 * Returns the status of the global ``ignore deadlock'' condition.
3193 * The default is +false+, so that deadlock conditions are not ignored.
3195 * See also ::ignore_deadlock=.
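*
* For example (assuming the default setting):
*
* Thread.ignore_deadlock #=> false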
3199 static VALUE
3200 rb_thread_s_ignore_deadlock(VALUE _)
3202 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3207 * call-seq:
3208 * Thread.ignore_deadlock = boolean -> true or false
3210 * Returns the new state.
3211 * When set to +true+, the VM will not check for deadlock conditions.
3212 * It is only useful to set this if your application can break a
3213 * deadlock condition via some other means, such as a signal.
3215 * Thread.ignore_deadlock = true
3216 * queue = Thread::Queue.new
3218 * trap(:SIGUSR1){queue.push "Received signal"}
3220 * # raises fatal error unless ignoring deadlock
3221 * puts queue.pop
3223 * See also ::ignore_deadlock.
3226 static VALUE
3227 rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3229 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3230 return val;
3235 * call-seq:
3236 * thr.report_on_exception -> true or false
3238 * Returns the status of the thread-local ``report on exception'' condition for
3239 * this +thr+.
3241 * The default value when creating a Thread is the value of
3242 * the global flag Thread.report_on_exception.
3244 * See also #report_on_exception=.
3246 * There is also a class level method to set this for all new threads, see
3247 * ::report_on_exception=.
3250 static VALUE
3251 rb_thread_report_exc(VALUE thread)
3253 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3258 * call-seq:
3259 * thr.report_on_exception= boolean -> true or false
3261 * When set to +true+, a message is printed on $stderr if an exception
3262 * kills this +thr+. See ::report_on_exception for details.
3264 * See also #report_on_exception.
3266 * There is also a class level method to set this for all new threads, see
3267 * ::report_on_exception=.
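*
* A short sketch:
*
* t = Thread.new { sleep }
* t.report_on_exception = false
* t.report_on_exception #=> false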
3270 static VALUE
3271 rb_thread_report_exc_set(VALUE thread, VALUE val)
3273 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3274 return val;
3279 * call-seq:
3280 * thr.group -> thgrp or nil
3282 * Returns the ThreadGroup which contains the given thread.
3284 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3287 VALUE
3288 rb_thread_group(VALUE thread)
3290 return rb_thread_ptr(thread)->thgroup;
3293 static const char *
3294 thread_status_name(rb_thread_t *th, int detail)
3296 switch (th->status) {
3297 case THREAD_RUNNABLE:
3298 return th->to_kill ? "aborting" : "run";
3299 case THREAD_STOPPED_FOREVER:
3300 if (detail) return "sleep_forever";
3301 case THREAD_STOPPED:
3302 return "sleep";
3303 case THREAD_KILLED:
3304 return "dead";
3305 default:
3306 return "unknown";
3310 static int
3311 rb_threadptr_dead(rb_thread_t *th)
3313 return th->status == THREAD_KILLED;
3318 * call-seq:
3319 * thr.status -> string, false or nil
3321 * Returns the status of +thr+.
3323 * [<tt>"sleep"</tt>]
3324 * Returned if this thread is sleeping or waiting on I/O
3325 * [<tt>"run"</tt>]
3326 * When this thread is executing
3327 * [<tt>"aborting"</tt>]
3328 * If this thread is aborting
3329 * [+false+]
3330 * When this thread is terminated normally
3331 * [+nil+]
3332 * If terminated with an exception.
3334 * a = Thread.new { raise("die now") }
3335 * b = Thread.new { Thread.stop }
3336 * c = Thread.new { Thread.exit }
3337 * d = Thread.new { sleep }
3338 * d.kill #=> #<Thread:0x401b3678 aborting>
3339 * a.status #=> nil
3340 * b.status #=> "sleep"
3341 * c.status #=> false
3342 * d.status #=> "aborting"
3343 * Thread.current.status #=> "run"
3345 * See also the instance methods #alive? and #stop?
3348 static VALUE
3349 rb_thread_status(VALUE thread)
3351 rb_thread_t *target_th = rb_thread_ptr(thread);
3353 if (rb_threadptr_dead(target_th)) {
3354 if (!NIL_P(target_th->ec->errinfo) &&
3355 !FIXNUM_P(target_th->ec->errinfo)) {
3356 return Qnil;
3358 else {
3359 return Qfalse;
3362 else {
3363 return rb_str_new2(thread_status_name(target_th, FALSE));
3369 * call-seq:
3370 * thr.alive? -> true or false
3372 * Returns +true+ if +thr+ is running or sleeping.
3374 * thr = Thread.new { }
3375 * thr.join #=> #<Thread:0x401b3fb0 dead>
3376 * Thread.current.alive? #=> true
3377 * thr.alive? #=> false
3379 * See also #stop? and #status.
3382 static VALUE
3383 rb_thread_alive_p(VALUE thread)
3385 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3389 * call-seq:
3390 * thr.stop? -> true or false
3392 * Returns +true+ if +thr+ is dead or sleeping.
3394 * a = Thread.new { Thread.stop }
3395 * b = Thread.current
3396 * a.stop? #=> true
3397 * b.stop? #=> false
3399 * See also #alive? and #status.
3402 static VALUE
3403 rb_thread_stop_p(VALUE thread)
3405 rb_thread_t *th = rb_thread_ptr(thread);
3407 if (rb_threadptr_dead(th)) {
3408 return Qtrue;
3410 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3414 * call-seq:
3415 * thr.name -> string
3417 * Returns the name of the thread, or +nil+ if no name has been set.
3420 static VALUE
3421 rb_thread_getname(VALUE thread)
3423 return rb_thread_ptr(thread)->name;
3427 * call-seq:
3428 * thr.name=(name) -> string
3430 * Sets the given name on the Ruby thread.
3431 * On some platforms, the name may also be set for the underlying pthread and/or the kernel.
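*
* For example:
*
* t = Thread.new { sleep }
* t.name = "worker"
* t.name #=> "worker"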
3434 static VALUE
3435 rb_thread_setname(VALUE thread, VALUE name)
3437 rb_thread_t *target_th = rb_thread_ptr(thread);
3439 if (!NIL_P(name)) {
3440 rb_encoding *enc;
3441 StringValueCStr(name);
3442 enc = rb_enc_get(name);
3443 if (!rb_enc_asciicompat(enc)) {
3444 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3445 rb_enc_name(enc));
3447 name = rb_str_new_frozen(name);
3449 target_th->name = name;
3450 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3451 native_set_another_thread_name(target_th->nt->thread_id, name);
3453 return name;
3456 #if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3458 * call-seq:
3459 * thr.native_thread_id -> integer
3461 * Returns the native thread ID used by the Ruby thread.
3463 * The ID depends on the OS; it is not the POSIX thread ID returned by pthread_self(3).
3464 * * On Linux it is TID returned by gettid(2).
3465 * * On macOS it is the system-wide unique integral ID of thread returned
3466 * by pthread_threadid_np(3).
3467 * * On FreeBSD it is the unique integral ID of the thread returned by
3468 * pthread_getthreadid_np(3).
3469 * * On Windows it is the thread identifier returned by GetThreadId().
3470 * * On other platforms, it raises NotImplementedError.
3472 * NOTE:
3473 * If the thread is not yet associated with, or has already been dissociated
3474 * from, a native thread, it returns _nil_.
3475 * If the Ruby implementation uses an M:N thread model, the ID may change
3476 * depending on the timing.
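*
* For example, on Linux (the value itself is illustrative):
*
* Thread.current.native_thread_id #=> 12345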
3479 static VALUE
3480 rb_thread_native_thread_id(VALUE thread)
3482 rb_thread_t *target_th = rb_thread_ptr(thread);
3483 if (rb_threadptr_dead(target_th)) return Qnil;
3484 return native_thread_native_thread_id(target_th);
3486 #else
3487 # define rb_thread_native_thread_id rb_f_notimplement
3488 #endif
3491 * call-seq:
3492 * thr.to_s -> string
3494 * Dump the name, id, and status of _thr_ to a string.
3497 static VALUE
3498 rb_thread_to_s(VALUE thread)
3500 VALUE cname = rb_class_path(rb_obj_class(thread));
3501 rb_thread_t *target_th = rb_thread_ptr(thread);
3502 const char *status;
3503 VALUE str, loc;
3505 status = thread_status_name(target_th, TRUE);
3506 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3507 if (!NIL_P(target_th->name)) {
3508 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3510 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3511 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3512 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3514 rb_str_catf(str, " %s>", status);
3516 return str;
3519 /* variables for recursive traversals */
3520 #define recursive_key id__recursive_key__
3522 static VALUE
3523 threadptr_local_aref(rb_thread_t *th, ID id)
3525 if (id == recursive_key) {
3526 return th->ec->local_storage_recursive_hash;
3528 else {
3529 VALUE val;
3530 struct rb_id_table *local_storage = th->ec->local_storage;
3532 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3533 return val;
3535 else {
3536 return Qnil;
3541 VALUE
3542 rb_thread_local_aref(VALUE thread, ID id)
3544 return threadptr_local_aref(rb_thread_ptr(thread), id);
3548 * call-seq:
3549 * thr[sym] -> obj or nil
3551 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3552 * if not explicitly inside a Fiber), using either a symbol or a string name.
3553 * If the specified variable does not exist, returns +nil+.
3555 * [
3556 * Thread.new { Thread.current["name"] = "A" },
3557 * Thread.new { Thread.current[:name] = "B" },
3558 * Thread.new { Thread.current["name"] = "C" }
3559 * ].each do |th|
3560 * th.join
3561 * puts "#{th.inspect}: #{th[:name]}"
3562 * end
3564 * This will produce:
3566 * #<Thread:0x00000002a54220 dead>: A
3567 * #<Thread:0x00000002a541a8 dead>: B
3568 * #<Thread:0x00000002a54130 dead>: C
3570 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3571 * This confusion did not exist in Ruby 1.8 because
3572 * fibers were only introduced in Ruby 1.9.
3573 * Ruby 1.9 made these methods fiber-local in order to preserve
3574 * the following idiom for dynamic scope.
3576 * def meth(newvalue)
3577 * begin
3578 * oldvalue = Thread.current[:name]
3579 * Thread.current[:name] = newvalue
3580 * yield
3581 * ensure
3582 * Thread.current[:name] = oldvalue
3583 * end
3584 * end
3586 * The idiom may not work as dynamic scope if the methods are thread-local
3587 * and a given block switches fibers.
3589 * f = Fiber.new {
3590 * meth(1) {
3591 * Fiber.yield
3592 * }
3593 * }
3594 * meth(2) {
3595 * f.resume
3596 * }
3597 * f.resume
3598 * p Thread.current[:name]
3599 * #=> nil if fiber-local
3600 * #=> 2 if thread-local (the value 2 leaks outside of the meth method)
3602 * For thread-local variables, please see #thread_variable_get and
3603 * #thread_variable_set.
3607 static VALUE
3608 rb_thread_aref(VALUE thread, VALUE key)
3610 ID id = rb_check_id(&key);
3611 if (!id) return Qnil;
3612 return rb_thread_local_aref(thread, id);
3616 * call-seq:
3617 * thr.fetch(sym) -> obj
3618 * thr.fetch(sym) { } -> obj
3619 * thr.fetch(sym, default) -> obj
3621 * Returns a fiber-local for the given key. If the key can't be
3622 * found, there are several options: With no other arguments, it will
3623 * raise a KeyError exception; if <i>default</i> is given, then that
3624 * will be returned; if the optional code block is specified, then
3625 * that will be run and its result returned. See Thread#[] and
3626 * Hash#fetch.
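*
* For example:
*
* Thread.current[:answer] = 42
* Thread.current.fetch(:answer) #=> 42
* Thread.current.fetch(:missing, :none) #=> :none
* Thread.current.fetch(:missing) { |key| key } #=> :missing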
3628 static VALUE
3629 rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3631 VALUE key, val;
3632 ID id;
3633 rb_thread_t *target_th = rb_thread_ptr(self);
3634 int block_given;
3636 rb_check_arity(argc, 1, 2);
3637 key = argv[0];
3639 block_given = rb_block_given_p();
3640 if (block_given && argc == 2) {
3641 rb_warn("block supersedes default value argument");
3644 id = rb_check_id(&key);
3646 if (id == recursive_key) {
3647 return target_th->ec->local_storage_recursive_hash;
3649 else if (id && target_th->ec->local_storage &&
3650 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3651 return val;
3653 else if (block_given) {
3654 return rb_yield(key);
3656 else if (argc == 1) {
3657 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3659 else {
3660 return argv[1];
3664 static VALUE
3665 threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3667 if (id == recursive_key) {
3668 th->ec->local_storage_recursive_hash = val;
3669 return val;
3671 else {
3672 struct rb_id_table *local_storage = th->ec->local_storage;
3674 if (NIL_P(val)) {
3675 if (!local_storage) return Qnil;
3676 rb_id_table_delete(local_storage, id);
3677 return Qnil;
3679 else {
3680 if (local_storage == NULL) {
3681 th->ec->local_storage = local_storage = rb_id_table_create(0);
3683 rb_id_table_insert(local_storage, id, val);
3684 return val;
3689 VALUE
3690 rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3692 if (OBJ_FROZEN(thread)) {
3693 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3696 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3700 * call-seq:
3701 * thr[sym] = obj -> obj
3703 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3704 * using either a symbol or a string.
3706 * See also Thread#[].
3708 * For thread-local variables, please see #thread_variable_set and
3709 * #thread_variable_get.
3712 static VALUE
3713 rb_thread_aset(VALUE self, VALUE id, VALUE val)
3715 return rb_thread_local_aset(self, rb_to_id(id), val);
3719 * call-seq:
3720 * thr.thread_variable_get(key) -> obj or nil
3722 * Returns the value of a thread local variable that has been set. Note that
3723 * these are different from fiber local values. For fiber local values,
3724 * please see Thread#[] and Thread#[]=.
3726 * Thread local values are carried along with threads, and do not respect
3727 * fibers. For example:
3729 * Thread.new {
3730 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3731 * Thread.current["foo"] = "bar" # set a fiber local
3733 * Fiber.new {
3734 * Fiber.yield [
3735 * Thread.current.thread_variable_get("foo"), # get the thread local
3736 * Thread.current["foo"], # get the fiber local
3737 * ]
3738 * }.resume
3739 * }.join.value # => ['bar', nil]
3741 * The value "bar" is returned for the thread local, whereas nil is returned
3742 * for the fiber local. The fiber is executed in the same thread, so the
3743 * thread local values are available.
3746 static VALUE
3747 rb_thread_variable_get(VALUE thread, VALUE key)
3749 VALUE locals;
3751 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3752 return Qnil;
3754 locals = rb_thread_local_storage(thread);
3755 return rb_hash_aref(locals, rb_to_symbol(key));
3759 * call-seq:
3760 * thr.thread_variable_set(key, value)
3762 * Sets a thread local with +key+ to +value+. Note that these are local to
3763 * threads, and not to fibers. Please see Thread#thread_variable_get and
3764 * Thread#[] for more information.
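*
* For example:
*
* Thread.current.thread_variable_set(:debug, true)
* Thread.current.thread_variable_get(:debug) #=> true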
3767 static VALUE
3768 rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3770 VALUE locals;
3772 if (OBJ_FROZEN(thread)) {
3773 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3776 locals = rb_thread_local_storage(thread);
3777 return rb_hash_aset(locals, rb_to_symbol(key), val);
3781 * call-seq:
3782 * thr.key?(sym) -> true or false
3784 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3785 * variable.
3787 * me = Thread.current
3788 * me[:oliver] = "a"
3789 * me.key?(:oliver) #=> true
3790 * me.key?(:stanley) #=> false
3793 static VALUE
3794 rb_thread_key_p(VALUE self, VALUE key)
3796 VALUE val;
3797 ID id = rb_check_id(&key);
3798 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3800 if (!id || local_storage == NULL) {
3801 return Qfalse;
3803 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3806 static enum rb_id_table_iterator_result
3807 thread_keys_i(ID key, VALUE value, void *ary)
3809 rb_ary_push((VALUE)ary, ID2SYM(key));
3810 return ID_TABLE_CONTINUE;
3814 rb_thread_alone(void)
3816 // TODO
3817 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3821 * call-seq:
3822 * thr.keys -> array
3824 * Returns an array of the names of the fiber-local variables (as Symbols).
3826 * thr = Thread.new do
3827 * Thread.current[:cat] = 'meow'
3828 * Thread.current["dog"] = 'woof'
3829 * end
3830 * thr.join #=> #<Thread:0x401b3f10 dead>
3831 * thr.keys #=> [:dog, :cat]
3834 static VALUE
3835 rb_thread_keys(VALUE self)
3837 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3838 VALUE ary = rb_ary_new();
3840 if (local_storage) {
3841 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3843 return ary;
3846 static int
3847 keys_i(VALUE key, VALUE value, VALUE ary)
3849 rb_ary_push(ary, key);
3850 return ST_CONTINUE;
3854 * call-seq:
3855 * thr.thread_variables -> array
3857 * Returns an array of the names of the thread-local variables (as Symbols).
3859 * thr = Thread.new do
3860 * Thread.current.thread_variable_set(:cat, 'meow')
3861 * Thread.current.thread_variable_set("dog", 'woof')
3862 * end
3863 * thr.join #=> #<Thread:0x401b3f10 dead>
3864 * thr.thread_variables #=> [:dog, :cat]
3866 * Note that these are not fiber local variables. Please see Thread#[] and
3867 * Thread#thread_variable_get for more details.
3870 static VALUE
3871 rb_thread_variables(VALUE thread)
3873 VALUE locals;
3874 VALUE ary;
3876 ary = rb_ary_new();
3877 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3878 return ary;
3880 locals = rb_thread_local_storage(thread);
3881 rb_hash_foreach(locals, keys_i, ary);
3883 return ary;
3887 * call-seq:
3888 * thr.thread_variable?(key) -> true or false
3890 * Returns +true+ if the given string (or symbol) exists as a thread-local
3891 * variable.
3893 * me = Thread.current
3894 * me.thread_variable_set(:oliver, "a")
3895 * me.thread_variable?(:oliver) #=> true
3896 * me.thread_variable?(:stanley) #=> false
3898 * Note that these are not fiber local variables. Please see Thread#[] and
3899 * Thread#thread_variable_get for more details.
3902 static VALUE
3903 rb_thread_variable_p(VALUE thread, VALUE key)
3905 VALUE locals;
3907 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3908 return Qfalse;
3910 locals = rb_thread_local_storage(thread);
3912 return RBOOL(rb_hash_lookup(locals, rb_to_symbol(key)) != Qnil);
3916 * call-seq:
3917 * thr.priority -> integer
3919 * Returns the priority of <i>thr</i>. The default is inherited from the
3920 * thread that created the new thread, or zero for the
3921 * initial main thread; higher-priority threads will run more frequently
3922 * than lower-priority threads (but lower-priority threads can also run).
3924 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3925 * some platforms.
3927 * Thread.current.priority #=> 0
3930 static VALUE
3931 rb_thread_priority(VALUE thread)
3933 return INT2NUM(rb_thread_ptr(thread)->priority);
3938 * call-seq:
3939 * thr.priority= integer -> thr
3941 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3942 * will run more frequently than lower-priority threads (but lower-priority
3943 * threads can also run).
3945 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3946 * some platforms.
3948 * count1 = count2 = 0
3949 * a = Thread.new do
3950 * loop { count1 += 1 }
3951 * end
3952 * a.priority = -1
3954 * b = Thread.new do
3955 * loop { count2 += 1 }
3956 * end
3957 * b.priority = -2
3958 * sleep 1 #=> 1
3959 * count1 #=> 622504
3960 * count2 #=> 5832
3963 static VALUE
3964 rb_thread_priority_set(VALUE thread, VALUE prio)
3966 rb_thread_t *target_th = rb_thread_ptr(thread);
3967 int priority;
3969 #if USE_NATIVE_THREAD_PRIORITY
3970 target_th->priority = NUM2INT(prio);
3971 native_thread_apply_priority(th);
3972 #else
3973 priority = NUM2INT(prio);
3974 if (priority > RUBY_THREAD_PRIORITY_MAX) {
3975 priority = RUBY_THREAD_PRIORITY_MAX;
3977 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3978 priority = RUBY_THREAD_PRIORITY_MIN;
3980 target_th->priority = (int8_t)priority;
3981 #endif
3982 return INT2NUM(target_th->priority);
3985 /* for IO */
3987 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3990 * several Unix platforms support file descriptors bigger than FD_SETSIZE
3991 * in the select(2) system call.
3993 * - Linux 2.2.12 (?)
3994 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3995 * select(2) documents how to allocate fd_set dynamically.
3996 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3997 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3998 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3999 * select(2) documents how to allocate fd_set dynamically.
4000 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
4001 * - Solaris 8 has select_large_fdset
4002 * - Mac OS X 10.7 (Lion)
4003 * select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
4004 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
4005 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
4007 * When fd_set is not big enough to hold big file descriptors,
4008 * it should be allocated dynamically.
4009 * Note that this assumes fd_set is structured as a bitmap.
4011 * rb_fd_init allocates the memory.
4012 * rb_fd_term frees the memory.
4013 * rb_fd_set may re-allocate the bitmap.
4015 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
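/*
* A minimal usage sketch of this API (illustrative; error handling omitted):
* wait until `fd' becomes readable, even when `fd' exceeds FD_SETSIZE.
*
* rb_fdset_t set;
* rb_fd_init(&set);
* rb_fd_set(fd, &set); // may re-allocate the bitmap
* if (rb_fd_select(fd + 1, &set, NULL, NULL, NULL) > 0 &&
* rb_fd_isset(fd, &set)) {
* // fd is readable
* }
* rb_fd_term(&set); // release the bitmap
*/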
4018 void
4019 rb_fd_init(rb_fdset_t *fds)
4021 fds->maxfd = 0;
4022 fds->fdset = ALLOC(fd_set);
4023 FD_ZERO(fds->fdset);
4026 void
4027 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4029 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4031 if (size < sizeof(fd_set))
4032 size = sizeof(fd_set);
4033 dst->maxfd = src->maxfd;
4034 dst->fdset = xmalloc(size);
4035 memcpy(dst->fdset, src->fdset, size);
4038 void
4039 rb_fd_term(rb_fdset_t *fds)
4041 xfree(fds->fdset);
4042 fds->maxfd = 0;
4043 fds->fdset = 0;
4046 void
4047 rb_fd_zero(rb_fdset_t *fds)
4049 if (fds->fdset)
4050 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4053 static void
4054 rb_fd_resize(int n, rb_fdset_t *fds)
4056 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4057 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4059 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4060 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4062 if (m > o) {
4063 fds->fdset = xrealloc(fds->fdset, m);
4064 memset((char *)fds->fdset + o, 0, m - o);
4066 if (n >= fds->maxfd) fds->maxfd = n + 1;
4069 void
4070 rb_fd_set(int n, rb_fdset_t *fds)
4072 rb_fd_resize(n, fds);
4073 FD_SET(n, fds->fdset);
4076 void
4077 rb_fd_clr(int n, rb_fdset_t *fds)
4079 if (n >= fds->maxfd) return;
4080 FD_CLR(n, fds->fdset);
4084 rb_fd_isset(int n, const rb_fdset_t *fds)
4086 if (n >= fds->maxfd) return 0;
4087 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4090 void
4091 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4093 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4095 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4096 dst->maxfd = max;
4097 dst->fdset = xrealloc(dst->fdset, size);
4098 memcpy(dst->fdset, src, size);
4101 void
4102 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4104 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4106 if (size < sizeof(fd_set))
4107 size = sizeof(fd_set);
4108 dst->maxfd = src->maxfd;
4109 dst->fdset = xrealloc(dst->fdset, size);
4110 memcpy(dst->fdset, src->fdset, size);
4114 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4116 fd_set *r = NULL, *w = NULL, *e = NULL;
4117 if (readfds) {
4118 rb_fd_resize(n - 1, readfds);
4119 r = rb_fd_ptr(readfds);
4121 if (writefds) {
4122 rb_fd_resize(n - 1, writefds);
4123 w = rb_fd_ptr(writefds);
4125 if (exceptfds) {
4126 rb_fd_resize(n - 1, exceptfds);
4127 e = rb_fd_ptr(exceptfds);
4129 return select(n, r, w, e, timeout);
4132 #define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4134 #undef FD_ZERO
4135 #undef FD_SET
4136 #undef FD_CLR
4137 #undef FD_ISSET
4139 #define FD_ZERO(f) rb_fd_zero(f)
4140 #define FD_SET(i, f) rb_fd_set((i), (f))
4141 #define FD_CLR(i, f) rb_fd_clr((i), (f))
4142 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
4144 #elif defined(_WIN32)
4146 void
4147 rb_fd_init(rb_fdset_t *set)
4149 set->capa = FD_SETSIZE;
4150 set->fdset = ALLOC(fd_set);
4151 FD_ZERO(set->fdset);
4154 void
4155 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4157 rb_fd_init(dst);
4158 rb_fd_dup(dst, src);
4161 void
4162 rb_fd_term(rb_fdset_t *set)
4164 xfree(set->fdset);
4165 set->fdset = NULL;
4166 set->capa = 0;
4169 void
4170 rb_fd_set(int fd, rb_fdset_t *set)
4172 unsigned int i;
4173 SOCKET s = rb_w32_get_osfhandle(fd);
4175 for (i = 0; i < set->fdset->fd_count; i++) {
4176 if (set->fdset->fd_array[i] == s) {
4177 return;
4180 if (set->fdset->fd_count >= (unsigned)set->capa) {
4181 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4182 set->fdset =
4183 rb_xrealloc_mul_add(
4184 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4186 set->fdset->fd_array[set->fdset->fd_count++] = s;
4189 #undef FD_ZERO
4190 #undef FD_SET
4191 #undef FD_CLR
4192 #undef FD_ISSET
4194 #define FD_ZERO(f) rb_fd_zero(f)
4195 #define FD_SET(i, f) rb_fd_set((i), (f))
4196 #define FD_CLR(i, f) rb_fd_clr((i), (f))
4197 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
4199 #define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4201 #endif
4203 #ifndef rb_fd_no_init
4204 #define rb_fd_no_init(fds) (void)(fds)
4205 #endif
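/* Returns TRUE when an interrupted (EINTR/ERESTART) or spuriously woken
* select/poll call should be retried; *rel is updated with the remaining
* time so that retries do not extend the original deadline. */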
4207 static int
4208 wait_retryable(int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4210 if (*result < 0) {
4211 switch (errnum) {
4212 case EINTR:
4213 #ifdef ERESTART
4214 case ERESTART:
4215 #endif
4216 *result = 0;
4217 if (rel && hrtime_update_expire(rel, end)) {
4218 *rel = 0;
4220 return TRUE;
4222 return FALSE;
4224 else if (*result == 0) {
4225 /* check for spurious wakeup */
4226 if (rel) {
4227 return !hrtime_update_expire(rel, end);
4229 return TRUE;
4231 return FALSE;
4234 struct select_set {
4235 int max;
4236 rb_thread_t *th;
4237 rb_fdset_t *rset;
4238 rb_fdset_t *wset;
4239 rb_fdset_t *eset;
4240 rb_fdset_t orig_rset;
4241 rb_fdset_t orig_wset;
4242 rb_fdset_t orig_eset;
4243 struct timeval *timeout;
4246 static VALUE
4247 select_set_free(VALUE p)
4249 struct select_set *set = (struct select_set *)p;
4251 rb_fd_term(&set->orig_rset);
4252 rb_fd_term(&set->orig_wset);
4253 rb_fd_term(&set->orig_eset);
4255 return Qfalse;
4258 static VALUE
4259 do_select(VALUE p)
4261 struct select_set *set = (struct select_set *)p;
4262 int result = 0;
4263 int lerrno;
4264 rb_hrtime_t *to, rel, end = 0;
4266 timeout_prepare(&to, &rel, &end, set->timeout);
4267 #define restore_fdset(dst, src) \
4268 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4269 #define do_select_update() \
4270 (restore_fdset(set->rset, &set->orig_rset), \
4271 restore_fdset(set->wset, &set->orig_wset), \
4272 restore_fdset(set->eset, &set->orig_eset), \
4273 TRUE)
4275 do {
4276 lerrno = 0;
4278 BLOCKING_REGION(set->th, {
4279 struct timeval tv;
4281 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4282 result = native_fd_select(set->max,
4283 set->rset, set->wset, set->eset,
4284 rb_hrtime2timeval(&tv, to), set->th);
4285 if (result < 0) lerrno = errno;
4287 }, ubf_select, set->th, TRUE);
4289 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4290 } while (wait_retryable(&result, lerrno, to, end) && do_select_update());
4292 if (result < 0) {
4293 errno = lerrno;
4296 return (VALUE)result;
4300 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4301 struct timeval *timeout)
4303 struct select_set set;
4305 set.th = GET_THREAD();
4306 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4307 set.max = max;
4308 set.rset = read;
4309 set.wset = write;
4310 set.eset = except;
4311 set.timeout = timeout;
4313 if (!set.rset && !set.wset && !set.eset) {
4314 if (!timeout) {
4315 rb_thread_sleep_forever();
4316 return 0;
4318 rb_thread_wait_for(*timeout);
4319 return 0;
4322 #define fd_init_copy(f) do { \
4323 if (set.f) { \
4324 rb_fd_resize(set.max - 1, set.f); \
4325 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4326 rb_fd_init_copy(&set.orig_##f, set.f); \
4329 else { \
4330 rb_fd_no_init(&set.orig_##f); \
4332 } while (0)
4333 fd_init_copy(rset);
4334 fd_init_copy(wset);
4335 fd_init_copy(eset);
4336 #undef fd_init_copy
4338 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4341 #ifdef USE_POLL
4343 /* The same as the Linux kernel. TODO: make a platform-independent definition. */
4344 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4345 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4346 #define POLLEX_SET (POLLPRI)
4348 #ifndef POLLERR_SET /* defined for FreeBSD for now */
4349 # define POLLERR_SET (0)
4350 #endif
4353 * returns a mask of events
4356 rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4358 struct pollfd fds[1] = {{
4359 .fd = fd,
4360 .events = (short)events,
4361 .revents = 0,
4363 int result = 0;
4364 nfds_t nfds;
4365 struct waiting_fd wfd;
4366 enum ruby_tag_type state;
4367 volatile int lerrno;
4369 rb_execution_context_t *ec = GET_EC();
4370 rb_thread_t *th = rb_ec_thread_ptr(ec);
4372 thread_io_setup_wfd(th, fd, &wfd);
4374 if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
4375 // fd is readable
4376 state = 0;
4377 fds[0].revents = events;
4378 errno = 0;
4380 else {
4381 EC_PUSH_TAG(wfd.th->ec);
4382 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4383 rb_hrtime_t *to, rel, end = 0;
4384 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4385 timeout_prepare(&to, &rel, &end, timeout);
4386 do {
4387 nfds = 1;
4389 lerrno = 0;
4390 BLOCKING_REGION(wfd.th, {
4391 struct timespec ts;
4393 if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
4394 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4395 if (result < 0) lerrno = errno;
4397 }, ubf_select, wfd.th, TRUE);
4399 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4400 } while (wait_retryable(&result, lerrno, to, end));
4402 EC_POP_TAG();
4405 thread_io_wake_pending_closer(&wfd);
4407 if (state) {
4408 EC_JUMP_TAG(wfd.th->ec, state);
4411 if (result < 0) {
4412 errno = lerrno;
4413 return -1;
4416 if (fds[0].revents & POLLNVAL) {
4417 errno = EBADF;
4418 return -1;
4422 * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
4423 * Therefore we need to fix them up.
4425 result = 0;
4426 if (fds[0].revents & POLLIN_SET)
4427 result |= RB_WAITFD_IN;
4428 if (fds[0].revents & POLLOUT_SET)
4429 result |= RB_WAITFD_OUT;
4430 if (fds[0].revents & POLLEX_SET)
4431 result |= RB_WAITFD_PRI;
4433 /* treat all requested events as ready if there is an error */
4434 if (fds[0].revents & POLLERR_SET)
4435 result |= events;
4437 return result;
4439 #else /* ! USE_POLL - implement rb_thread_wait_for_single_fd() using select() */
4440 struct select_args {
4441 union {
4442 int fd;
4443 int error;
4444 } as;
4445 rb_fdset_t *read;
4446 rb_fdset_t *write;
4447 rb_fdset_t *except;
4448 struct waiting_fd wfd;
4449 struct timeval *tv;
4452 static VALUE
4453 select_single(VALUE ptr)
4455 struct select_args *args = (struct select_args *)ptr;
4456 int r;
4458 r = rb_thread_fd_select(args->as.fd + 1,
4459 args->read, args->write, args->except, args->tv);
4460 if (r == -1)
4461 args->as.error = errno;
4462 if (r > 0) {
4463 r = 0;
4464 if (args->read && rb_fd_isset(args->as.fd, args->read))
4465 r |= RB_WAITFD_IN;
4466 if (args->write && rb_fd_isset(args->as.fd, args->write))
4467 r |= RB_WAITFD_OUT;
4468 if (args->except && rb_fd_isset(args->as.fd, args->except))
4469 r |= RB_WAITFD_PRI;
4471 return (VALUE)r;
4474 static VALUE
4475 select_single_cleanup(VALUE ptr)
4477 struct select_args *args = (struct select_args *)ptr;
4479 thread_io_wake_pending_closer(&args->wfd);
4480 if (args->read) rb_fd_term(args->read);
4481 if (args->write) rb_fd_term(args->write);
4482 if (args->except) rb_fd_term(args->except);
4484 return (VALUE)-1;
4487 static rb_fdset_t *
4488 init_set_fd(int fd, rb_fdset_t *fds)
4490 if (fd < 0) {
4491 return 0;
4493 rb_fd_init(fds);
4494 rb_fd_set(fd, fds);
4496 return fds;
4500 rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4502 rb_fdset_t rfds, wfds, efds;
4503 struct select_args args;
4504 int r;
4505 VALUE ptr = (VALUE)&args;
4506 rb_execution_context_t *ec = GET_EC();
4507 rb_thread_t *th = rb_ec_thread_ptr(ec);
4509 args.as.fd = fd;
4510 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4511 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4512 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4513 args.tv = timeout;
4514 thread_io_setup_wfd(th, fd, &args.wfd);
4516 r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4517 if (r == -1)
4518 errno = args.as.error;
4520 return r;
4522 #endif /* ! USE_POLL */
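/*
 * Usage sketch (editor's addition): regardless of which implementation above
 * is compiled in, rb_thread_wait_for_single_fd() returns a mask of the ready
 * RB_WAITFD_* events, 0 on timeout, or -1 with errno set. A hypothetical
 * caller waiting up to 250ms for readability:
 *
 *     struct timeval tv = { 0, 250000 };
 *     int ready = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *     if (ready < 0) rb_sys_fail("wait_for_single_fd");
 *     if (ready & RB_WAITFD_IN) {
 *         // readable (modulo spurious wakeups; callers should still use
 *         // non-blocking reads)
 *     }
 */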
4525 * for GC
4528 #ifdef USE_CONSERVATIVE_STACK_END
4529 void
4530 rb_gc_set_stack_end(VALUE **stack_end_p)
4532 VALUE stack_end;
4533 *stack_end_p = &stack_end;
4535 #endif
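/*
 * Editor's note: with conservative stack scanning, the GC needs an address
 * at the live end of the machine stack; taking the address of a local in a
 * non-inlined callee, as above, is a portable way to obtain one:
 *
 *     VALUE *stack_end;
 *     rb_gc_set_stack_end(&stack_end);
 *     // everything between the recorded stack start and stack_end is then
 *     // scanned conservatively for VALUEs
 */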
4541 void
4542 rb_threadptr_check_signal(rb_thread_t *mth)
4544 /* mth must be main_thread */
4545 if (rb_signal_buff_size() > 0) {
4546 /* wakeup main thread */
4547 threadptr_trap_interrupt(mth);
4551 static void
4552 async_bug_fd(const char *mesg, int errno_arg, int fd)
4554 char buff[64];
4555 size_t n = strlcpy(buff, mesg, sizeof(buff));
4556 if (n < sizeof(buff)-3) {
4557 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4559 rb_async_bug_errno(buff, errno_arg);
4562 /* VM-dependent API is not available for this function */
4563 static int
4564 consume_communication_pipe(int fd)
4566 #if USE_EVENTFD
4567 uint64_t buff[1];
4568 #else
4569 /* the buffer can be shared because no one else refers to it. */
4570 static char buff[1024];
4571 #endif
4572 ssize_t result;
4573 int ret = FALSE; /* for rb_sigwait_sleep */
4575 while (1) {
4576 result = read(fd, buff, sizeof(buff));
4577 #if USE_EVENTFD
4578 RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4579 #else
4580 RUBY_DEBUG_LOG("result:%d", (int)result);
4581 #endif
4582 if (result > 0) {
4583 ret = TRUE;
4584 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4585 return ret;
4588 else if (result == 0) {
4589 return ret;
4591 else if (result < 0) {
4592 int e = errno;
4593 switch (e) {
4594 case EINTR:
4595 continue; /* retry */
4596 case EAGAIN:
4597 #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4598 case EWOULDBLOCK:
4599 #endif
4600 return ret;
4601 default:
4602 async_bug_fd("consume_communication_pipe: read", e, fd);
4608 void
4609 rb_thread_stop_timer_thread(void)
4611 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4612 native_reset_timer_thread();
4616 void
4617 rb_thread_reset_timer_thread(void)
4619 native_reset_timer_thread();
4622 void
4623 rb_thread_start_timer_thread(void)
4625 system_working = 1;
4626 rb_thread_create_timer_thread();
4629 static int
4630 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4632 int i;
4633 VALUE coverage = (VALUE)val;
4634 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4635 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4637 if (lines) {
4638 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4639 rb_ary_clear(lines);
4641 else {
4642 int i;
4643 for (i = 0; i < RARRAY_LEN(lines); i++) {
4644 if (RARRAY_AREF(lines, i) != Qnil)
4645 RARRAY_ASET(lines, i, INT2FIX(0));
4649 if (branches) {
4650 VALUE counters = RARRAY_AREF(branches, 1);
4651 for (i = 0; i < RARRAY_LEN(counters); i++) {
4652 RARRAY_ASET(counters, i, INT2FIX(0));
4656 return ST_CONTINUE;
4659 void
4660 rb_clear_coverages(void)
4662 VALUE coverages = rb_get_coverages();
4663 if (RTEST(coverages)) {
4664 rb_hash_foreach(coverages, clear_coverage_i, 0);
4668 #if defined(HAVE_WORKING_FORK)
4670 static void
4671 rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4673 rb_thread_t *i = 0;
4674 rb_vm_t *vm = th->vm;
4675 rb_ractor_t *r = th->ractor;
4676 vm->ractor.main_ractor = r;
4677 vm->ractor.main_thread = th;
4678 r->threads.main = th;
4679 r->status_ = ractor_created;
4681 thread_sched_atfork(TH_SCHED(th));
4682 ubf_list_atfork();
4684 // OK. Only this thread accesses the thread lists at this point:
4685 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4686 ccan_list_for_each(&r->threads.set, i, lt_node) {
4687 atfork(i, th);
4690 rb_vm_living_threads_init(vm);
4692 rb_ractor_atfork(vm, th);
4693 rb_vm_postponed_job_atfork();
4695 /* may be held by RJIT threads in parent */
4696 rb_native_mutex_initialize(&vm->workqueue_lock);
4698 /* may be held by any thread in parent */
4699 rb_native_mutex_initialize(&th->interrupt_lock);
4701 vm->fork_gen++;
4702 rb_ractor_sleeper_threads_clear(th->ractor);
4703 rb_clear_coverages();
4705 // restart the timer thread (timer threads access `vm->waitpid_lock` and so on).
4706 rb_thread_reset_timer_thread();
4707 rb_thread_start_timer_thread();
4709 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4710 VM_ASSERT(vm->ractor.cnt == 1);
4713 static void
4714 terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4716 if (th != current_th) {
4717 rb_mutex_abandon_keeping_mutexes(th);
4718 rb_mutex_abandon_locking_mutex(th);
4719 thread_cleanup_func(th, TRUE);
4723 void rb_fiber_atfork(rb_thread_t *);
4724 void
4725 rb_thread_atfork(void)
4727 rb_thread_t *th = GET_THREAD();
4728 rb_threadptr_pending_interrupt_clear(th);
4729 rb_thread_atfork_internal(th, terminate_atfork_i);
4730 th->join_list = NULL;
4731 rb_fiber_atfork(th);
4733 /* We don't want to reproduce CVE-2003-0900. */
4734 rb_reset_random_seed();
4737 static void
4738 terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4740 if (th != current_th) {
4741 thread_cleanup_func_before_exec(th);
4745 void
4746 rb_thread_atfork_before_exec(void)
4748 rb_thread_t *th = GET_THREAD();
4749 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4751 #else
4752 void
4753 rb_thread_atfork(void)
4757 void
4758 rb_thread_atfork_before_exec(void)
4761 #endif
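/*
 * Usage sketch (editor's addition): an embedder forking on its own must
 * mirror what process.c does, so that the child wakes up with a single
 * Ruby thread, fresh locks, and a running timer thread:
 *
 *     rb_pid_t pid = fork();
 *     if (pid == 0) {
 *         rb_thread_atfork(); // child: discard other threads, reset state
 *     }
 *     // when the child will exec(2) immediately, rb_thread_atfork_before_exec()
 *     // is the lighter-weight variant
 */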
4763 struct thgroup {
4764 int enclosed;
4767 static const rb_data_type_t thgroup_data_type = {
4768 "thgroup",
4771 RUBY_TYPED_DEFAULT_FREE,
4772 NULL, // No external memory to report
4774 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
4778 * Document-class: ThreadGroup
4780 * ThreadGroup provides a means of keeping track of a number of threads as a
4781 * group.
4783 * A given Thread object can only belong to one ThreadGroup at a time; adding
4784 * a thread to a new group will remove it from any previous group.
4786 * Newly created threads belong to the same group as the thread from which they
4787 * were created.
4791 * Document-const: Default
4793 * The default ThreadGroup created when Ruby starts; all Threads belong to it
4794 * by default.
4796 static VALUE
4797 thgroup_s_alloc(VALUE klass)
4799 VALUE group;
4800 struct thgroup *data;
4802 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4803 data->enclosed = 0;
4805 return group;
4809 * call-seq:
4810 * thgrp.list -> array
4812 * Returns an array of all existing Thread objects that belong to this group.
4814 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4817 static VALUE
4818 thgroup_list(VALUE group)
4820 VALUE ary = rb_ary_new();
4821 rb_thread_t *th = 0;
4822 rb_ractor_t *r = GET_RACTOR();
4824 ccan_list_for_each(&r->threads.set, th, lt_node) {
4825 if (th->thgroup == group) {
4826 rb_ary_push(ary, th->self);
4829 return ary;
4834 * call-seq:
4835 * thgrp.enclose -> thgrp
4837 * Prevents threads from being added to or removed from the receiving
4838 * ThreadGroup.
4840 * New threads can still be started in an enclosed ThreadGroup.
4842 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4843 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4844 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4845 * tg.add thr
4846 * #=> ThreadError: can't move from the enclosed thread group
4849 static VALUE
4850 thgroup_enclose(VALUE group)
4852 struct thgroup *data;
4854 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4855 data->enclosed = 1;
4857 return group;
4862 * call-seq:
4863 * thgrp.enclosed? -> true or false
4865 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4868 static VALUE
4869 thgroup_enclosed_p(VALUE group)
4871 struct thgroup *data;
4873 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4874 return RBOOL(data->enclosed);
4879 * call-seq:
4880 * thgrp.add(thread) -> thgrp
4882 * Adds the given +thread+ to this group, removing it from any other
4883 * group to which it may have previously been a member.
4885 * puts "Initial group is #{ThreadGroup::Default.list}"
4886 * tg = ThreadGroup.new
4887 * t1 = Thread.new { sleep }
4888 * t2 = Thread.new { sleep }
4889 * puts "t1 is #{t1}"
4890 * puts "t2 is #{t2}"
4891 * tg.add(t1)
4892 * puts "Initial group now #{ThreadGroup::Default.list}"
4893 * puts "tg group now #{tg.list}"
4895 * This will produce:
4897 * Initial group is #<Thread:0x401bdf4c>
4898 * t1 is #<Thread:0x401b3c90>
4899 * t2 is #<Thread:0x401b3c18>
4900 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4901 * tg group now #<Thread:0x401b3c90>
4904 static VALUE
4905 thgroup_add(VALUE group, VALUE thread)
4907 rb_thread_t *target_th = rb_thread_ptr(thread);
4908 struct thgroup *data;
4910 if (OBJ_FROZEN(group)) {
4911 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4913 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4914 if (data->enclosed) {
4915 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4918 if (OBJ_FROZEN(target_th->thgroup)) {
4919 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4921 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4922 if (data->enclosed) {
4923 rb_raise(rb_eThreadError,
4924 "can't move from the enclosed thread group");
4927 target_th->thgroup = group;
4928 return group;
4932 * Document-class: ThreadShield
4934 static void
4935 thread_shield_mark(void *ptr)
4937 rb_gc_mark((VALUE)ptr);
4940 static const rb_data_type_t thread_shield_data_type = {
4941 "thread_shield",
4942 {thread_shield_mark, 0, 0,},
4943 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4946 static VALUE
4947 thread_shield_alloc(VALUE klass)
4949 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4952 #define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4953 #define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
4954 #define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4955 #define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
4956 STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
4957 static inline unsigned int
4958 rb_thread_shield_waiting(VALUE b)
4960 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
4963 static inline void
4964 rb_thread_shield_waiting_inc(VALUE b)
4966 unsigned int w = rb_thread_shield_waiting(b);
4967 w++;
4968 if (w > THREAD_SHIELD_WAITING_MAX)
4969 rb_raise(rb_eRuntimeError, "waiting count overflow");
4970 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4971 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4974 static inline void
4975 rb_thread_shield_waiting_dec(VALUE b)
4977 unsigned int w = rb_thread_shield_waiting(b);
4978 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4979 w--;
4980 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4981 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4984 VALUE
4985 rb_thread_shield_new(void)
4987 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4988 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4989 return thread_shield;
4992 bool
4993 rb_thread_shield_owned(VALUE self)
4995 VALUE mutex = GetThreadShieldPtr(self);
4996 if (!mutex) return false;
4998 rb_mutex_t *m = mutex_ptr(mutex);
5000 return m->fiber == GET_EC()->fiber_ptr;
5004 * Wait on a thread shield.
5006 * Returns
5007 * true: acquired the thread shield
5008 * false: the thread shield was destroyed and no other threads are waiting
5009 * nil: the thread shield was destroyed but it is still in use
5011 VALUE
5012 rb_thread_shield_wait(VALUE self)
5014 VALUE mutex = GetThreadShieldPtr(self);
5015 rb_mutex_t *m;
5017 if (!mutex) return Qfalse;
5018 m = mutex_ptr(mutex);
5019 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5020 rb_thread_shield_waiting_inc(self);
5021 rb_mutex_lock(mutex);
5022 rb_thread_shield_waiting_dec(self);
5023 if (DATA_PTR(self)) return Qtrue;
5024 rb_mutex_unlock(mutex);
5025 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5028 static VALUE
5029 thread_shield_get_mutex(VALUE self)
5031 VALUE mutex = GetThreadShieldPtr(self);
5032 if (!mutex)
5033 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5034 return mutex;
5038 * Release a thread shield, and return true if it has waiting threads.
5040 VALUE
5041 rb_thread_shield_release(VALUE self)
5043 VALUE mutex = thread_shield_get_mutex(self);
5044 rb_mutex_unlock(mutex);
5045 return RBOOL(rb_thread_shield_waiting(self) > 0);
5049 * Release and destroy a thread shield, and return true if it has waiting threads.
5051 VALUE
5052 rb_thread_shield_destroy(VALUE self)
5054 VALUE mutex = thread_shield_get_mutex(self);
5055 DATA_PTR(self) = 0;
5056 rb_mutex_unlock(mutex);
5057 return RBOOL(rb_thread_shield_waiting(self) > 0);
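/*
 * Usage sketch (editor's addition) of the shield protocol, roughly as the
 * require lock in load.c uses it (schematic; error handling omitted):
 *
 *     VALUE shield = rb_thread_shield_new(); // the creator now owns the lock
 *     // ... publish `shield` where contending threads can find it ...
 *
 *     // a contending thread:
 *     VALUE r = rb_thread_shield_wait(shield);
 *     // Qtrue: acquired; Qfalse/Qnil: the shield was destroyed (see above)
 *
 *     // the owner, when finished:
 *     rb_thread_shield_destroy(shield); // Qtrue if someone was waiting
 */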
5060 static VALUE
5061 threadptr_recursive_hash(rb_thread_t *th)
5063 return th->ec->local_storage_recursive_hash;
5066 static void
5067 threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5069 th->ec->local_storage_recursive_hash = hash;
5072 ID rb_frame_last_func(void);
5075 * Returns the current "recursive list" used to detect recursion.
5076 * This list is a hash table, unique for the current thread and for
5077 * the current __callee__.
5080 static VALUE
5081 recursive_list_access(VALUE sym)
5083 rb_thread_t *th = GET_THREAD();
5084 VALUE hash = threadptr_recursive_hash(th);
5085 VALUE list;
5086 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5087 hash = rb_ident_hash_new();
5088 threadptr_recursive_hash_set(th, hash);
5089 list = Qnil;
5091 else {
5092 list = rb_hash_aref(hash, sym);
5094 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5095 list = rb_ident_hash_new();
5096 rb_hash_aset(hash, sym, list);
5098 return list;
5102 * Returns true if and only if obj (or the pair <obj, paired_obj>) is already
5103 * in the recursion list.
5104 * Assumes the recursion list is valid.
5107 static bool
5108 recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5110 #if SIZEOF_LONG == SIZEOF_VOIDP
5111 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5112 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5113 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5114 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5115 #endif
5117 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5118 if (UNDEF_P(pair_list))
5119 return false;
5120 if (paired_obj_id) {
5121 if (!RB_TYPE_P(pair_list, T_HASH)) {
5122 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5123 return false;
5125 else {
5126 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5127 return false;
5130 return true;
5134 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5135 * For a single obj, it sets list[obj] to Qtrue.
5136 * For a pair, it sets list[obj] to paired_obj_id if possible,
5137 * otherwise list[obj] becomes a hash like:
5138 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5139 * Assumes the recursion list is valid.
5142 static void
5143 recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5145 VALUE pair_list;
5147 if (!paired_obj) {
5148 rb_hash_aset(list, obj, Qtrue);
5150 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5151 rb_hash_aset(list, obj, paired_obj);
5153 else {
5154 if (!RB_TYPE_P(pair_list, T_HASH)){
5155 VALUE other_paired_obj = pair_list;
5156 pair_list = rb_hash_new();
5157 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5158 rb_hash_aset(list, obj, pair_list);
5160 rb_hash_aset(pair_list, paired_obj, Qtrue);
5165 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5166 * For a pair, if list[obj] is a hash, then paired_obj_id is
5167 * removed from the hash and no attempt is made to simplify
5168 * list[obj] from {only_one_paired_id => true} to only_one_paired_id
5169 * Assumes the recursion list is valid.
5172 static int
5173 recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5175 if (paired_obj) {
5176 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5177 if (UNDEF_P(pair_list)) {
5178 return 0;
5180 if (RB_TYPE_P(pair_list, T_HASH)) {
5181 rb_hash_delete_entry(pair_list, paired_obj);
5182 if (!RHASH_EMPTY_P(pair_list)) {
5183 return 1; /* keep the hash until it is empty */
5187 rb_hash_delete_entry(list, obj);
5188 return 1;
5191 struct exec_recursive_params {
5192 VALUE (*func) (VALUE, VALUE, int);
5193 VALUE list;
5194 VALUE obj;
5195 VALUE pairid;
5196 VALUE arg;
5199 static VALUE
5200 exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5202 struct exec_recursive_params *p = (void *)data;
5203 return (*p->func)(p->obj, p->arg, FALSE);
5207 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5208 * current method is called recursively on obj, or on the pair <obj, pairid>.
5209 * If outer is 0, then the innermost func will be called with recursive set
5210 * to true, otherwise the outermost func will be called. In the latter case,
5211 * all inner calls to func are short-circuited by throw.
5212 * Implementation details: the value thrown is the recursive list, which is
5213 * specific to the current method and unlikely to be caught anywhere else.
5214 * list[recursive_key] is used as a flag for the outermost call.
5217 static VALUE
5218 exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5220 VALUE result = Qundef;
5221 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5222 struct exec_recursive_params p;
5223 int outermost;
5224 p.list = recursive_list_access(sym);
5225 p.obj = obj;
5226 p.pairid = pairid;
5227 p.arg = arg;
5228 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5230 if (recursive_check(p.list, p.obj, pairid)) {
5231 if (outer && !outermost) {
5232 rb_throw_obj(p.list, p.list);
5234 return (*func)(obj, arg, TRUE);
5236 else {
5237 enum ruby_tag_type state;
5239 p.func = func;
5241 if (outermost) {
5242 recursive_push(p.list, ID2SYM(recursive_key), 0);
5243 recursive_push(p.list, p.obj, p.pairid);
5244 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5245 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5246 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5247 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5248 if (result == p.list) {
5249 result = (*func)(obj, arg, TRUE);
5252 else {
5253 volatile VALUE ret = Qundef;
5254 recursive_push(p.list, p.obj, p.pairid);
5255 EC_PUSH_TAG(GET_EC());
5256 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5257 ret = (*func)(obj, arg, FALSE);
5259 EC_POP_TAG();
5260 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5261 goto invalid;
5263 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5264 result = ret;
5267 *(volatile struct exec_recursive_params *)&p;
5268 return result;
5270 invalid:
5271 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5272 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5273 sym, rb_thread_current());
5274 UNREACHABLE_RETURN(Qundef);
5278 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5279 * current method is called recursively on obj
5282 VALUE
5283 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5285 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5289 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5290 * current method is called recursively on the ordered pair <obj, paired_obj>
5293 VALUE
5294 rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5296 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5300 * If recursion is detected on the current method and obj, the outermost
5301 * func will be called with (obj, arg, true). All inner calls to func will
5302 * be short-circuited using throw.
5305 VALUE
5306 rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5308 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5311 VALUE
5312 rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5314 return exec_recursive(func, obj, 0, arg, 1, mid);
5318 * If recursion is detected on the current method, obj and paired_obj,
5319 * the outermost func will be called with (obj, arg, true). All inner
5320 * calls to func will be short-circuited using throw.
5323 VALUE
5324 rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5326 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
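/*
 * Usage sketch (editor's addition): the classic client of these helpers is
 * a recursion-safe #inspect. `inspect_body` below is a hypothetical method
 * that may re-enter exec_recursive through the same callback:
 *
 *     static VALUE
 *     safe_inspect_i(VALUE obj, VALUE arg, int recursive)
 *     {
 *         if (recursive) return rb_str_new_cstr("[...]");
 *         return rb_funcall(obj, rb_intern("inspect_body"), 0);
 *     }
 *
 *     // rb_exec_recursive(safe_inspect_i, obj, 0) then yields "[...]" for
 *     // self-referential structures instead of overflowing the stack.
 */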
5330 * call-seq:
5331 * thread.backtrace -> array or nil
5333 * Returns the current backtrace of the target thread.
5337 static VALUE
5338 rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5340 return rb_vm_thread_backtrace(argc, argv, thval);
5343 /* call-seq:
5344 * thread.backtrace_locations(*args) -> array or nil
5346 * Returns the execution stack for the target thread---an array containing
5347 * backtrace location objects.
5349 * See Thread::Backtrace::Location for more information.
5351 * This method behaves similarly to Kernel#caller_locations except it applies
5352 * to a specific thread.
5354 static VALUE
5355 rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5357 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5360 void
5361 Init_Thread_Mutex(void)
5363 rb_thread_t *th = GET_THREAD();
5365 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5366 rb_native_mutex_initialize(&th->interrupt_lock);
5370 * Document-class: ThreadError
5372 * Raised when an invalid operation is attempted on a thread.
5374 * For example, when no other thread has been started:
5376 * Thread.stop
5378 * This will raise the following exception:
5380 * ThreadError: stopping only thread
5381 * note: use sleep to stop forever
5384 void
5385 Init_Thread(void)
5387 VALUE cThGroup;
5388 rb_thread_t *th = GET_THREAD();
5390 sym_never = ID2SYM(rb_intern_const("never"));
5391 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5392 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5394 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5395 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5396 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5397 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5398 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5399 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5400 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5401 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5402 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5403 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5404 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5405 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5406 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5407 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5408 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5409 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5410 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5411 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5412 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5414 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5415 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5416 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5417 rb_define_method(rb_cThread, "value", thread_value, 0);
5418 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5419 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5420 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5421 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5422 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5423 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5424 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5425 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5426 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5427 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5428 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5429 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5430 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5431 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5432 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5433 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5434 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5435 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5436 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5437 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5438 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5439 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5440 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5441 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5442 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5443 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5445 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5446 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5447 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5448 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5449 rb_define_alias(rb_cThread, "inspect", "to_s");
5451 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5452 "stream closed in another thread");
5454 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5455 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5456 rb_define_method(cThGroup, "list", thgroup_list, 0);
5457 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5458 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5459 rb_define_method(cThGroup, "add", thgroup_add, 1);
5462 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5463 rb_define_const(cThGroup, "Default", th->thgroup);
5466 rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
5468 /* init thread core */
5470 /* main thread setting */
5472 /* acquire global vm lock */
5473 #ifdef HAVE_PTHREAD_NP_H
5474 VM_ASSERT(TH_SCHED(th)->running == th);
5475 #endif
5476 // thread_sched_to_running() should not be called because
5477 // it assumes the thread was blocked by thread_sched_to_waiting().
5478 // thread_sched_to_running(sched, th);
5480 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5481 th->pending_interrupt_queue_checked = 0;
5482 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5486 rb_thread_create_timer_thread();
5488 Init_thread_sync();
5490 // TODO: Suppress unused function warning for now
5491 // if (0) rb_thread_sched_destroy(NULL);
5495 ruby_native_thread_p(void)
5497 rb_thread_t *th = ruby_thread_from_native();
5499 return th != 0;
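/*
 * Editor's note: this predicate is the guard for code that may run on
 * foreign native threads (for example, a hypothetical C callback invoked
 * by another library), which must not call into the VM directly:
 *
 *     if (!ruby_native_thread_p()) {
 *         // not a Ruby-managed thread: hand the work off to a Ruby thread
 *         // instead of calling any rb_* API from here
 *         return;
 *     }
 */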
5502 #ifdef NON_SCALAR_THREAD_ID
5503 #define thread_id_str(th) (NULL)
5504 #else
5505 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5506 #endif
5508 static void
5509 debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5511 rb_thread_t *th = 0;
5512 VALUE sep = rb_str_new_cstr("\n ");
5514 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5515 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5516 (void *)GET_THREAD(), (void *)r->threads.main);
5518 ccan_list_for_each(&r->threads.set, th, lt_node) {
5519 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5520 "native:%p int:%u",
5521 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5523 if (th->locking_mutex) {
5524 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5525 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5526 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5530 struct rb_waiting_list *list = th->join_list;
5531 while (list) {
5532 rb_str_catf(msg, "\n depended by: rb_thread_t:%p", (void *)list->thread);
5533 list = list->next;
5536 rb_str_catf(msg, "\n ");
5537 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5538 rb_str_catf(msg, "\n");
5542 static void
5543 rb_check_deadlock(rb_ractor_t *r)
5545 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5547 #ifdef RUBY_THREAD_PTHREAD_H
5548 if (r->threads.sched.readyq_cnt > 0) return;
5549 #endif
5551 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5552 int ltnum = rb_ractor_living_thread_num(r);
5554 if (ltnum > sleeper_num) return;
5555 if (ltnum < sleeper_num) rb_bug("sleeper count must not exceed the living thread count");
5557 int found = 0;
5558 rb_thread_t *th = NULL;
5560 ccan_list_for_each(&r->threads.set, th, lt_node) {
5561 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5562 found = 1;
5564 else if (th->locking_mutex) {
5565 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5566 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5567 found = 1;
5570 if (found)
5571 break;
5574 if (!found) {
5575 VALUE argv[2];
5576 argv[0] = rb_eFatal;
5577 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5578 debug_deadlock_check(r, argv[1]);
5579 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5580 rb_threadptr_raise(r->threads.main, 2, argv);
5584 // Used for VM memsize reporting. Returns the size of a list of waiting_fd
5585 // structs. Defined here because the struct definition lives here as well.
5586 size_t
5587 rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
5589 struct waiting_fd *waitfd = 0;
5590 size_t size = 0;
5592 ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
5593 size += sizeof(struct waiting_fd);
5596 return size;
5599 static void
5600 update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5602 const rb_control_frame_t *cfp = GET_EC()->cfp;
5603 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5604 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5605 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5606 if (lines) {
5607 long line = rb_sourceline() - 1;
5608 long count;
5609 VALUE num;
5610 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5611 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5612 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5613 rb_ary_push(lines, LONG2FIX(line + 1));
5614 return;
5616 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5617 return;
5619 num = RARRAY_AREF(lines, line);
5620 if (!FIXNUM_P(num)) return;
5621 count = FIX2LONG(num) + 1;
5622 if (POSFIXABLE(count)) {
5623 RARRAY_ASET(lines, line, LONG2FIX(count));
5629 static void
5630 update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5632 const rb_control_frame_t *cfp = GET_EC()->cfp;
5633 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5634 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5635 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5636 if (branches) {
5637 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5638 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5639 VALUE counters = RARRAY_AREF(branches, 1);
5640 VALUE num = RARRAY_AREF(counters, idx);
5641 count = FIX2LONG(num) + 1;
5642 if (POSFIXABLE(count)) {
5643 RARRAY_ASET(counters, idx, LONG2FIX(count));
5649 const rb_method_entry_t *
5650 rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5652 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5654 if (!me->def) return NULL; // negative cme
5656 retry:
5657 switch (me->def->type) {
5658 case VM_METHOD_TYPE_ISEQ: {
5659 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5660 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5661 path = rb_iseq_path(iseq);
5662 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5663 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5664 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5665 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5666 break;
5668 case VM_METHOD_TYPE_BMETHOD: {
5669 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5670 if (iseq) {
5671 rb_iseq_location_t *loc;
5672 rb_iseq_check(iseq);
5673 path = rb_iseq_path(iseq);
5674 loc = &ISEQ_BODY(iseq)->location;
5675 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5676 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5677 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5678 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5679 break;
5681 return NULL;
5683 case VM_METHOD_TYPE_ALIAS:
5684 me = me->def->body.alias.original_me;
5685 goto retry;
5686 case VM_METHOD_TYPE_REFINED:
5687 me = me->def->body.refined.orig_me;
5688 if (!me) return NULL;
5689 goto retry;
5690 default:
5691 return NULL;
5694 /* found */
5695 if (RB_TYPE_P(path, T_ARRAY)) {
5696 path = rb_ary_entry(path, 1);
5697 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just in case... */
5699 if (resolved_location) {
5700 resolved_location[0] = path;
5701 resolved_location[1] = beg_pos_lineno;
5702 resolved_location[2] = beg_pos_column;
5703 resolved_location[3] = end_pos_lineno;
5704 resolved_location[4] = end_pos_column;
5706 return me;
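/*
 * Usage sketch (editor's addition): callers pass a 5-element buffer and get
 * back the resolved method entry (after following alias/refinement chains),
 * or NULL when no source location is available:
 *
 *     VALUE loc[5];
 *     if (rb_resolve_me_location(me, loc)) {
 *         // loc[0] = path, loc[1]/loc[2] = begin line/column,
 *         // loc[3]/loc[4] = end line/column
 *     }
 */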
5709 static void
5710 update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5712 const rb_control_frame_t *cfp = GET_EC()->cfp;
5713 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5714 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5715 VALUE rcount;
5716 long count;
5718 me = rb_resolve_me_location(me, 0);
5719 if (!me) return;
5721 rcount = rb_hash_aref(me2counter, (VALUE) me);
5722 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5723 if (POSFIXABLE(count)) {
5724 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5728 VALUE
5729 rb_get_coverages(void)
5731 return GET_VM()->coverages;
5735 rb_get_coverage_mode(void)
5737 return GET_VM()->coverage_mode;
5740 void
5741 rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5743 GET_VM()->coverages = coverages;
5744 GET_VM()->me2counter = me2counter;
5745 GET_VM()->coverage_mode = mode;
5748 void
5749 rb_resume_coverages(void)
5751 int mode = GET_VM()->coverage_mode;
5752 VALUE me2counter = GET_VM()->me2counter;
5753 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5754 if (mode & COVERAGE_TARGET_BRANCHES) {
5755 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5757 if (mode & COVERAGE_TARGET_METHODS) {
5758 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5762 void
5763 rb_suspend_coverages(void)
5765 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5766 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5767 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5769 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5770 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5774 /* Make coverage arrays empty so old covered files are no longer tracked. */
5775 void
5776 rb_reset_coverages(void)
5778 rb_clear_coverages();
5779 rb_iseq_remove_coverage_all();
5780 GET_VM()->coverages = Qfalse;
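/*
 * Usage sketch (editor's addition): ext/coverage drives the hooks above
 * roughly like this (schematic; the real extension also prepares the
 * result hash keyed by file path):
 *
 *     rb_set_coverages(rb_hash_new(), COVERAGE_TARGET_LINES, Qnil);
 *     rb_resume_coverages();   // install the line/branch/method hooks
 *     // ... run the code under measurement ...
 *     rb_suspend_coverages();  // remove the hooks, keep collected data
 *     rb_reset_coverages();    // drop the data entirely
 */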
5783 VALUE
5784 rb_default_coverage(int n)
5786 VALUE coverage = rb_ary_hidden_new_fill(3);
5787 VALUE lines = Qfalse, branches = Qfalse;
5788 int mode = GET_VM()->coverage_mode;
5790 if (mode & COVERAGE_TARGET_LINES) {
5791 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
5793 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5795 if (mode & COVERAGE_TARGET_BRANCHES) {
5796 branches = rb_ary_hidden_new_fill(2);
5797 /* internal data structures for branch coverage:
5799 * { branch base node =>
5800 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5801 * branch target id =>
5802 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5803 * ...
5804 * }],
5805 * ...
5808 * Example:
5809 * { NODE_CASE =>
5810 * [1, 0, 4, 3, {
5811 * NODE_WHEN => [2, 8, 2, 9, 0],
5812 * NODE_WHEN => [3, 8, 3, 9, 1],
5813 * ...
5814 * }],
5815 * ...
5818 VALUE structure = rb_hash_new();
5819 rb_obj_hide(structure);
5820 RARRAY_ASET(branches, 0, structure);
5821 /* branch execution counters */
5822 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
5824 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5826 return coverage;
5829 static VALUE
5830 uninterruptible_exit(VALUE v)
5832 rb_thread_t *cur_th = GET_THREAD();
5833 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5835 cur_th->pending_interrupt_queue_checked = 0;
5836 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5837 RUBY_VM_SET_INTERRUPT(cur_th->ec);
5839 return Qnil;
5842 VALUE
5843 rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5845 VALUE interrupt_mask = rb_ident_hash_new();
5846 rb_thread_t *cur_th = GET_THREAD();
5848 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5849 OBJ_FREEZE(interrupt_mask);
5850 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5852 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5854 RUBY_VM_CHECK_INTS(cur_th->ec);
5855 return ret;
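/*
 * Usage sketch (editor's addition): run a C callback with asynchronous
 * interrupts (Thread#raise, Thread#kill, signal-driven exceptions) deferred
 * until the callback returns:
 *
 *     static VALUE
 *     critical_body(VALUE arg)
 *     {
 *         // no asynchronous exception can fire inside this function
 *         return Qnil;
 *     }
 *
 *     rb_uninterruptible(critical_body, Qnil);
 */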
5858 static void
5859 thread_specific_storage_alloc(rb_thread_t *th)
5861 VM_ASSERT(th->specific_storage == NULL);
5863 if (UNLIKELY(specific_key_count > 0)) {
5864 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5868 rb_internal_thread_specific_key_t
5869 rb_internal_thread_specific_key_create(void)
5871 rb_vm_t *vm = GET_VM();
5873 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
5874 rb_raise(rb_eThreadError, "the first rb_internal_thread_specific_key_create() must be called before additional ractors are created");
5876 else if (specific_key_count >= RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
5877 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() was called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5879 else {
5880 rb_internal_thread_specific_key_t key = specific_key_count++;
5882 if (key == 0) {
5883 // allocate
5884 rb_ractor_t *cr = GET_RACTOR();
5885 rb_thread_t *th;
5887 ccan_list_for_each(&cr->threads.set, th, lt_node) {
5888 thread_specific_storage_alloc(th);
5891 return key;
5895 // safe to call asynchronously from any native thread.
5896 void *
5897 rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
5899 rb_thread_t *th = DATA_PTR(thread_val);
5901 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5902 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5903 VM_ASSERT(th->specific_storage);
5905 return th->specific_storage[key];
5908 // safe to call asynchronously from any native thread.
5909 void
5910 rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
5912 rb_thread_t *th = DATA_PTR(thread_val);
5914 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5915 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5916 VM_ASSERT(th->specific_storage);
5918 th->specific_storage[key] = data;
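/*
 * Usage sketch (editor's addition): a hypothetical profiler allocates one
 * key at startup (while there is still a single ractor) and then attaches
 * per-thread data; values are plain void* and are not GC-managed:
 *
 *     static rb_internal_thread_specific_key_t key;
 *
 *     void
 *     Init_profiler(void)
 *     {
 *         key = rb_internal_thread_specific_key_create();
 *     }
 *
 *     // later, from any thread:
 *     rb_internal_thread_specific_set(thread, key, my_data);
 *     void *p = rb_internal_thread_specific_get(thread, key);
 */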