2 /**********************************************************************
8 Copyright (C) 2004-2007 Koichi Sasada
10 **********************************************************************/
12 #ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
14 #include "internal/gc.h"
15 #include "internal/sanitizers.h"
18 #ifdef HAVE_SYS_RESOURCE_H
19 #include <sys/resource.h>
21 #ifdef HAVE_THR_STKSEGMENT
24 #if defined(HAVE_FCNTL_H)
26 #elif defined(HAVE_SYS_FCNTL_H)
27 #include <sys/fcntl.h>
29 #ifdef HAVE_SYS_PRCTL_H
30 #include <sys/prctl.h>
32 #if defined(HAVE_SYS_TIME_H)
35 #if defined(__HAIKU__)
36 #include <kernel/OS.h>
39 #include <sys/syscall.h> /* for SYS_gettid */
45 # include <AvailabilityMacros.h>
48 #if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
49 # define USE_EVENTFD (1)
50 # include <sys/eventfd.h>
52 # define USE_EVENTFD (0)
55 #if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
56 defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
57 defined(HAVE_CLOCK_GETTIME)
58 static pthread_condattr_t condattr_mono
;
59 static pthread_condattr_t
*condattr_monotonic
= &condattr_mono
;
61 static const void *const condattr_monotonic
= NULL
;
66 #ifndef HAVE_SYS_EVENT_H
67 #define HAVE_SYS_EVENT_H 0
70 #ifndef HAVE_SYS_EPOLL_H
71 #define HAVE_SYS_EPOLL_H 0
73 // force setting for debug
74 // #undef HAVE_SYS_EPOLL_H
75 // #define HAVE_SYS_EPOLL_H 0
78 #ifndef USE_MN_THREADS
79 #if defined(__EMSCRIPTEN__) || defined(COROUTINE_PTHREAD_CONTEXT)
80 // on __EMSCRIPTEN__ provides epoll* declarations, but no implementations.
81 // on COROUTINE_PTHREAD_CONTEXT, it doesn't worth to use it.
82 #define USE_MN_THREADS 0
83 #elif HAVE_SYS_EPOLL_H
84 #include <sys/epoll.h>
85 #define USE_MN_THREADS 1
86 #elif HAVE_SYS_EVENT_H
87 #include <sys/event.h>
88 #define USE_MN_THREADS 1
90 #define USE_MN_THREADS 0
94 // native thread wrappers
96 #define NATIVE_MUTEX_LOCK_DEBUG 0
99 mutex_debug(const char *msg
, void *lock
)
101 if (NATIVE_MUTEX_LOCK_DEBUG
) {
103 static pthread_mutex_t dbglock
= PTHREAD_MUTEX_INITIALIZER
;
105 if ((r
= pthread_mutex_lock(&dbglock
)) != 0) {exit(EXIT_FAILURE
);}
106 fprintf(stdout
, "%s: %p\n", msg
, lock
);
107 if ((r
= pthread_mutex_unlock(&dbglock
)) != 0) {exit(EXIT_FAILURE
);}
/* Lock `lock`, treating any pthread error as a fatal interpreter bug. */
void
rb_native_mutex_lock(pthread_mutex_t *lock)
{
    mutex_debug("lock", lock);
    int rc = pthread_mutex_lock(lock);
    if (rc != 0) {
        rb_bug_errno("pthread_mutex_lock", rc);
    }
}
/* Unlock `lock`, treating any pthread error as a fatal interpreter bug. */
void
rb_native_mutex_unlock(pthread_mutex_t *lock)
{
    mutex_debug("unlock", lock);
    int rc = pthread_mutex_unlock(lock);
    if (rc != 0) {
        rb_bug_errno("pthread_mutex_unlock", rc);
    }
}
/* Try to lock `lock` without blocking.
 * Returns 0 on success, EBUSY when already held; any other pthread
 * error is a fatal interpreter bug.
 * NOTE(review): the EBUSY branch sits on lines not visible in this
 * extraction and was reconstructed — confirm against upstream. */
int
rb_native_mutex_trylock(pthread_mutex_t *lock)
{
    mutex_debug("trylock", lock);
    int rc = pthread_mutex_trylock(lock);
    if (rc != 0) {
        if (rc == EBUSY) {
            return EBUSY;
        }
        rb_bug_errno("pthread_mutex_trylock", rc);
    }
    return 0;
}
/* Initialize `lock` with default attributes; failure is a fatal bug. */
void
rb_native_mutex_initialize(pthread_mutex_t *lock)
{
    int rc = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);
    if (rc != 0) {
        rb_bug_errno("pthread_mutex_init", rc);
    }
}
/* Destroy `lock`; failure is a fatal bug. */
void
rb_native_mutex_destroy(pthread_mutex_t *lock)
{
    int rc = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    if (rc != 0) {
        rb_bug_errno("pthread_mutex_destroy", rc);
    }
}
168 rb_native_cond_initialize(rb_nativethread_cond_t
*cond
)
170 int r
= pthread_cond_init(cond
, condattr_monotonic
);
172 rb_bug_errno("pthread_cond_init", r
);
177 rb_native_cond_destroy(rb_nativethread_cond_t
*cond
)
179 int r
= pthread_cond_destroy(cond
);
181 rb_bug_errno("pthread_cond_destroy", r
);
186 * In OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast return
187 * EAGAIN after retrying 8192 times. You can see them in the following page:
189 * http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
191 * The following rb_native_cond_signal and rb_native_cond_broadcast functions
192 * need to retrying until pthread functions don't return EAGAIN.
196 rb_native_cond_signal(rb_nativethread_cond_t
*cond
)
200 r
= pthread_cond_signal(cond
);
201 } while (r
== EAGAIN
);
203 rb_bug_errno("pthread_cond_signal", r
);
208 rb_native_cond_broadcast(rb_nativethread_cond_t
*cond
)
212 r
= pthread_cond_broadcast(cond
);
213 } while (r
== EAGAIN
);
215 rb_bug_errno("rb_native_cond_broadcast", r
);
220 rb_native_cond_wait(rb_nativethread_cond_t
*cond
, pthread_mutex_t
*mutex
)
222 int r
= pthread_cond_wait(cond
, mutex
);
224 rb_bug_errno("pthread_cond_wait", r
);
229 native_cond_timedwait(rb_nativethread_cond_t
*cond
, pthread_mutex_t
*mutex
, const rb_hrtime_t
*abs
)
235 * An old Linux may return EINTR. Even though POSIX says
236 * "These functions shall not return an error code of [EINTR]".
237 * http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cond_timedwait.html
238 * Let's hide it from arch generic code.
241 rb_hrtime2timespec(&ts
, abs
);
242 r
= pthread_cond_timedwait(cond
, mutex
, &ts
);
243 } while (r
== EINTR
);
245 if (r
!= 0 && r
!= ETIMEDOUT
) {
246 rb_bug_errno("pthread_cond_timedwait", r
);
253 native_cond_timeout(rb_nativethread_cond_t
*cond
, const rb_hrtime_t rel
)
255 if (condattr_monotonic
) {
256 return rb_hrtime_add(rb_hrtime_now(), rel
);
261 rb_timespec_now(&ts
);
262 return rb_hrtime_add(rb_timespec2hrtime(&ts
), rel
);
267 rb_native_cond_timedwait(rb_nativethread_cond_t
*cond
, pthread_mutex_t
*mutex
, unsigned long msec
)
269 rb_hrtime_t hrmsec
= native_cond_timeout(cond
, RB_HRTIME_PER_MSEC
* msec
);
270 native_cond_timedwait(cond
, mutex
, &hrmsec
);
275 static rb_internal_thread_event_hook_t
*rb_internal_thread_event_hooks
= NULL
;
276 static void rb_thread_execute_hooks(rb_event_flag_t event
, rb_thread_t
*th
);
280 event_name(rb_event_flag_t event
)
283 case RUBY_INTERNAL_THREAD_EVENT_STARTED
:
285 case RUBY_INTERNAL_THREAD_EVENT_READY
:
287 case RUBY_INTERNAL_THREAD_EVENT_RESUMED
:
289 case RUBY_INTERNAL_THREAD_EVENT_SUSPENDED
:
291 case RUBY_INTERNAL_THREAD_EVENT_EXITED
:
297 #define RB_INTERNAL_THREAD_HOOK(event, th) \
298 if (UNLIKELY(rb_internal_thread_event_hooks)) { \
299 fprintf(stderr, "[thread=%"PRIxVALUE"] %s in %s (%s:%d)\n", th->self, event_name(event), __func__, __FILE__, __LINE__); \
300 rb_thread_execute_hooks(event, th); \
303 #define RB_INTERNAL_THREAD_HOOK(event, th) if (UNLIKELY(rb_internal_thread_event_hooks)) { rb_thread_execute_hooks(event, th); }
306 static rb_serial_t current_fork_gen
= 1; /* We can't use GET_VM()->fork_gen */
308 #if defined(SIGVTALRM) && !defined(__EMSCRIPTEN__)
309 # define USE_UBF_LIST 1
312 static void threadptr_trap_interrupt(rb_thread_t
*);
314 #ifdef HAVE_SCHED_YIELD
315 #define native_thread_yield() (void)sched_yield()
317 #define native_thread_yield() ((void)0)
320 /* 100ms. 10ms is too small for user level thread scheduling
321 * on recent Linux (tested on 2.6.35)
323 #define TIME_QUANTUM_MSEC (100)
324 #define TIME_QUANTUM_USEC (TIME_QUANTUM_MSEC * 1000)
325 #define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)
327 static void native_thread_dedicated_inc(rb_vm_t
*vm
, rb_ractor_t
*cr
, struct rb_native_thread
*nt
);
328 static void native_thread_dedicated_dec(rb_vm_t
*vm
, rb_ractor_t
*cr
, struct rb_native_thread
*nt
);
329 static void native_thread_assign(struct rb_native_thread
*nt
, rb_thread_t
*th
);
331 static void ractor_sched_enq(rb_vm_t
*vm
, rb_ractor_t
*r
);
332 static void timer_thread_wakeup(void);
333 static void timer_thread_wakeup_locked(rb_vm_t
*vm
);
334 static void timer_thread_wakeup_force(void);
335 static void thread_sched_switch(rb_thread_t
*cth
, rb_thread_t
*next_th
);
336 static void coroutine_transfer0(struct coroutine_context
*transfer_from
,
337 struct coroutine_context
*transfer_to
, bool to_dead
);
339 #define thread_sched_dump(s) thread_sched_dump_(__FILE__, __LINE__, s)
342 th_has_dedicated_nt(const rb_thread_t
*th
)
344 // TODO: th->has_dedicated_nt
345 return th
->nt
->dedicated
> 0;
348 RBIMPL_ATTR_MAYBE_UNUSED()
350 thread_sched_dump_(const char *file
, int line
, struct rb_thread_sched
*sched
)
352 fprintf(stderr
, "@%s:%d running:%d\n", file
, line
, sched
->running
? (int)sched
->running
->serial
: -1);
355 ccan_list_for_each(&sched
->readyq
, th
, sched
.node
.readyq
) {
356 i
++; if (i
>10) rb_bug("too many");
357 fprintf(stderr
, " ready:%d (%sNT:%d)\n", th
->serial
,
358 th
->nt
? (th
->nt
->dedicated
? "D" : "S") : "x",
359 th
->nt
? (int)th
->nt
->serial
: -1);
363 #define ractor_sched_dump(s) ractor_sched_dump_(__FILE__, __LINE__, s)
365 RBIMPL_ATTR_MAYBE_UNUSED()
367 ractor_sched_dump_(const char *file
, int line
, rb_vm_t
*vm
)
371 fprintf(stderr
, "ractor_sched_dump %s:%d\n", file
, line
);
374 ccan_list_for_each(&vm
->ractor
.sched
.grq
, r
, threads
.sched
.grq_node
) {
376 if (i
>10) rb_bug("!!");
377 fprintf(stderr
, " %d ready:%d\n", i
, rb_ractor_id(r
));
381 #define thread_sched_lock(a, b) thread_sched_lock_(a, b, __FILE__, __LINE__)
382 #define thread_sched_unlock(a, b) thread_sched_unlock_(a, b, __FILE__, __LINE__)
385 thread_sched_lock_(struct rb_thread_sched
*sched
, rb_thread_t
*th
, const char *file
, int line
)
387 rb_native_mutex_lock(&sched
->lock_
);
390 RUBY_DEBUG_LOG2(file
, line
, "th:%u prev_owner:%u", rb_th_serial(th
), rb_th_serial(sched
->lock_owner
));
391 VM_ASSERT(sched
->lock_owner
== NULL
);
392 sched
->lock_owner
= th
;
394 RUBY_DEBUG_LOG2(file
, line
, "th:%u", rb_th_serial(th
));
399 thread_sched_unlock_(struct rb_thread_sched
*sched
, rb_thread_t
*th
, const char *file
, int line
)
401 RUBY_DEBUG_LOG2(file
, line
, "th:%u", rb_th_serial(th
));
404 VM_ASSERT(sched
->lock_owner
== th
);
405 sched
->lock_owner
= NULL
;
408 rb_native_mutex_unlock(&sched
->lock_
);
412 thread_sched_set_lock_owner(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
414 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th
));
416 #if VM_CHECK_MODE > 0
417 sched
->lock_owner
= th
;
422 ASSERT_thread_sched_locked(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
424 VM_ASSERT(rb_native_mutex_trylock(&sched
->lock_
) == EBUSY
);
428 VM_ASSERT(sched
->lock_owner
== th
);
431 VM_ASSERT(sched
->lock_owner
!= NULL
);
436 #define ractor_sched_lock(a, b) ractor_sched_lock_(a, b, __FILE__, __LINE__)
437 #define ractor_sched_unlock(a, b) ractor_sched_unlock_(a, b, __FILE__, __LINE__)
439 RBIMPL_ATTR_MAYBE_UNUSED()
441 rb_ractor_serial(const rb_ractor_t
*r
) {
443 return rb_ractor_id(r
);
451 ractor_sched_set_locked(rb_vm_t
*vm
, rb_ractor_t
*cr
)
453 #if VM_CHECK_MODE > 0
454 VM_ASSERT(vm
->ractor
.sched
.lock_owner
== NULL
);
455 VM_ASSERT(vm
->ractor
.sched
.locked
== false);
457 vm
->ractor
.sched
.lock_owner
= cr
;
458 vm
->ractor
.sched
.locked
= true;
463 ractor_sched_set_unlocked(rb_vm_t
*vm
, rb_ractor_t
*cr
)
465 #if VM_CHECK_MODE > 0
466 VM_ASSERT(vm
->ractor
.sched
.locked
);
467 VM_ASSERT(vm
->ractor
.sched
.lock_owner
== cr
);
469 vm
->ractor
.sched
.locked
= false;
470 vm
->ractor
.sched
.lock_owner
= NULL
;
475 ractor_sched_lock_(rb_vm_t
*vm
, rb_ractor_t
*cr
, const char *file
, int line
)
477 rb_native_mutex_lock(&vm
->ractor
.sched
.lock
);
480 RUBY_DEBUG_LOG2(file
, line
, "cr:%u prev_owner:%u", rb_ractor_serial(cr
), rb_ractor_serial(vm
->ractor
.sched
.lock_owner
));
482 RUBY_DEBUG_LOG2(file
, line
, "cr:%u", rb_ractor_serial(cr
));
485 ractor_sched_set_locked(vm
, cr
);
489 ractor_sched_unlock_(rb_vm_t
*vm
, rb_ractor_t
*cr
, const char *file
, int line
)
491 RUBY_DEBUG_LOG2(file
, line
, "cr:%u", rb_ractor_serial(cr
));
493 ractor_sched_set_unlocked(vm
, cr
);
494 rb_native_mutex_unlock(&vm
->ractor
.sched
.lock
);
498 ASSERT_ractor_sched_locked(rb_vm_t
*vm
, rb_ractor_t
*cr
)
500 VM_ASSERT(rb_native_mutex_trylock(&vm
->ractor
.sched
.lock
) == EBUSY
);
501 VM_ASSERT(vm
->ractor
.sched
.locked
);
502 VM_ASSERT(cr
== NULL
|| vm
->ractor
.sched
.lock_owner
== cr
);
505 RBIMPL_ATTR_MAYBE_UNUSED()
507 ractor_sched_running_threads_contain_p(rb_vm_t
*vm
, rb_thread_t
*th
)
510 ccan_list_for_each(&vm
->ractor
.sched
.running_threads
, rth
, sched
.node
.running_threads
) {
511 if (rth
== th
) return true;
516 RBIMPL_ATTR_MAYBE_UNUSED()
518 ractor_sched_running_threads_size(rb_vm_t
*vm
)
522 ccan_list_for_each(&vm
->ractor
.sched
.running_threads
, th
, sched
.node
.running_threads
) {
528 RBIMPL_ATTR_MAYBE_UNUSED()
530 ractor_sched_timeslice_threads_size(rb_vm_t
*vm
)
534 ccan_list_for_each(&vm
->ractor
.sched
.timeslice_threads
, th
, sched
.node
.timeslice_threads
) {
540 RBIMPL_ATTR_MAYBE_UNUSED()
542 ractor_sched_timeslice_threads_contain_p(rb_vm_t
*vm
, rb_thread_t
*th
)
545 ccan_list_for_each(&vm
->ractor
.sched
.timeslice_threads
, rth
, sched
.node
.timeslice_threads
) {
546 if (rth
== th
) return true;
551 static void ractor_sched_barrier_join_signal_locked(rb_vm_t
*vm
);
552 static void ractor_sched_barrier_join_wait_locked(rb_vm_t
*vm
, rb_thread_t
*th
);
554 // setup timeslice signals by the timer thread.
556 thread_sched_setup_running_threads(struct rb_thread_sched
*sched
, rb_ractor_t
*cr
, rb_vm_t
*vm
,
557 rb_thread_t
*add_th
, rb_thread_t
*del_th
, rb_thread_t
*add_timeslice_th
)
559 #if USE_RUBY_DEBUG_LOG
560 unsigned int prev_running_cnt
= vm
->ractor
.sched
.running_cnt
;
563 rb_thread_t
*del_timeslice_th
;
565 if (del_th
&& sched
->is_running_timeslice
) {
566 del_timeslice_th
= del_th
;
567 sched
->is_running_timeslice
= false;
570 del_timeslice_th
= NULL
;
573 RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u",
574 rb_th_serial(add_th
), rb_th_serial(del_th
),
575 rb_th_serial(add_timeslice_th
), rb_th_serial(del_timeslice_th
));
577 ractor_sched_lock(vm
, cr
);
579 // update running_threads
581 VM_ASSERT(ractor_sched_running_threads_contain_p(vm
, del_th
));
582 VM_ASSERT(del_timeslice_th
!= NULL
||
583 !ractor_sched_timeslice_threads_contain_p(vm
, del_th
));
585 ccan_list_del_init(&del_th
->sched
.node
.running_threads
);
586 vm
->ractor
.sched
.running_cnt
--;
588 if (UNLIKELY(vm
->ractor
.sched
.barrier_waiting
)) {
589 ractor_sched_barrier_join_signal_locked(vm
);
591 sched
->is_running
= false;
595 if (UNLIKELY(vm
->ractor
.sched
.barrier_waiting
)) {
596 RUBY_DEBUG_LOG("barrier-wait");
598 ractor_sched_barrier_join_signal_locked(vm
);
599 ractor_sched_barrier_join_wait_locked(vm
, add_th
);
602 VM_ASSERT(!ractor_sched_running_threads_contain_p(vm
, add_th
));
603 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm
, add_th
));
605 ccan_list_add(&vm
->ractor
.sched
.running_threads
, &add_th
->sched
.node
.running_threads
);
606 vm
->ractor
.sched
.running_cnt
++;
607 sched
->is_running
= true;
610 if (add_timeslice_th
) {
611 // update timeslice threads
612 int was_empty
= ccan_list_empty(&vm
->ractor
.sched
.timeslice_threads
);
613 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm
, add_timeslice_th
));
614 ccan_list_add(&vm
->ractor
.sched
.timeslice_threads
, &add_timeslice_th
->sched
.node
.timeslice_threads
);
615 sched
->is_running_timeslice
= true;
617 timer_thread_wakeup_locked(vm
);
621 if (del_timeslice_th
) {
622 VM_ASSERT(ractor_sched_timeslice_threads_contain_p(vm
, del_timeslice_th
));
623 ccan_list_del_init(&del_timeslice_th
->sched
.node
.timeslice_threads
);
626 VM_ASSERT(ractor_sched_running_threads_size(vm
) == vm
->ractor
.sched
.running_cnt
);
627 VM_ASSERT(ractor_sched_timeslice_threads_size(vm
) <= vm
->ractor
.sched
.running_cnt
);
629 ractor_sched_unlock(vm
, cr
);
631 if (add_th
&& !del_th
&& UNLIKELY(vm
->ractor
.sync
.lock_owner
!= NULL
)) {
632 // it can be after barrier synchronization by another ractor
633 rb_thread_t
*lock_owner
= NULL
;
635 lock_owner
= sched
->lock_owner
;
637 thread_sched_unlock(sched
, lock_owner
);
642 thread_sched_lock(sched
, lock_owner
);
645 //RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u run:%u->%u",
646 // rb_th_serial(add_th), rb_th_serial(del_th),
647 // rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th),
648 RUBY_DEBUG_LOG("run:%u->%u", prev_running_cnt
, vm
->ractor
.sched
.running_cnt
);
652 thread_sched_add_running_thread(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
654 ASSERT_thread_sched_locked(sched
, th
);
655 VM_ASSERT(sched
->running
== th
);
657 rb_vm_t
*vm
= th
->vm
;
658 thread_sched_setup_running_threads(sched
, th
->ractor
, vm
, th
, NULL
, ccan_list_empty(&sched
->readyq
) ? NULL
: th
);
662 thread_sched_del_running_thread(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
664 ASSERT_thread_sched_locked(sched
, th
);
666 rb_vm_t
*vm
= th
->vm
;
667 thread_sched_setup_running_threads(sched
, th
->ractor
, vm
, NULL
, th
, NULL
);
671 rb_add_running_thread(rb_thread_t
*th
)
673 struct rb_thread_sched
*sched
= TH_SCHED(th
);
675 thread_sched_lock(sched
, th
);
677 thread_sched_add_running_thread(sched
, th
);
679 thread_sched_unlock(sched
, th
);
683 rb_del_running_thread(rb_thread_t
*th
)
685 struct rb_thread_sched
*sched
= TH_SCHED(th
);
687 thread_sched_lock(sched
, th
);
689 thread_sched_del_running_thread(sched
, th
);
691 thread_sched_unlock(sched
, th
);
694 // setup current or next running thread
695 // sched->running should be set only on this function.
697 // if th is NULL, there is no running threads.
699 thread_sched_set_running(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
701 RUBY_DEBUG_LOG("th:%u->th:%u", rb_th_serial(sched
->running
), rb_th_serial(th
));
702 VM_ASSERT(sched
->running
!= th
);
707 RBIMPL_ATTR_MAYBE_UNUSED()
709 thread_sched_readyq_contain_p(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
712 ccan_list_for_each(&sched
->readyq
, rth
, sched
.node
.readyq
) {
713 if (rth
== th
) return true;
718 // deque thread from the ready queue.
719 // if the ready queue is empty, return NULL.
721 // return deque'ed running thread (or NULL).
723 thread_sched_deq(struct rb_thread_sched
*sched
)
725 ASSERT_thread_sched_locked(sched
, NULL
);
726 rb_thread_t
*next_th
;
728 VM_ASSERT(sched
->running
!= NULL
);
730 if (ccan_list_empty(&sched
->readyq
)) {
734 next_th
= ccan_list_pop(&sched
->readyq
, rb_thread_t
, sched
.node
.readyq
);
736 VM_ASSERT(sched
->readyq_cnt
> 0);
738 ccan_list_node_init(&next_th
->sched
.node
.readyq
);
741 RUBY_DEBUG_LOG("next_th:%u readyq_cnt:%d", rb_th_serial(next_th
), sched
->readyq_cnt
);
746 // enqueue ready thread to the ready queue.
748 thread_sched_enq(struct rb_thread_sched
*sched
, rb_thread_t
*ready_th
)
750 ASSERT_thread_sched_locked(sched
, NULL
);
751 RUBY_DEBUG_LOG("ready_th:%u readyq_cnt:%d", rb_th_serial(ready_th
), sched
->readyq_cnt
);
753 VM_ASSERT(sched
->running
!= NULL
);
754 VM_ASSERT(!thread_sched_readyq_contain_p(sched
, ready_th
));
756 if (sched
->is_running
) {
757 if (ccan_list_empty(&sched
->readyq
)) {
758 // add sched->running to timeslice
759 thread_sched_setup_running_threads(sched
, ready_th
->ractor
, ready_th
->vm
, NULL
, NULL
, sched
->running
);
763 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(ready_th
->vm
, sched
->running
));
766 ccan_list_add_tail(&sched
->readyq
, &ready_th
->sched
.node
.readyq
);
773 thread_sched_wakeup_running_thread(struct rb_thread_sched
*sched
, rb_thread_t
*next_th
, bool will_switch
)
775 ASSERT_thread_sched_locked(sched
, NULL
);
776 VM_ASSERT(sched
->running
== next_th
);
780 if (th_has_dedicated_nt(next_th
)) {
781 RUBY_DEBUG_LOG("pinning th:%u", next_th
->serial
);
782 rb_native_cond_signal(&next_th
->nt
->cond
.readyq
);
786 RUBY_DEBUG_LOG("th:%u is already running.", next_th
->serial
);
791 RUBY_DEBUG_LOG("th:%u (do nothing)", rb_th_serial(next_th
));
794 RUBY_DEBUG_LOG("th:%u (enq)", rb_th_serial(next_th
));
795 ractor_sched_enq(next_th
->vm
, next_th
->ractor
);
800 RUBY_DEBUG_LOG("no waiting threads%s", "");
804 // waiting -> ready (locked)
806 thread_sched_to_ready_common(struct rb_thread_sched
*sched
, rb_thread_t
*th
, bool wakeup
, bool will_switch
)
808 RUBY_DEBUG_LOG("th:%u running:%u redyq_cnt:%d", rb_th_serial(th
), rb_th_serial(sched
->running
), sched
->readyq_cnt
);
810 VM_ASSERT(sched
->running
!= th
);
811 VM_ASSERT(!thread_sched_readyq_contain_p(sched
, th
));
812 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_READY
, th
);
814 if (sched
->running
== NULL
) {
815 thread_sched_set_running(sched
, th
);
816 if (wakeup
) thread_sched_wakeup_running_thread(sched
, th
, will_switch
);
819 thread_sched_enq(sched
, th
);
825 // `th` had became "waiting" state by `thread_sched_to_waiting`
826 // and `thread_sched_to_ready` enqueue `th` to the thread ready queue.
827 RBIMPL_ATTR_MAYBE_UNUSED()
829 thread_sched_to_ready(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
831 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th
));
833 thread_sched_lock(sched
, th
);
835 thread_sched_to_ready_common(sched
, th
, true, false);
837 thread_sched_unlock(sched
, th
);
840 // wait until sched->running is `th`.
842 thread_sched_wait_running_turn(struct rb_thread_sched
*sched
, rb_thread_t
*th
, bool can_direct_transfer
)
844 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th
));
846 ASSERT_thread_sched_locked(sched
, th
);
847 VM_ASSERT(th
== GET_THREAD());
849 if (th
!= sched
->running
) {
850 // already deleted from running threads
851 // VM_ASSERT(!ractor_sched_running_threads_contain_p(th->vm, th)); // need locking
853 // wait for execution right
854 rb_thread_t
*next_th
;
855 while((next_th
= sched
->running
) != th
) {
856 if (th_has_dedicated_nt(th
)) {
857 RUBY_DEBUG_LOG("(nt) sleep th:%u running:%u", rb_th_serial(th
), rb_th_serial(sched
->running
));
859 thread_sched_set_lock_owner(sched
, NULL
);
861 RUBY_DEBUG_LOG("nt:%d cond:%p", th
->nt
->serial
, &th
->nt
->cond
.readyq
);
862 rb_native_cond_wait(&th
->nt
->cond
.readyq
, &sched
->lock_
);
864 thread_sched_set_lock_owner(sched
, th
);
866 RUBY_DEBUG_LOG("(nt) wakeup %s", sched
->running
== th
? "success" : "failed");
867 if (th
== sched
->running
) {
868 rb_ractor_thread_switch(th
->ractor
, th
);
872 // search another ready thread
873 if (can_direct_transfer
&&
874 (next_th
= sched
->running
) != NULL
&&
875 !next_th
->nt
// next_th is running or has dedicated nt
878 RUBY_DEBUG_LOG("th:%u->%u (direct)", rb_th_serial(th
), rb_th_serial(next_th
));
880 thread_sched_set_lock_owner(sched
, NULL
);
882 rb_ractor_set_current_ec(th
->ractor
, NULL
);
883 thread_sched_switch(th
, next_th
);
885 thread_sched_set_lock_owner(sched
, th
);
888 // search another ready ractor
889 struct rb_native_thread
*nt
= th
->nt
;
890 native_thread_assign(NULL
, th
);
892 RUBY_DEBUG_LOG("th:%u->%u (ractor scheduling)", rb_th_serial(th
), rb_th_serial(next_th
));
894 thread_sched_set_lock_owner(sched
, NULL
);
896 rb_ractor_set_current_ec(th
->ractor
, NULL
);
897 coroutine_transfer0(th
->sched
.context
, nt
->nt_context
, false);
899 thread_sched_set_lock_owner(sched
, th
);
902 VM_ASSERT(GET_EC() == th
->ec
);
906 VM_ASSERT(th
->nt
!= NULL
);
907 VM_ASSERT(GET_EC() == th
->ec
);
908 VM_ASSERT(th
->sched
.waiting_reason
.flags
== thread_sched_waiting_none
);
910 // add th to running threads
911 thread_sched_add_running_thread(sched
, th
);
914 // VM_ASSERT(ractor_sched_running_threads_contain_p(th->vm, th)); need locking
915 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED
, th
);
918 // waiting -> ready -> running (locked)
920 thread_sched_to_running_common(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
922 RUBY_DEBUG_LOG("th:%u dedicated:%d", rb_th_serial(th
), th_has_dedicated_nt(th
));
924 VM_ASSERT(sched
->running
!= th
);
925 VM_ASSERT(th_has_dedicated_nt(th
));
926 VM_ASSERT(GET_THREAD() == th
);
928 native_thread_dedicated_dec(th
->vm
, th
->ractor
, th
->nt
);
931 thread_sched_to_ready_common(sched
, th
, false, false);
933 if (sched
->running
== th
) {
934 thread_sched_add_running_thread(sched
, th
);
937 // TODO: check SNT number
938 thread_sched_wait_running_turn(sched
, th
, false);
941 // waiting -> ready -> running
943 // `th` had been waiting by `thread_sched_to_waiting()`
944 // and run a dedicated task (like waitpid and so on).
945 // After the dedicated task, this function is called
946 // to join a normal thread-scheduling.
948 thread_sched_to_running(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
950 thread_sched_lock(sched
, th
);
952 thread_sched_to_running_common(sched
, th
);
954 thread_sched_unlock(sched
, th
);
957 // resume a next thread in the thread ready queue.
959 // deque next running thread from the ready thread queue and
960 // resume this thread if available.
962 // If the next therad has a dedicated native thraed, simply signal to resume.
963 // Otherwise, make the ractor ready and other nt will run the ractor and the thread.
965 thread_sched_wakeup_next_thread(struct rb_thread_sched
*sched
, rb_thread_t
*th
, bool will_switch
)
967 ASSERT_thread_sched_locked(sched
, th
);
969 VM_ASSERT(sched
->running
== th
);
970 VM_ASSERT(sched
->running
->nt
!= NULL
);
972 rb_thread_t
*next_th
= thread_sched_deq(sched
);
974 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th
));
975 VM_ASSERT(th
!= next_th
);
977 thread_sched_set_running(sched
, next_th
);
978 VM_ASSERT(next_th
== sched
->running
);
979 thread_sched_wakeup_running_thread(sched
, next_th
, will_switch
);
982 thread_sched_del_running_thread(sched
, th
);
986 // running -> waiting
989 // th will run dedicated task.
990 // run another ready thread.
993 // run another ready thread.
995 thread_sched_to_waiting_common0(struct rb_thread_sched
*sched
, rb_thread_t
*th
, bool to_dead
)
997 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED
, th
);
999 if (!to_dead
) native_thread_dedicated_inc(th
->vm
, th
->ractor
, th
->nt
);
1001 RUBY_DEBUG_LOG("%sth:%u", to_dead
? "to_dead " : "", rb_th_serial(th
));
1003 bool can_switch
= to_dead
? !th_has_dedicated_nt(th
) : false;
1004 thread_sched_wakeup_next_thread(sched
, th
, can_switch
);
1007 // running -> dead (locked)
1009 thread_sched_to_dead_common(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
1011 RUBY_DEBUG_LOG("dedicated:%d", th
->nt
->dedicated
);
1012 thread_sched_to_waiting_common0(sched
, th
, true);
1013 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_EXITED
, th
);
1018 thread_sched_to_dead(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
1020 thread_sched_lock(sched
, th
);
1022 thread_sched_to_dead_common(sched
, th
);
1024 thread_sched_unlock(sched
, th
);
1027 // running -> waiting (locked)
1029 // This thread will run dedicated task (th->nt->dedicated++).
1031 thread_sched_to_waiting_common(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
1033 RUBY_DEBUG_LOG("dedicated:%d", th
->nt
->dedicated
);
1034 thread_sched_to_waiting_common0(sched
, th
, false);
1037 // running -> waiting
1039 // This thread will run a dedicated task.
1041 thread_sched_to_waiting(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
1043 thread_sched_lock(sched
, th
);
1045 thread_sched_to_waiting_common(sched
, th
);
1047 thread_sched_unlock(sched
, th
);
1050 // mini utility func
1052 setup_ubf(rb_thread_t
*th
, rb_unblock_function_t
*func
, void *arg
)
1054 rb_native_mutex_lock(&th
->interrupt_lock
);
1056 th
->unblock
.func
= func
;
1057 th
->unblock
.arg
= arg
;
1059 rb_native_mutex_unlock(&th
->interrupt_lock
);
1063 ubf_waiting(void *ptr
)
1065 rb_thread_t
*th
= (rb_thread_t
*)ptr
;
1066 struct rb_thread_sched
*sched
= TH_SCHED(th
);
1068 // only once. it is safe because th->interrupt_lock is already acquired.
1069 th
->unblock
.func
= NULL
;
1070 th
->unblock
.arg
= NULL
;
1072 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th
));
1074 thread_sched_lock(sched
, th
);
1076 if (sched
->running
== th
) {
1077 // not sleeping yet.
1080 thread_sched_to_ready_common(sched
, th
, true, false);
1083 thread_sched_unlock(sched
, th
);
1086 // running -> waiting
1088 // This thread will sleep until other thread wakeup the thread.
1090 thread_sched_to_waiting_until_wakeup(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
1092 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th
));
1094 RB_VM_SAVE_MACHINE_CONTEXT(th
);
1095 setup_ubf(th
, ubf_waiting
, (void *)th
);
1097 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED
, th
);
1099 thread_sched_lock(sched
, th
);
1101 if (!RUBY_VM_INTERRUPTED(th
->ec
)) {
1102 bool can_direct_transfer
= !th_has_dedicated_nt(th
);
1103 thread_sched_wakeup_next_thread(sched
, th
, can_direct_transfer
);
1104 thread_sched_wait_running_turn(sched
, th
, can_direct_transfer
);
1107 RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th
));
1110 thread_sched_unlock(sched
, th
);
1112 setup_ubf(th
, NULL
, NULL
);
1115 // run another thread in the ready queue.
1116 // continue to run if there are no ready threads.
1118 thread_sched_yield(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
1120 RUBY_DEBUG_LOG("th:%d sched->readyq_cnt:%d", (int)th
->serial
, sched
->readyq_cnt
);
1122 thread_sched_lock(sched
, th
);
1124 if (!ccan_list_empty(&sched
->readyq
)) {
1125 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED
, th
);
1126 thread_sched_wakeup_next_thread(sched
, th
, !th_has_dedicated_nt(th
));
1127 bool can_direct_transfer
= !th_has_dedicated_nt(th
);
1128 thread_sched_to_ready_common(sched
, th
, false, can_direct_transfer
);
1129 thread_sched_wait_running_turn(sched
, th
, can_direct_transfer
);
1132 VM_ASSERT(sched
->readyq_cnt
== 0);
1135 thread_sched_unlock(sched
, th
);
1139 rb_thread_sched_init(struct rb_thread_sched
*sched
, bool atfork
)
1141 rb_native_mutex_initialize(&sched
->lock_
);
1144 sched
->lock_owner
= NULL
;
1147 ccan_list_head_init(&sched
->readyq
);
1148 sched
->readyq_cnt
= 0;
1151 if (!atfork
) sched
->enable_mn_threads
= true; // MN is enabled on Ractors
1156 coroutine_transfer0(struct coroutine_context
*transfer_from
, struct coroutine_context
*transfer_to
, bool to_dead
)
1158 #ifdef RUBY_ASAN_ENABLED
1159 void **fake_stack
= to_dead
? NULL
: &transfer_from
->fake_stack
;
1160 __sanitizer_start_switch_fiber(fake_stack
, transfer_to
->stack_base
, transfer_to
->stack_size
);
1163 RBIMPL_ATTR_MAYBE_UNUSED()
1164 struct coroutine_context
*returning_from
= coroutine_transfer(transfer_from
, transfer_to
);
1166 /* if to_dead was passed, the caller is promising that this coroutine is finished and it should
1167 * never be resumed! */
1168 VM_ASSERT(!to_dead
);
1169 #ifdef RUBY_ASAN_ENABLED
1170 __sanitizer_finish_switch_fiber(transfer_from
->fake_stack
,
1171 (const void**)&returning_from
->stack_base
, &returning_from
->stack_size
);
1177 thread_sched_switch0(struct coroutine_context
*current_cont
, rb_thread_t
*next_th
, struct rb_native_thread
*nt
, bool to_dead
)
1179 VM_ASSERT(!nt
->dedicated
);
1180 VM_ASSERT(next_th
->nt
== NULL
);
1182 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th
));
1184 ruby_thread_set_native(next_th
);
1185 native_thread_assign(nt
, next_th
);
1187 coroutine_transfer0(current_cont
, next_th
->sched
.context
, to_dead
);
1191 thread_sched_switch(rb_thread_t
*cth
, rb_thread_t
*next_th
)
1193 struct rb_native_thread
*nt
= cth
->nt
;
1194 native_thread_assign(NULL
, cth
);
1195 RUBY_DEBUG_LOG("th:%u->%u on nt:%d", rb_th_serial(cth
), rb_th_serial(next_th
), nt
->serial
);
1196 thread_sched_switch0(cth
->sched
.context
, next_th
, nt
, cth
->status
== THREAD_KILLED
);
1199 #if VM_CHECK_MODE > 0
1200 RBIMPL_ATTR_MAYBE_UNUSED()
1202 grq_size(rb_vm_t
*vm
, rb_ractor_t
*cr
)
1204 ASSERT_ractor_sched_locked(vm
, cr
);
1206 rb_ractor_t
*r
, *prev_r
= NULL
;
1209 ccan_list_for_each(&vm
->ractor
.sched
.grq
, r
, threads
.sched
.grq_node
) {
1212 VM_ASSERT(r
!= prev_r
);
1220 ractor_sched_enq(rb_vm_t
*vm
, rb_ractor_t
*r
)
1222 struct rb_thread_sched
*sched
= &r
->threads
.sched
;
1223 rb_ractor_t
*cr
= NULL
; // timer thread can call this function
1225 VM_ASSERT(sched
->running
!= NULL
);
1226 VM_ASSERT(sched
->running
->nt
== NULL
);
1228 ractor_sched_lock(vm
, cr
);
1230 #if VM_CHECK_MODE > 0
1231 // check if grq contains r
1233 ccan_list_for_each(&vm
->ractor
.sched
.grq
, tr
, threads
.sched
.grq_node
) {
1238 ccan_list_add_tail(&vm
->ractor
.sched
.grq
, &sched
->grq_node
);
1239 vm
->ractor
.sched
.grq_cnt
++;
1240 VM_ASSERT(grq_size(vm
, cr
) == vm
->ractor
.sched
.grq_cnt
);
1242 RUBY_DEBUG_LOG("r:%u th:%u grq_cnt:%u", rb_ractor_id(r
), rb_th_serial(sched
->running
), vm
->ractor
.sched
.grq_cnt
);
1244 rb_native_cond_signal(&vm
->ractor
.sched
.cond
);
1246 // ractor_sched_dump(vm);
1248 ractor_sched_unlock(vm
, cr
);
1252 #ifndef SNT_KEEP_SECONDS
1253 #define SNT_KEEP_SECONDS 0
1257 // make at least MINIMUM_SNT snts for debug.
1258 #define MINIMUM_SNT 0
1261 static rb_ractor_t
*
1262 ractor_sched_deq(rb_vm_t
*vm
, rb_ractor_t
*cr
)
1266 ractor_sched_lock(vm
, cr
);
1268 RUBY_DEBUG_LOG("empty? %d", ccan_list_empty(&vm
->ractor
.sched
.grq
));
1269 // ractor_sched_dump(vm);
1271 VM_ASSERT(rb_current_execution_context(false) == NULL
);
1272 VM_ASSERT(grq_size(vm
, cr
) == vm
->ractor
.sched
.grq_cnt
);
1274 while ((r
= ccan_list_pop(&vm
->ractor
.sched
.grq
, rb_ractor_t
, threads
.sched
.grq_node
)) == NULL
) {
1275 RUBY_DEBUG_LOG("wait grq_cnt:%d", (int)vm
->ractor
.sched
.grq_cnt
);
1277 #if SNT_KEEP_SECONDS > 0
1278 rb_hrtime_t abs
= rb_hrtime_add(rb_hrtime_now(), RB_HRTIME_PER_SEC
* SNT_KEEP_SECONDS
);
1279 if (native_cond_timedwait(&vm
->ractor
.sched
.cond
, &vm
->ractor
.sched
.lock
, &abs
) == ETIMEDOUT
) {
1280 RUBY_DEBUG_LOG("timeout, grq_cnt:%d", (int)vm
->ractor
.sched
.grq_cnt
);
1281 VM_ASSERT(r
== NULL
);
1282 vm
->ractor
.sched
.snt_cnt
--;
1283 vm
->ractor
.sched
.running_cnt
--;
1287 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm
->ractor
.sched
.grq_cnt
);
1290 ractor_sched_set_unlocked(vm
, cr
);
1291 rb_native_cond_wait(&vm
->ractor
.sched
.cond
, &vm
->ractor
.sched
.lock
);
1292 ractor_sched_set_locked(vm
, cr
);
1294 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm
->ractor
.sched
.grq_cnt
);
1298 VM_ASSERT(rb_current_execution_context(false) == NULL
);
1301 VM_ASSERT(vm
->ractor
.sched
.grq_cnt
> 0);
1302 vm
->ractor
.sched
.grq_cnt
--;
1303 RUBY_DEBUG_LOG("r:%d grq_cnt:%u", (int)rb_ractor_id(r
), vm
->ractor
.sched
.grq_cnt
);
1306 VM_ASSERT(SNT_KEEP_SECONDS
> 0);
1310 ractor_sched_unlock(vm
, cr
);
1315 void rb_ractor_lock_self(rb_ractor_t
*r
);
1316 void rb_ractor_unlock_self(rb_ractor_t
*r
);
1319 rb_ractor_sched_sleep(rb_execution_context_t
*ec
, rb_ractor_t
*cr
, rb_unblock_function_t
*ubf
)
1321 // ractor lock of cr is acquired
1322 // r is sleeping statuss
1323 rb_thread_t
*th
= rb_ec_thread_ptr(ec
);
1324 struct rb_thread_sched
*sched
= TH_SCHED(th
);
1325 cr
->sync
.wait
.waiting_thread
= th
; // TODO: multi-thread
1327 setup_ubf(th
, ubf
, (void *)cr
);
1329 thread_sched_lock(sched
, th
);
1331 rb_ractor_unlock_self(cr
);
1333 if (RUBY_VM_INTERRUPTED(th
->ec
)) {
1334 RUBY_DEBUG_LOG("interrupted");
1336 else if (cr
->sync
.wait
.wakeup_status
!= wakeup_none
) {
1337 RUBY_DEBUG_LOG("awaken:%d", (int)cr
->sync
.wait
.wakeup_status
);
1341 RB_VM_SAVE_MACHINE_CONTEXT(th
);
1342 th
->status
= THREAD_STOPPED_FOREVER
;
1344 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED
, th
);
1346 bool can_direct_transfer
= !th_has_dedicated_nt(th
);
1347 thread_sched_wakeup_next_thread(sched
, th
, can_direct_transfer
);
1348 thread_sched_wait_running_turn(sched
, th
, can_direct_transfer
);
1349 th
->status
= THREAD_RUNNABLE
;
1354 thread_sched_unlock(sched
, th
);
1356 setup_ubf(th
, NULL
, NULL
);
1358 rb_ractor_lock_self(cr
);
1359 cr
->sync
.wait
.waiting_thread
= NULL
;
1363 rb_ractor_sched_wakeup(rb_ractor_t
*r
)
1365 rb_thread_t
*r_th
= r
->sync
.wait
.waiting_thread
;
1366 // ractor lock of r is acquired
1367 struct rb_thread_sched
*sched
= TH_SCHED(r_th
);
1369 VM_ASSERT(r
->sync
.wait
.wakeup_status
!= 0);
1371 thread_sched_lock(sched
, r_th
);
1373 if (r_th
->status
== THREAD_STOPPED_FOREVER
) {
1374 thread_sched_to_ready_common(sched
, r_th
, true, false);
1377 thread_sched_unlock(sched
, r_th
);
1381 ractor_sched_barrier_completed_p(rb_vm_t
*vm
)
1383 RUBY_DEBUG_LOG("run:%u wait:%u", vm
->ractor
.sched
.running_cnt
, vm
->ractor
.sched
.barrier_waiting_cnt
);
1384 VM_ASSERT(vm
->ractor
.sched
.running_cnt
- 1 >= vm
->ractor
.sched
.barrier_waiting_cnt
);
1385 return (vm
->ractor
.sched
.running_cnt
- vm
->ractor
.sched
.barrier_waiting_cnt
) == 1;
1389 rb_ractor_sched_barrier_start(rb_vm_t
*vm
, rb_ractor_t
*cr
)
1391 VM_ASSERT(cr
== GET_RACTOR());
1392 VM_ASSERT(vm
->ractor
.sync
.lock_owner
== cr
); // VM is locked
1393 VM_ASSERT(!vm
->ractor
.sched
.barrier_waiting
);
1394 VM_ASSERT(vm
->ractor
.sched
.barrier_waiting_cnt
== 0);
1396 RUBY_DEBUG_LOG("start serial:%u", vm
->ractor
.sched
.barrier_serial
);
1398 unsigned int lock_rec
;
1400 ractor_sched_lock(vm
, cr
);
1402 vm
->ractor
.sched
.barrier_waiting
= true;
1405 lock_rec
= vm
->ractor
.sync
.lock_rec
;
1406 vm
->ractor
.sync
.lock_rec
= 0;
1407 vm
->ractor
.sync
.lock_owner
= NULL
;
1408 rb_native_mutex_unlock(&vm
->ractor
.sync
.lock
);
1410 // interrupts all running threads
1412 ccan_list_for_each(&vm
->ractor
.sched
.running_threads
, ith
, sched
.node
.running_threads
) {
1413 if (ith
->ractor
!= cr
) {
1414 RUBY_DEBUG_LOG("barrier int:%u", rb_th_serial(ith
));
1415 RUBY_VM_SET_VM_BARRIER_INTERRUPT(ith
->ec
);
1419 // wait for other ractors
1420 while (!ractor_sched_barrier_completed_p(vm
)) {
1421 ractor_sched_set_unlocked(vm
, cr
);
1422 rb_native_cond_wait(&vm
->ractor
.sched
.barrier_complete_cond
, &vm
->ractor
.sched
.lock
);
1423 ractor_sched_set_locked(vm
, cr
);
1427 ractor_sched_unlock(vm
, cr
);
1430 rb_native_mutex_lock(&vm
->ractor
.sync
.lock
);
1431 vm
->ractor
.sync
.lock_rec
= lock_rec
;
1432 vm
->ractor
.sync
.lock_owner
= cr
;
1434 RUBY_DEBUG_LOG("completed seirial:%u", vm
->ractor
.sched
.barrier_serial
);
1436 ractor_sched_lock(vm
, cr
);
1438 vm
->ractor
.sched
.barrier_waiting
= false;
1439 vm
->ractor
.sched
.barrier_serial
++;
1440 vm
->ractor
.sched
.barrier_waiting_cnt
= 0;
1441 rb_native_cond_broadcast(&vm
->ractor
.sched
.barrier_release_cond
);
1443 ractor_sched_unlock(vm
, cr
);
1447 ractor_sched_barrier_join_signal_locked(rb_vm_t
*vm
)
1449 if (ractor_sched_barrier_completed_p(vm
)) {
1450 rb_native_cond_signal(&vm
->ractor
.sched
.barrier_complete_cond
);
1455 ractor_sched_barrier_join_wait_locked(rb_vm_t
*vm
, rb_thread_t
*th
)
1457 VM_ASSERT(vm
->ractor
.sched
.barrier_waiting
);
1459 unsigned int barrier_serial
= vm
->ractor
.sched
.barrier_serial
;
1461 while (vm
->ractor
.sched
.barrier_serial
== barrier_serial
) {
1462 RUBY_DEBUG_LOG("sleep serial:%u", barrier_serial
);
1463 RB_VM_SAVE_MACHINE_CONTEXT(th
);
1465 rb_ractor_t
*cr
= th
->ractor
;
1466 ractor_sched_set_unlocked(vm
, cr
);
1467 rb_native_cond_wait(&vm
->ractor
.sched
.barrier_release_cond
, &vm
->ractor
.sched
.lock
);
1468 ractor_sched_set_locked(vm
, cr
);
1470 RUBY_DEBUG_LOG("wakeup serial:%u", barrier_serial
);
1475 rb_ractor_sched_barrier_join(rb_vm_t
*vm
, rb_ractor_t
*cr
)
1477 VM_ASSERT(cr
->threads
.sched
.running
!= NULL
); // running ractor
1478 VM_ASSERT(cr
== GET_RACTOR());
1479 VM_ASSERT(vm
->ractor
.sync
.lock_owner
== NULL
); // VM is locked, but owner == NULL
1480 VM_ASSERT(vm
->ractor
.sched
.barrier_waiting
); // VM needs barrier sync
1482 #if USE_RUBY_DEBUG_LOG || VM_CHECK_MODE > 0
1483 unsigned int barrier_serial
= vm
->ractor
.sched
.barrier_serial
;
1486 RUBY_DEBUG_LOG("join");
1488 rb_native_mutex_unlock(&vm
->ractor
.sync
.lock
);
1490 VM_ASSERT(vm
->ractor
.sched
.barrier_waiting
); // VM needs barrier sync
1491 VM_ASSERT(vm
->ractor
.sched
.barrier_serial
== barrier_serial
);
1493 ractor_sched_lock(vm
, cr
);
1496 vm
->ractor
.sched
.barrier_waiting_cnt
++;
1497 RUBY_DEBUG_LOG("waiting_cnt:%u serial:%u", vm
->ractor
.sched
.barrier_waiting_cnt
, barrier_serial
);
1499 ractor_sched_barrier_join_signal_locked(vm
);
1500 ractor_sched_barrier_join_wait_locked(vm
, cr
->threads
.sched
.running
);
1502 ractor_sched_unlock(vm
, cr
);
1505 rb_native_mutex_lock(&vm
->ractor
.sync
.lock
);
1512 static void clear_thread_cache_altstack(void);
1515 rb_thread_sched_destroy(struct rb_thread_sched
*sched
)
1518 * only called once at VM shutdown (not atfork), another thread
1519 * may still grab vm->gvl.lock when calling gvl_release at
1520 * the end of thread_start_func_2
1523 rb_native_mutex_destroy(&sched
->lock
);
1525 clear_thread_cache_altstack();
#ifdef RB_THREAD_T_HAS_NATIVE_ID
// Return the kernel-level id of the calling thread (Linux: gettid via
// syscall; FreeBSD: pthread_getthreadid_np).
static int
get_native_thread_id(void)
{
#ifdef __linux__
    return (int)syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    return pthread_getthreadid_np();
#endif
}
#endif
1541 #if defined(HAVE_WORKING_FORK)
1543 thread_sched_atfork(struct rb_thread_sched
*sched
)
1546 rb_thread_sched_init(sched
, true);
1547 rb_thread_t
*th
= GET_THREAD();
1548 rb_vm_t
*vm
= GET_VM();
1550 if (th_has_dedicated_nt(th
)) {
1551 vm
->ractor
.sched
.snt_cnt
= 0;
1554 vm
->ractor
.sched
.snt_cnt
= 1;
1556 vm
->ractor
.sched
.running_cnt
= 0;
1558 // rb_native_cond_destroy(&vm->ractor.sched.cond);
1559 rb_native_cond_initialize(&vm
->ractor
.sched
.cond
);
1560 rb_native_cond_initialize(&vm
->ractor
.sched
.barrier_complete_cond
);
1561 rb_native_cond_initialize(&vm
->ractor
.sched
.barrier_release_cond
);
1563 ccan_list_head_init(&vm
->ractor
.sched
.grq
);
1564 ccan_list_head_init(&vm
->ractor
.sched
.timeslice_threads
);
1565 ccan_list_head_init(&vm
->ractor
.sched
.running_threads
);
1567 VM_ASSERT(sched
->is_running
);
1568 sched
->is_running_timeslice
= false;
1570 if (sched
->running
!= th
) {
1571 thread_sched_to_running(sched
, th
);
1574 thread_sched_setup_running_threads(sched
, th
->ractor
, vm
, th
, NULL
, NULL
);
1577 #ifdef RB_THREAD_T_HAS_NATIVE_ID
1579 th
->nt
->tid
= get_native_thread_id();
1586 #ifdef RB_THREAD_LOCAL_SPECIFIER
1587 static RB_THREAD_LOCAL_SPECIFIER rb_thread_t
*ruby_native_thread
;
1589 static pthread_key_t ruby_native_thread_key
;
1596 // This function can be called from signal handler
1597 // RUBY_DEBUG_LOG("i:%d", i);
1601 ruby_thread_from_native(void)
1603 #ifdef RB_THREAD_LOCAL_SPECIFIER
1604 return ruby_native_thread
;
1606 return pthread_getspecific(ruby_native_thread_key
);
1611 ruby_thread_set_native(rb_thread_t
*th
)
1615 ccan_list_node_init(&th
->sched
.node
.ubf
);
1622 rb_ractor_set_current_ec(th
->ractor
, th
->ec
);
1624 #ifdef RB_THREAD_LOCAL_SPECIFIER
1625 ruby_native_thread
= th
;
1628 return pthread_setspecific(ruby_native_thread_key
, th
) == 0;
1632 static void native_thread_setup(struct rb_native_thread
*nt
);
1633 static void native_thread_setup_on_thread(struct rb_native_thread
*nt
);
1636 Init_native_thread(rb_thread_t
*main_th
)
1638 #if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
1639 if (condattr_monotonic
) {
1640 int r
= pthread_condattr_init(condattr_monotonic
);
1642 r
= pthread_condattr_setclock(condattr_monotonic
, CLOCK_MONOTONIC
);
1644 if (r
) condattr_monotonic
= NULL
;
1648 #ifndef RB_THREAD_LOCAL_SPECIFIER
1649 if (pthread_key_create(&ruby_native_thread_key
, 0) == EAGAIN
) {
1650 rb_bug("pthread_key_create failed (ruby_native_thread_key)");
1652 if (pthread_key_create(&ruby_current_ec_key
, 0) == EAGAIN
) {
1653 rb_bug("pthread_key_create failed (ruby_current_ec_key)");
1656 ruby_posix_signal(SIGVTALRM
, null_func
);
1659 rb_vm_t
*vm
= main_th
->vm
;
1660 rb_native_mutex_initialize(&vm
->ractor
.sched
.lock
);
1661 rb_native_cond_initialize(&vm
->ractor
.sched
.cond
);
1662 rb_native_cond_initialize(&vm
->ractor
.sched
.barrier_complete_cond
);
1663 rb_native_cond_initialize(&vm
->ractor
.sched
.barrier_release_cond
);
1665 ccan_list_head_init(&vm
->ractor
.sched
.grq
);
1666 ccan_list_head_init(&vm
->ractor
.sched
.timeslice_threads
);
1667 ccan_list_head_init(&vm
->ractor
.sched
.running_threads
);
1669 // setup main thread
1670 main_th
->nt
->thread_id
= pthread_self();
1671 main_th
->nt
->serial
= 1;
1672 #ifdef RUBY_NT_SERIAL
1675 ruby_thread_set_native(main_th
);
1676 native_thread_setup(main_th
->nt
);
1677 native_thread_setup_on_thread(main_th
->nt
);
1679 TH_SCHED(main_th
)->running
= main_th
;
1680 main_th
->has_dedicated_nt
= 1;
1682 thread_sched_setup_running_threads(TH_SCHED(main_th
), main_th
->ractor
, vm
, main_th
, NULL
, NULL
);
1685 main_th
->nt
->dedicated
= 1;
1686 main_th
->nt
->vm
= vm
;
1689 vm
->ractor
.sched
.dnt_cnt
= 1;
1692 extern int ruby_mn_threads_enabled
;
1695 ruby_mn_threads_params(void)
1697 rb_vm_t
*vm
= GET_VM();
1698 rb_ractor_t
*main_ractor
= GET_RACTOR();
1700 const char *mn_threads_cstr
= getenv("RUBY_MN_THREADS");
1701 bool enable_mn_threads
= false;
1703 if (USE_MN_THREADS
&& mn_threads_cstr
&& (enable_mn_threads
= atoi(mn_threads_cstr
) > 0)) {
1705 ruby_mn_threads_enabled
= 1;
1707 main_ractor
->threads
.sched
.enable_mn_threads
= enable_mn_threads
;
1709 const char *max_cpu_cstr
= getenv("RUBY_MAX_CPU");
1710 const int default_max_cpu
= 8; // TODO: CPU num?
1711 int max_cpu
= default_max_cpu
;
1713 if (USE_MN_THREADS
&& max_cpu_cstr
) {
1714 int given_max_cpu
= atoi(max_cpu_cstr
);
1715 if (given_max_cpu
> 0) {
1716 max_cpu
= given_max_cpu
;
1720 vm
->ractor
.sched
.max_cpu
= max_cpu
;
1724 native_thread_dedicated_inc(rb_vm_t
*vm
, rb_ractor_t
*cr
, struct rb_native_thread
*nt
)
1726 RUBY_DEBUG_LOG("nt:%d %d->%d", nt
->serial
, nt
->dedicated
, nt
->dedicated
+ 1);
1728 if (nt
->dedicated
== 0) {
1729 ractor_sched_lock(vm
, cr
);
1731 vm
->ractor
.sched
.snt_cnt
--;
1732 vm
->ractor
.sched
.dnt_cnt
++;
1734 ractor_sched_unlock(vm
, cr
);
1741 native_thread_dedicated_dec(rb_vm_t
*vm
, rb_ractor_t
*cr
, struct rb_native_thread
*nt
)
1743 RUBY_DEBUG_LOG("nt:%d %d->%d", nt
->serial
, nt
->dedicated
, nt
->dedicated
- 1);
1744 VM_ASSERT(nt
->dedicated
> 0);
1747 if (nt
->dedicated
== 0) {
1748 ractor_sched_lock(vm
, cr
);
1750 nt
->vm
->ractor
.sched
.snt_cnt
++;
1751 nt
->vm
->ractor
.sched
.dnt_cnt
--;
1753 ractor_sched_unlock(vm
, cr
);
1758 native_thread_assign(struct rb_native_thread
*nt
, rb_thread_t
*th
)
1760 #if USE_RUBY_DEBUG_LOG
1763 RUBY_DEBUG_LOG("th:%d nt:%d->%d", (int)th
->serial
, (int)th
->nt
->serial
, (int)nt
->serial
);
1766 RUBY_DEBUG_LOG("th:%d nt:NULL->%d", (int)th
->serial
, (int)nt
->serial
);
1771 RUBY_DEBUG_LOG("th:%d nt:%d->NULL", (int)th
->serial
, (int)th
->nt
->serial
);
1774 RUBY_DEBUG_LOG("th:%d nt:NULL->NULL", (int)th
->serial
);
1783 native_thread_destroy(struct rb_native_thread
*nt
)
1786 rb_native_cond_destroy(&nt
->cond
.readyq
);
1788 if (&nt
->cond
.readyq
!= &nt
->cond
.intr
) {
1789 rb_native_cond_destroy(&nt
->cond
.intr
);
1792 RB_ALTSTACK_FREE(nt
->altstack
);
1793 ruby_xfree(nt
->nt_context
);
1798 #if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
1799 #define STACKADDR_AVAILABLE 1
1800 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
1801 #define STACKADDR_AVAILABLE 1
1802 #undef MAINSTACKADDR_AVAILABLE
1803 #define MAINSTACKADDR_AVAILABLE 1
1804 void *pthread_get_stackaddr_np(pthread_t
);
1805 size_t pthread_get_stacksize_np(pthread_t
);
1806 #elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1807 #define STACKADDR_AVAILABLE 1
1808 #elif defined HAVE_PTHREAD_GETTHRDS_NP
1809 #define STACKADDR_AVAILABLE 1
1810 #elif defined __HAIKU__
1811 #define STACKADDR_AVAILABLE 1
1814 #ifndef MAINSTACKADDR_AVAILABLE
1815 # ifdef STACKADDR_AVAILABLE
1816 # define MAINSTACKADDR_AVAILABLE 1
1818 # define MAINSTACKADDR_AVAILABLE 0
1821 #if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
1822 # define get_main_stack(addr, size) get_stack(addr, size)
1825 #ifdef STACKADDR_AVAILABLE
1827 * Get the initial address and size of current thread's stack
1830 get_stack(void **addr
, size_t *size
)
1832 #define CHECK_ERR(expr) \
1833 {int err = (expr); if (err) return err;}
1834 #ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
1835 pthread_attr_t attr
;
1837 STACK_GROW_DIR_DETECTION
;
1838 CHECK_ERR(pthread_getattr_np(pthread_self(), &attr
));
1839 # ifdef HAVE_PTHREAD_ATTR_GETSTACK
1840 CHECK_ERR(pthread_attr_getstack(&attr
, addr
, size
));
1841 STACK_DIR_UPPER((void)0, (void)(*addr
= (char *)*addr
+ *size
));
1843 CHECK_ERR(pthread_attr_getstackaddr(&attr
, addr
));
1844 CHECK_ERR(pthread_attr_getstacksize(&attr
, size
));
1846 # ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
1847 CHECK_ERR(pthread_attr_getguardsize(&attr
, &guard
));
1849 guard
= getpagesize();
1852 pthread_attr_destroy(&attr
);
1853 #elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
1854 pthread_attr_t attr
;
1855 CHECK_ERR(pthread_attr_init(&attr
));
1856 CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr
));
1857 # ifdef HAVE_PTHREAD_ATTR_GETSTACK
1858 CHECK_ERR(pthread_attr_getstack(&attr
, addr
, size
));
1860 CHECK_ERR(pthread_attr_getstackaddr(&attr
, addr
));
1861 CHECK_ERR(pthread_attr_getstacksize(&attr
, size
));
1863 STACK_DIR_UPPER((void)0, (void)(*addr
= (char *)*addr
+ *size
));
1864 pthread_attr_destroy(&attr
);
1865 #elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
1866 pthread_t th
= pthread_self();
1867 *addr
= pthread_get_stackaddr_np(th
);
1868 *size
= pthread_get_stacksize_np(th
);
1869 #elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1871 # if defined HAVE_THR_STKSEGMENT /* Solaris */
1872 CHECK_ERR(thr_stksegment(&stk
));
1873 # else /* OpenBSD */
1874 CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk
));
1877 *size
= stk
.ss_size
;
1878 #elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
1879 pthread_t th
= pthread_self();
1880 struct __pthrdsinfo thinfo
;
1882 int regsiz
=sizeof(reg
);
1883 CHECK_ERR(pthread_getthrds_np(&th
, PTHRDSINFO_QUERY_ALL
,
1884 &thinfo
, sizeof(thinfo
),
1886 *addr
= thinfo
.__pi_stackaddr
;
1887 /* Must not use thinfo.__pi_stacksize for size.
1888 It is around 3KB smaller than the correct size
1889 calculated by thinfo.__pi_stackend - thinfo.__pi_stackaddr. */
1890 *size
= thinfo
.__pi_stackend
- thinfo
.__pi_stackaddr
;
1891 STACK_DIR_UPPER((void)0, (void)(*addr
= (char *)*addr
+ *size
));
1892 #elif defined __HAIKU__
1894 STACK_GROW_DIR_DETECTION
;
1895 CHECK_ERR(get_thread_info(find_thread(NULL
), &info
));
1896 *addr
= info
.stack_base
;
1897 *size
= (uintptr_t)info
.stack_end
- (uintptr_t)info
.stack_base
;
1898 STACK_DIR_UPPER((void)0, (void)(*addr
= (char *)*addr
+ *size
));
1900 #error STACKADDR_AVAILABLE is defined but not implemented.
1908 rb_nativethread_id_t id
;
1909 size_t stack_maxsize
;
1911 } native_main_thread
;
1913 #ifdef STACK_END_ADDRESS
1914 extern void *STACK_END_ADDRESS
;
1918 RUBY_STACK_SPACE_LIMIT
= 1024 * 1024, /* 1024KB */
1919 RUBY_STACK_SPACE_RATIO
= 5
1923 space_size(size_t stack_size
)
1925 size_t space_size
= stack_size
/ RUBY_STACK_SPACE_RATIO
;
1926 if (space_size
> RUBY_STACK_SPACE_LIMIT
) {
1927 return RUBY_STACK_SPACE_LIMIT
;
1935 static __attribute__((noinline
)) void
1936 reserve_stack(volatile char *limit
, size_t size
)
1939 # error needs alloca()
1942 volatile char buf
[0x100];
1943 enum {stack_check_margin
= 0x1000}; /* for -fstack-check */
1945 STACK_GROW_DIR_DETECTION
;
1947 if (!getrlimit(RLIMIT_STACK
, &rl
) && rl
.rlim_cur
== RLIM_INFINITY
)
1950 if (size
< stack_check_margin
) return;
1951 size
-= stack_check_margin
;
1953 size
-= sizeof(buf
); /* margin */
1954 if (IS_STACK_DIR_UPPER()) {
1955 const volatile char *end
= buf
+ sizeof(buf
);
1958 /* |<-bottom (=limit(a)) top->|
1959 * | .. |<-buf 256B |<-end | stack check |
1960 * | 256B | =size= | margin (4KB)|
1961 * | =size= limit(b)->| 256B | |
1962 * | | alloca(sz) | | |
1963 * | .. |<-buf |<-limit(c) [sz-1]->0> | |
1965 size_t sz
= limit
- end
;
1973 /* |<-top (=limit(a)) bottom->|
1974 * | .. | 256B buf->| | stack check |
1975 * | 256B | =size= | margin (4KB)|
1976 * | =size= limit(b)->| 256B | |
1977 * | | alloca(sz) | | |
1978 * | .. | buf->| limit(c)-><0> | |
1980 size_t sz
= buf
- limit
;
1987 # define reserve_stack(limit, size) ((void)(limit), (void)(size))
1991 native_thread_init_main_thread_stack(void *addr
)
1993 native_main_thread
.id
= pthread_self();
1994 #ifdef RUBY_ASAN_ENABLED
1995 addr
= asan_get_real_stack_addr((void *)addr
);
1998 #if MAINSTACKADDR_AVAILABLE
1999 if (native_main_thread
.stack_maxsize
) return;
2003 if (get_main_stack(&stackaddr
, &size
) == 0) {
2004 native_main_thread
.stack_maxsize
= size
;
2005 native_main_thread
.stack_start
= stackaddr
;
2006 reserve_stack(stackaddr
, size
);
2011 #ifdef STACK_END_ADDRESS
2012 native_main_thread
.stack_start
= STACK_END_ADDRESS
;
2014 if (!native_main_thread
.stack_start
||
2015 STACK_UPPER((VALUE
*)(void *)&addr
,
2016 native_main_thread
.stack_start
> (VALUE
*)addr
,
2017 native_main_thread
.stack_start
< (VALUE
*)addr
)) {
2018 native_main_thread
.stack_start
= (VALUE
*)addr
;
2022 #if defined(HAVE_GETRLIMIT)
2023 #if defined(PTHREAD_STACK_DEFAULT)
2024 # if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
2025 # error "PTHREAD_STACK_DEFAULT is too small"
2027 size_t size
= PTHREAD_STACK_DEFAULT
;
2029 size_t size
= RUBY_VM_THREAD_VM_STACK_SIZE
;
2032 int pagesize
= getpagesize();
2034 STACK_GROW_DIR_DETECTION
;
2035 if (getrlimit(RLIMIT_STACK
, &rlim
) == 0) {
2036 size
= (size_t)rlim
.rlim_cur
;
2038 addr
= native_main_thread
.stack_start
;
2039 if (IS_STACK_DIR_UPPER()) {
2040 space
= ((size_t)((char *)addr
+ size
) / pagesize
) * pagesize
- (size_t)addr
;
2043 space
= (size_t)addr
- ((size_t)((char *)addr
- size
) / pagesize
+ 1) * pagesize
;
2045 native_main_thread
.stack_maxsize
= space
;
2049 #if MAINSTACKADDR_AVAILABLE
2052 /* If addr is out of range of main-thread stack range estimation, */
2053 /* it should be on co-routine (alternative stack). [Feature #2294] */
2056 STACK_GROW_DIR_DETECTION
;
2058 if (IS_STACK_DIR_UPPER()) {
2059 start
= native_main_thread
.stack_start
;
2060 end
= (char *)native_main_thread
.stack_start
+ native_main_thread
.stack_maxsize
;
2063 start
= (char *)native_main_thread
.stack_start
- native_main_thread
.stack_maxsize
;
2064 end
= native_main_thread
.stack_start
;
2067 if ((void *)addr
< start
|| (void *)addr
> end
) {
2069 native_main_thread
.stack_start
= (VALUE
*)addr
;
2070 native_main_thread
.stack_maxsize
= 0; /* unknown */
2075 #define CHECK_ERR(expr) \
2076 {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
2079 native_thread_init_stack(rb_thread_t
*th
, void *local_in_parent_frame
)
2081 rb_nativethread_id_t curr
= pthread_self();
2082 #ifdef RUBY_ASAN_ENABLED
2083 local_in_parent_frame
= asan_get_real_stack_addr(local_in_parent_frame
);
2084 th
->ec
->machine
.asan_fake_stack_handle
= asan_get_thread_fake_stack_handle();
2087 if (!native_main_thread
.id
) {
2088 /* This thread is the first thread, must be the main thread -
2089 * configure the native_main_thread object */
2090 native_thread_init_main_thread_stack(local_in_parent_frame
);
2093 if (pthread_equal(curr
, native_main_thread
.id
)) {
2094 th
->ec
->machine
.stack_start
= native_main_thread
.stack_start
;
2095 th
->ec
->machine
.stack_maxsize
= native_main_thread
.stack_maxsize
;
2098 #ifdef STACKADDR_AVAILABLE
2099 if (th_has_dedicated_nt(th
)) {
2103 if (get_stack(&start
, &size
) == 0) {
2104 uintptr_t diff
= (uintptr_t)start
- (uintptr_t)local_in_parent_frame
;
2105 th
->ec
->machine
.stack_start
= local_in_parent_frame
;
2106 th
->ec
->machine
.stack_maxsize
= size
- diff
;
2110 rb_raise(rb_eNotImpError
, "ruby engine can initialize only in the main thread");
2119 struct rb_native_thread
*nt
;
2123 nt_start(void *ptr
);
2126 native_thread_create0(struct rb_native_thread
*nt
)
2129 pthread_attr_t attr
;
2131 const size_t stack_size
= nt
->vm
->default_params
.thread_machine_stack_size
;
2132 const size_t space
= space_size(stack_size
);
2134 nt
->machine_stack_maxsize
= stack_size
- space
;
2136 #ifdef USE_SIGALTSTACK
2137 nt
->altstack
= rb_allocate_sigaltstack();
2140 CHECK_ERR(pthread_attr_init(&attr
));
2142 # ifdef PTHREAD_STACK_MIN
2143 RUBY_DEBUG_LOG("stack size: %lu", (unsigned long)stack_size
);
2144 CHECK_ERR(pthread_attr_setstacksize(&attr
, stack_size
));
2147 # ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
2148 CHECK_ERR(pthread_attr_setinheritsched(&attr
, PTHREAD_INHERIT_SCHED
));
2150 CHECK_ERR(pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
));
2152 err
= pthread_create(&nt
->thread_id
, &attr
, nt_start
, nt
);
2154 RUBY_DEBUG_LOG("nt:%d err:%d", (int)nt
->serial
, err
);
2156 CHECK_ERR(pthread_attr_destroy(&attr
));
2162 native_thread_setup(struct rb_native_thread
*nt
)
2165 rb_native_cond_initialize(&nt
->cond
.readyq
);
2167 if (&nt
->cond
.readyq
!= &nt
->cond
.intr
) {
2168 rb_native_cond_initialize(&nt
->cond
.intr
);
2173 native_thread_setup_on_thread(struct rb_native_thread
*nt
)
2176 #ifdef RB_THREAD_T_HAS_NATIVE_ID
2177 nt
->tid
= get_native_thread_id();
2180 // init signal handler
2181 RB_ALTSTACK_INIT(nt
->altstack
, nt
->altstack
);
2184 static struct rb_native_thread
*
2185 native_thread_alloc(void)
2187 struct rb_native_thread
*nt
= ZALLOC(struct rb_native_thread
);
2188 native_thread_setup(nt
);
2191 nt
->nt_context
= ruby_xmalloc(sizeof(struct coroutine_context
));
2194 #if USE_RUBY_DEBUG_LOG
2195 static rb_atomic_t nt_serial
= 2;
2196 nt
->serial
= RUBY_ATOMIC_FETCH_ADD(nt_serial
, 1);
2202 native_thread_create_dedicated(rb_thread_t
*th
)
2204 th
->nt
= native_thread_alloc();
2205 th
->nt
->vm
= th
->vm
;
2206 th
->nt
->running_thread
= th
;
2207 th
->nt
->dedicated
= 1;
2210 size_t vm_stack_word_size
= th
->vm
->default_params
.thread_vm_stack_size
/ sizeof(VALUE
);
2211 void *vm_stack
= ruby_xmalloc(vm_stack_word_size
* sizeof(VALUE
));
2212 th
->sched
.malloc_stack
= true;
2213 rb_ec_initialize_vm_stack(th
->ec
, vm_stack
, vm_stack_word_size
);
2214 th
->sched
.context_stack
= vm_stack
;
2217 thread_sched_to_ready(TH_SCHED(th
), th
);
2219 return native_thread_create0(th
->nt
);
2223 call_thread_start_func_2(rb_thread_t
*th
)
2225 /* Capture the address of a local in this stack frame to mark the beginning of the
2226 machine stack for this thread. This is required even if we can tell the real
2227 stack beginning from the pthread API in native_thread_init_stack, because
2228 glibc stores some of its own data on the stack before calling into user code
2229 on a new thread, and replacing that data on fiber-switch would break it (see
2231 VALUE stack_start
= 0;
2232 VALUE
*stack_start_addr
= asan_get_real_stack_addr(&stack_start
);
2234 native_thread_init_stack(th
, stack_start_addr
);
2235 thread_start_func_2(th
, th
->ec
->machine
.stack_start
);
2241 struct rb_native_thread
*nt
= (struct rb_native_thread
*)ptr
;
2242 rb_vm_t
*vm
= nt
->vm
;
2244 native_thread_setup_on_thread(nt
);
2247 #ifdef RB_THREAD_T_HAS_NATIVE_ID
2248 nt
->tid
= get_native_thread_id();
2251 #if USE_RUBY_DEBUG_LOG && defined(RUBY_NT_SERIAL)
2252 ruby_nt_serial
= nt
->serial
;
2255 RUBY_DEBUG_LOG("nt:%u", nt
->serial
);
2257 if (!nt
->dedicated
) {
2258 coroutine_initialize_main(nt
->nt_context
);
2262 if (nt
->dedicated
) {
2263 // wait running turn
2264 rb_thread_t
*th
= nt
->running_thread
;
2265 struct rb_thread_sched
*sched
= TH_SCHED(th
);
2267 RUBY_DEBUG_LOG("on dedicated th:%u", rb_th_serial(th
));
2268 ruby_thread_set_native(th
);
2270 thread_sched_lock(sched
, th
);
2272 if (sched
->running
== th
) {
2273 thread_sched_add_running_thread(sched
, th
);
2275 thread_sched_wait_running_turn(sched
, th
, false);
2277 thread_sched_unlock(sched
, th
);
2280 call_thread_start_func_2(th
);
2281 break; // TODO: allow to change to the SNT
2284 RUBY_DEBUG_LOG("check next");
2285 rb_ractor_t
*r
= ractor_sched_deq(vm
, NULL
);
2288 struct rb_thread_sched
*sched
= &r
->threads
.sched
;
2290 thread_sched_lock(sched
, NULL
);
2292 rb_thread_t
*next_th
= sched
->running
;
2294 if (next_th
&& next_th
->nt
== NULL
) {
2295 RUBY_DEBUG_LOG("nt:%d next_th:%d", (int)nt
->serial
, (int)next_th
->serial
);
2296 thread_sched_switch0(nt
->nt_context
, next_th
, nt
, false);
2299 RUBY_DEBUG_LOG("no schedulable threads -- next_th:%p", next_th
);
2302 thread_sched_unlock(sched
, NULL
);
2305 // timeout -> deleted.
2309 if (nt
->dedicated
) {
2310 // SNT becomes DNT while running
2319 static int native_thread_create_shared(rb_thread_t
*th
);
2322 static void nt_free_stack(void *mstack
);
2326 rb_threadptr_remove(rb_thread_t
*th
)
2329 if (th
->sched
.malloc_stack
) {
2334 rb_vm_t
*vm
= th
->vm
;
2335 th
->sched
.finished
= false;
2339 ccan_list_add(&vm
->ractor
.sched
.zombie_threads
, &th
->sched
.node
.zombie_threads
);
2347 rb_threadptr_sched_free(rb_thread_t
*th
)
2350 if (th
->sched
.malloc_stack
) {
2352 ruby_xfree(th
->sched
.context_stack
);
2353 native_thread_destroy(th
->nt
);
2356 nt_free_stack(th
->sched
.context_stack
);
2357 // TODO: how to free nt and nt->altstack?
2360 ruby_xfree(th
->sched
.context
);
2361 VM_ASSERT((th
->sched
.context
= NULL
) == NULL
);
2363 ruby_xfree(th
->sched
.context_stack
);
2364 native_thread_destroy(th
->nt
);
2371 rb_thread_sched_mark_zombies(rb_vm_t
*vm
)
2373 if (!ccan_list_empty(&vm
->ractor
.sched
.zombie_threads
)) {
2374 rb_thread_t
*zombie_th
, *next_zombie_th
;
2375 ccan_list_for_each_safe(&vm
->ractor
.sched
.zombie_threads
, zombie_th
, next_zombie_th
, sched
.node
.zombie_threads
) {
2376 if (zombie_th
->sched
.finished
) {
2377 ccan_list_del_init(&zombie_th
->sched
.node
.zombie_threads
);
2380 rb_gc_mark(zombie_th
->self
);
2387 native_thread_create(rb_thread_t
*th
)
2389 VM_ASSERT(th
->nt
== 0);
2390 RUBY_DEBUG_LOG("th:%d has_dnt:%d", th
->serial
, th
->has_dedicated_nt
);
2391 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_STARTED
, th
);
2393 if (!th
->ractor
->threads
.sched
.enable_mn_threads
) {
2394 th
->has_dedicated_nt
= 1;
2397 if (th
->has_dedicated_nt
) {
2398 return native_thread_create_dedicated(th
);
2401 return native_thread_create_shared(th
);
#if USE_NATIVE_THREAD_PRIORITY

// Map th->priority onto the POSIX scheduling priority of its native thread
// (negated, then clamped into [sched_get_priority_min, max] for the
// thread's current policy). No-op without _POSIX_PRIORITY_SCHEDULING.
static void
native_thread_apply_priority(rb_thread_t *th)
{
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    int policy;
    int priority = 0 - th->priority;
    int max, min;

    pthread_getschedparam(th->nt->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    if (min > priority) {
        priority = min;
    }
    else if (max < priority) {
        priority = max;
    }

    sp.sched_priority = priority;
    pthread_setschedparam(th->nt->thread_id, policy, &sp);
#else
    /* not touched */
#endif
}

#endif /* USE_NATIVE_THREAD_PRIORITY */
2436 native_fd_select(int n
, rb_fdset_t
*readfds
, rb_fdset_t
*writefds
, rb_fdset_t
*exceptfds
, struct timeval
*timeout
, rb_thread_t
*th
)
2438 return rb_fd_select(n
, readfds
, writefds
, exceptfds
, timeout
);
2442 ubf_pthread_cond_signal(void *ptr
)
2444 rb_thread_t
*th
= (rb_thread_t
*)ptr
;
2445 RUBY_DEBUG_LOG("th:%u on nt:%d", rb_th_serial(th
), (int)th
->nt
->serial
);
2446 rb_native_cond_signal(&th
->nt
->cond
.intr
);
2450 native_cond_sleep(rb_thread_t
*th
, rb_hrtime_t
*rel
)
2452 rb_nativethread_lock_t
*lock
= &th
->interrupt_lock
;
2453 rb_nativethread_cond_t
*cond
= &th
->nt
->cond
.intr
;
2455 /* Solaris cond_timedwait() return EINVAL if an argument is greater than
2456 * current_time + 100,000,000. So cut up to 100,000,000. This is
2457 * considered as a kind of spurious wakeup. The caller to native_sleep
2458 * should care about spurious wakeup.
2460 * See also [Bug #1341] [ruby-core:29702]
2461 * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
2463 const rb_hrtime_t max
= (rb_hrtime_t
)100000000 * RB_HRTIME_PER_SEC
;
2465 THREAD_BLOCKING_BEGIN(th
);
2467 rb_native_mutex_lock(lock
);
2468 th
->unblock
.func
= ubf_pthread_cond_signal
;
2469 th
->unblock
.arg
= th
;
2471 if (RUBY_VM_INTERRUPTED(th
->ec
)) {
2472 /* interrupted. return immediate */
2473 RUBY_DEBUG_LOG("interrupted before sleep th:%u", rb_th_serial(th
));
2477 rb_native_cond_wait(cond
, lock
);
2486 end
= native_cond_timeout(cond
, *rel
);
2487 native_cond_timedwait(cond
, lock
, &end
);
2490 th
->unblock
.func
= 0;
2492 rb_native_mutex_unlock(lock
);
2494 THREAD_BLOCKING_END(th
);
2496 RUBY_DEBUG_LOG("done th:%u", rb_th_serial(th
));
2500 static CCAN_LIST_HEAD(ubf_list_head
);
2501 static rb_nativethread_lock_t ubf_list_lock
= RB_NATIVETHREAD_LOCK_INIT
;
2504 ubf_list_atfork(void)
2506 ccan_list_head_init(&ubf_list_head
);
2507 rb_native_mutex_initialize(&ubf_list_lock
);
2510 RBIMPL_ATTR_MAYBE_UNUSED()
2512 ubf_list_contain_p(rb_thread_t
*th
)
2514 rb_thread_t
*list_th
;
2515 ccan_list_for_each(&ubf_list_head
, list_th
, sched
.node
.ubf
) {
2516 if (list_th
== th
) return true;
2521 /* The thread 'th' is registered to be trying unblock. */
2523 register_ubf_list(rb_thread_t
*th
)
2525 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th
));
2526 struct ccan_list_node
*node
= &th
->sched
.node
.ubf
;
2528 VM_ASSERT(th
->unblock
.func
!= NULL
);
2530 rb_native_mutex_lock(&ubf_list_lock
);
2532 // check not connected yet
2533 if (ccan_list_empty((struct ccan_list_head
*)node
)) {
2534 VM_ASSERT(!ubf_list_contain_p(th
));
2535 ccan_list_add(&ubf_list_head
, node
);
2538 rb_native_mutex_unlock(&ubf_list_lock
);
2540 timer_thread_wakeup();
2543 /* The thread 'th' is unblocked. It no longer need to be registered. */
2545 unregister_ubf_list(rb_thread_t
*th
)
2547 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th
));
2548 struct ccan_list_node
*node
= &th
->sched
.node
.ubf
;
2550 /* we can't allow re-entry into ubf_list_head */
2551 VM_ASSERT(th
->unblock
.func
== NULL
);
2553 if (!ccan_list_empty((struct ccan_list_head
*)node
)) {
2554 rb_native_mutex_lock(&ubf_list_lock
);
2556 VM_ASSERT(ubf_list_contain_p(th
));
2557 ccan_list_del_init(node
);
2559 rb_native_mutex_unlock(&ubf_list_lock
);
2564 * send a signal to intent that a target thread return from blocking syscall.
2565 * Maybe any signal is ok, but we chose SIGVTALRM.
2568 ubf_wakeup_thread(rb_thread_t
*th
)
2570 RUBY_DEBUG_LOG("th:%u thread_id:%p", rb_th_serial(th
), (void *)th
->nt
->thread_id
);
2572 pthread_kill(th
->nt
->thread_id
, SIGVTALRM
);
2576 ubf_select(void *ptr
)
2578 rb_thread_t
*th
= (rb_thread_t
*)ptr
;
2579 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th
));
2580 ubf_wakeup_thread(th
);
2581 register_ubf_list(th
);
2585 ubf_threads_empty(void)
2587 return ccan_list_empty(&ubf_list_head
) != 0;
2591 ubf_wakeup_all_threads(void)
2593 if (!ubf_threads_empty()) {
2595 rb_native_mutex_lock(&ubf_list_lock
);
2597 ccan_list_for_each(&ubf_list_head
, th
, sched
.node
.ubf
) {
2598 ubf_wakeup_thread(th
);
2601 rb_native_mutex_unlock(&ubf_list_lock
);
2605 #else /* USE_UBF_LIST */
2606 #define register_ubf_list(th) (void)(th)
2607 #define unregister_ubf_list(th) (void)(th)
2608 #define ubf_select 0
2609 static void ubf_wakeup_all_threads(void) { return; }
2610 static bool ubf_threads_empty(void) { return true; }
2611 #define ubf_list_atfork() do {} while (0)
2612 #endif /* USE_UBF_LIST */
2615 #define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
// rb_thread_wakeup_timer_thread: async-signal-safe wakeup used from signal
// handlers — forces the timer thread awake and marks a trap interrupt on the
// main thread's execution context.  `sig` is accepted for the handler
// signature; no locking primitive is touched here.
// NOTE(review): the guard lines checking main_th / main_th_ec for NULL are
// missing from this extraction; code text is kept verbatim.
2618 rb_thread_wakeup_timer_thread(int sig
)
2620 // This function can be called from signal handlers so that
2621 // pthread_mutex_lock() should not be used.
2623 // wakeup timer thread
2624 timer_thread_wakeup_force();
2626 // interrupt main thread if main thread is available
2627 if (system_working
) {
2628 rb_vm_t
*vm
= GET_VM();
2629 rb_thread_t
*main_th
= vm
->ractor
.main_thread
;
// ec is read once (volatile) because the main thread may be mutating it.
2632 volatile rb_execution_context_t
*main_th_ec
= ACCESS_ONCE(rb_execution_context_t
*, main_th
->ec
);
2635 RUBY_VM_SET_TRAP_INTERRUPT(main_th_ec
);
// If the VM declares its UBF async-signal-safe, invoke it directly here.
2637 if (vm
->ubf_async_safe
&& main_th
->unblock
.func
) {
2638 (main_th
->unblock
.func
)(main_th
->unblock
.arg
);
2645 #define CLOSE_INVALIDATE_PAIR(expr) \
2646 close_invalidate_pair(expr,"close_invalidate: "#expr)
// Close *fdp and invalidate the slot (-1).  The slot is invalidated before
// close(2) so observers never see a stale descriptor; a failing close is
// reported via async_bug_fd (async-signal-safe diagnostics).
static void
close_invalidate(int *fdp, const char *msg)
{
    int fd = *fdp;

    *fdp = -1;
    if (close(fd) < 0) {
        async_bug_fd(msg, errno, fd);
    }
}
2659 close_invalidate_pair(int fds
[2], const char *msg
)
2661 if (USE_EVENTFD
&& fds
[0] == fds
[1]) {
2662 fds
[1] = -1; // disable write port first
2663 close_invalidate(&fds
[0], msg
);
2666 close_invalidate(&fds
[1], msg
);
2667 close_invalidate(&fds
[0], msg
);
// Turn on O_NONBLOCK for fd via fcntl(F_GETFL/F_SETFL).
// NOTE(review): the error paths were not visible in this extraction; raising
// with rb_sys_fail(0) on fcntl failure matches the surrounding conventions —
// confirm against upstream.
static void
set_nonblock(int fd)
{
    int oflags;
    int err;

    oflags = fcntl(fd, F_GETFL);
    if (oflags == -1)
        rb_sys_fail(0);
    oflags |= O_NONBLOCK;
    err = fcntl(fd, F_SETFL, oflags);
    if (err == -1)
        rb_sys_fail(0);
}
2686 /* communication pipe with timer thread and signal handler */
// setup_communication_pipe_internal: allocate the timer-thread wakeup channel
// into pipes[r, w].  Prefers a single eventfd (both slots get the same fd);
// falls back to a cloexec pipe with both ends made non-blocking.  If the
// pipes are already open, only asserts consistency.
// NOTE(review): the early-return, #endif and error-check lines are missing
// from this extraction; code text is kept verbatim.
2688 setup_communication_pipe_internal(int pipes
[2])
2692 if (pipes
[0] > 0 || pipes
[1] > 0) {
2693 VM_ASSERT(pipes
[0] > 0);
2694 VM_ASSERT(pipes
[1] > 0);
2699 * Don't bother with eventfd on ancient Linux 2.6.22..2.6.26 which were
2700 * missing EFD_* flags, they can fall back to pipe
2702 #if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
2703 pipes
[0] = pipes
[1] = eventfd(0, EFD_NONBLOCK
|EFD_CLOEXEC
);
2705 if (pipes
[0] >= 0) {
2706 rb_update_max_fd(pipes
[0]);
// Fallback: regular pipe, fatal if creation fails.
2711 err
= rb_cloexec_pipe(pipes
);
2713 rb_bug("can not create communication pipe");
2715 rb_update_max_fd(pipes
[0]);
2716 rb_update_max_fd(pipes
[1]);
2717 set_nonblock(pipes
[0]);
2718 set_nonblock(pipes
[1]);
2721 #if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
2722 # define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
2727 #if defined(__linux__)
2729 #elif defined(__APPLE__)
2730 /* Undocumented, and main thread seems unlimited */
2737 static VALUE
threadptr_invoke_proc_location(rb_thread_t
*th
);
// native_set_thread_name: label the current native thread for debuggers.
// Uses th->name when set; otherwise derives "basename:line" from the thread
// proc's source location, truncating with a trailing '*' if it exceeds
// THREAD_NAME_MAX.  No-op unless SET_CURRENT_THREAD_NAME is available.
// NOTE(review): several declaration/brace lines are missing from this
// extraction; code text is kept verbatim.
2740 native_set_thread_name(rb_thread_t
*th
)
2742 #ifdef SET_CURRENT_THREAD_NAME
2744 if (!NIL_P(loc
= th
->name
)) {
2745 SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc
));
2747 else if ((loc
= threadptr_invoke_proc_location(th
)) != Qnil
) {
2749 char buf
[THREAD_NAME_MAX
];
2753 name
= RSTRING_PTR(RARRAY_AREF(loc
, 0));
2754 p
= strrchr(name
, '/'); /* show only the basename of the path. */
2758 n
= snprintf(buf
, sizeof(buf
), "%s:%d", name
, NUM2INT(RARRAY_AREF(loc
, 1)));
// Mark truncation: overwrite the last visible character with '*'.
2762 if (len
>= sizeof(buf
)) {
2763 buf
[sizeof(buf
)-2] = '*';
2764 buf
[sizeof(buf
)-1] = '\0';
2766 SET_CURRENT_THREAD_NAME(buf
);
// native_set_another_thread_name: set a native thread's debugger-visible name
// from a Ruby String.  Copies into a THREAD_NAME_MAX buffer (truncating with
// NUL termination) when too long.  Platforms lacking SET_ANOTHER_THREAD_NAME
// can only rename the calling thread, hence the pthread_equal guard.
// NOTE(review): some brace/assignment lines are missing from this extraction;
// code text is kept verbatim.
2772 native_set_another_thread_name(rb_nativethread_id_t thread_id
, VALUE name
)
2774 #if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
2775 char buf
[THREAD_NAME_MAX
];
2777 # if !defined SET_ANOTHER_THREAD_NAME
2778 if (!pthread_equal(pthread_self(), thread_id
)) return;
2782 RSTRING_GETMEM(name
, s
, n
);
// Too long: copy the prefix into buf and NUL-terminate.
2783 if (n
>= (int)sizeof(buf
)) {
2784 memcpy(buf
, s
, sizeof(buf
)-1);
2785 buf
[sizeof(buf
)-1] = '\0';
2789 # if defined SET_ANOTHER_THREAD_NAME
2790 SET_ANOTHER_THREAD_NAME(thread_id
, s
);
2791 # elif defined SET_CURRENT_THREAD_NAME
2792 SET_CURRENT_THREAD_NAME(s
);
2797 #if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
// native_thread_native_thread_id: return the OS-level thread id for
// Thread#native_thread_id.  On Linux-style systems this is the cached tid;
// on macOS it is pthread_threadid_np() when available (checked via a weakly
// linked symbol on pre-10.6 SDKs) or the Mach thread port otherwise.
// Returns Qnil when the thread has no native thread attached.
2799 native_thread_native_thread_id(rb_thread_t
*target_th
)
2801 if (!target_th
->nt
) return Qnil
;
2803 #ifdef RB_THREAD_T_HAS_NATIVE_ID
2804 int tid
= target_th
->nt
->tid
;
2805 if (tid
== 0) return Qnil
;
2806 return INT2FIX(tid
);
2807 #elif defined(__APPLE__)
2809 /* The first condition is needed because MAC_OS_X_VERSION_10_6
2810 is not defined on 10.5, and while __POWERPC__ takes care of ppc/ppc64,
2811 i386 will be broken without this. Note, 10.5 is supported with GCC upstream,
2812 so it has C++17 and everything needed to build modern Ruby. */
2813 # if (!defined(MAC_OS_X_VERSION_10_6) || \
2814 (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6) || \
2815 defined(__POWERPC__) /* never defined for PowerPC platforms */)
2816 const bool no_pthread_threadid_np
= true;
2817 # define NO_PTHREAD_MACH_THREAD_NP 1
2818 # elif MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
2819 const bool no_pthread_threadid_np
= false;
2821 # if !(defined(__has_attribute) && __has_attribute(availability))
2822 /* __API_AVAILABLE macro does nothing on gcc */
2823 __attribute__((weak
)) int pthread_threadid_np(pthread_t
, uint64_t*);
2825 /* Check weakly linked symbol */
2826 const bool no_pthread_threadid_np
= !&pthread_threadid_np
;
2828 if (no_pthread_threadid_np
) {
2829 return ULL2NUM(pthread_mach_thread_np(pthread_self()));
2831 # ifndef NO_PTHREAD_MACH_THREAD_NP
2832 int e
= pthread_threadid_np(target_th
->nt
->thread_id
, &tid
);
2833 if (e
!= 0) rb_syserr_fail(e
, "pthread_threadid_np");
2834 return ULL2NUM((unsigned long long)tid
);
2838 # define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
2840 # define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0
// State of the singleton timer thread (struct header is outside this view).
// created_fork_gen tracks the fork generation the thread was created in, so
// TIMER_THREAD_CREATED_P() below can detect a stale post-fork instance.
2844 rb_serial_t created_fork_gen
;
2845 pthread_t pthread_id
;
// Wakeup channel written by timer_thread_wakeup_force() (may be an eventfd).
2847 int comm_fds
[2]; // r, w
2849 #if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
2850 int event_fd
; // kernel event queue fd (epoll/kqueue)
2852 #if HAVE_SYS_EPOLL_H && USE_MN_THREADS
2853 #define EPOLL_EVENTS_MAX 0x10
2854 struct epoll_event finished_events
[EPOLL_EVENTS_MAX
];
2855 #elif HAVE_SYS_EVENT_H && USE_MN_THREADS
2856 #define KQUEUE_EVENTS_MAX 0x10
2857 struct kevent finished_events
[KQUEUE_EVENTS_MAX
];
2860 // waiting threads list
2861 struct ccan_list_head waiting
; // waiting threads in ractors
2862 pthread_mutex_t waiting_lock
;
2864 .created_fork_gen
= 0,
2867 #define TIMER_THREAD_CREATED_P() (timer_th.created_fork_gen == current_fork_gen)
2869 static void timer_thread_check_timeslice(rb_vm_t
*vm
);
2870 static int timer_thread_set_timeout(rb_vm_t
*vm
);
2871 static void timer_thread_wakeup_thread(rb_thread_t
*th
);
2873 #include "thread_pthread_mn.c"
// timer_thread_set_timeout: compute the poll timeout (ms) for the timer
// thread's next wait.  A finite time-slice interval is used while any NT
// needs periodic service (active time-slice threads, registered UBFs, or
// pending global run queue entries); otherwise timeslice_wait_inf is set and
// the timeout is taken from the nearest sleeping thread's deadline (rounded
// up to a millisecond), or infinite when nobody is waiting on a timeout.
// NOTE(review): the declaration of `timeout`, the default-timeout branch and
// the return statement are missing from this extraction; text kept verbatim.
2876 timer_thread_set_timeout(rb_vm_t
*vm
)
2883 ractor_sched_lock(vm
, NULL
);
2885 if ( !ccan_list_empty(&vm
->ractor
.sched
.timeslice_threads
) // (1-1) Provide time slice for active NTs
2886 || !ubf_threads_empty() // (1-3) Periodic UBF
2887 || vm
->ractor
.sched
.grq_cnt
> 0 // (1-4) Lazy GRQ deq start
2890 RUBY_DEBUG_LOG("timeslice:%d ubf:%d grq:%d",
2891 !ccan_list_empty(&vm
->ractor
.sched
.timeslice_threads
),
2892 !ubf_threads_empty(),
2893 (vm
->ractor
.sched
.grq_cnt
> 0));
2896 vm
->ractor
.sched
.timeslice_wait_inf
= false;
2899 vm
->ractor
.sched
.timeslice_wait_inf
= true;
2902 ractor_sched_unlock(vm
, NULL
);
// No periodic work: derive the timeout from the earliest waiting deadline.
2904 if (vm
->ractor
.sched
.timeslice_wait_inf
) {
2905 rb_native_mutex_lock(&timer_th
.waiting_lock
);
2907 rb_thread_t
*th
= ccan_list_top(&timer_th
.waiting
, rb_thread_t
, sched
.waiting_reason
.node
);
2908 if (th
&& (th
->sched
.waiting_reason
.flags
& thread_sched_waiting_timeout
)) {
2909 rb_hrtime_t now
= rb_hrtime_now();
2910 rb_hrtime_t hrrel
= rb_hrtime_sub(th
->sched
.waiting_reason
.data
.timeout
, now
);
2912 RUBY_DEBUG_LOG("th:%u now:%lu rel:%lu", rb_th_serial(th
), (unsigned long)now
, (unsigned long)hrrel
);
// Round up to whole milliseconds so we never wake before the deadline.
2915 timeout
= (int)((hrrel
+ RB_HRTIME_PER_MSEC
- 1) / RB_HRTIME_PER_MSEC
); // ms
2918 rb_native_mutex_unlock(&timer_th
.waiting_lock
);
2921 RUBY_DEBUG_LOG("timeout:%d inf:%d", timeout
, (int)vm
->ractor
.sched
.timeslice_wait_inf
);
2923 // fprintf(stderr, "timeout:%d\n", timeout);
2929 timer_thread_check_signal(rb_vm_t
*vm
)
2931 // ruby_sigchld_handler(vm); TODO
2933 int signum
= rb_signal_buff_size();
2934 if (UNLIKELY(signum
> 0) && vm
->ractor
.main_thread
) {
2935 RUBY_DEBUG_LOG("signum:%d", signum
);
2936 threadptr_trap_interrupt(vm
->ractor
.main_thread
);
2941 timer_thread_check_exceed(rb_hrtime_t abs
, rb_hrtime_t now
)
2946 else if (abs
- now
< RB_HRTIME_PER_MSEC
) {
2947 return true; // too short time
2954 static rb_thread_t
*
2955 timer_thread_deq_wakeup(rb_vm_t
*vm
, rb_hrtime_t now
)
2957 rb_thread_t
*th
= ccan_list_top(&timer_th
.waiting
, rb_thread_t
, sched
.waiting_reason
.node
);
2960 (th
->sched
.waiting_reason
.flags
& thread_sched_waiting_timeout
) &&
2961 timer_thread_check_exceed(th
->sched
.waiting_reason
.data
.timeout
, now
)) {
2963 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th
));
2965 // delete from waiting list
2966 ccan_list_del_init(&th
->sched
.waiting_reason
.node
);
2969 th
->sched
.waiting_reason
.flags
= thread_sched_waiting_none
;
2970 th
->sched
.waiting_reason
.data
.result
= 0;
2979 timer_thread_wakeup_thread(rb_thread_t
*th
)
2981 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th
));
2982 struct rb_thread_sched
*sched
= TH_SCHED(th
);
2984 thread_sched_lock(sched
, th
);
2986 if (sched
->running
!= th
) {
2987 thread_sched_to_ready_common(sched
, th
, true, false);
2990 // will be release the execution right
2993 thread_sched_unlock(sched
, th
);
2997 timer_thread_check_timeout(rb_vm_t
*vm
)
2999 rb_hrtime_t now
= rb_hrtime_now();
3002 rb_native_mutex_lock(&timer_th
.waiting_lock
);
3004 while ((th
= timer_thread_deq_wakeup(vm
, now
)) != NULL
) {
3005 timer_thread_wakeup_thread(th
);
3008 rb_native_mutex_unlock(&timer_th
.waiting_lock
);
3012 timer_thread_check_timeslice(rb_vm_t
*vm
)
3016 ccan_list_for_each(&vm
->ractor
.sched
.timeslice_threads
, th
, sched
.node
.timeslice_threads
) {
3017 RUBY_DEBUG_LOG("timeslice th:%u", rb_th_serial(th
));
3018 RUBY_VM_SET_TIMER_INTERRUPT(th
->ec
);
// Body of a sanity assertion (header is outside this view): verify SIGVTALRM
// is not blocked in the current thread's signal mask — it must be deliverable
// for the UBF signalling scheme to work.
3026 pthread_sigmask(0, NULL
, &oldmask
);
3027 if (sigismember(&oldmask
, SIGVTALRM
)) {
3031 RUBY_DEBUG_LOG("ok");
3036 timer_thread_func(void *ptr
)
3038 rb_vm_t
*vm
= (rb_vm_t
*)ptr
;
3039 #if defined(RUBY_NT_SERIAL)
3040 ruby_nt_serial
= (rb_atomic_t
)-1;
3043 RUBY_DEBUG_LOG("started%s", "");
3045 while (system_working
) {
3046 timer_thread_check_signal(vm
);
3047 timer_thread_check_timeout(vm
);
3048 ubf_wakeup_all_threads();
3050 RUBY_DEBUG_LOG("system_working:%d", system_working
);
3051 timer_thread_polling(vm
);
3054 RUBY_DEBUG_LOG("terminated");
3058 /* only use signal-safe system calls here */
// signal_communication_pipe: async-signal-safe write to the timer thread's
// wakeup fd — an 8-byte counter for eventfd, a single '!' byte for a pipe.
// EINTR retries; EAGAIN/EWOULDBLOCK mean the channel is already saturated
// (a wakeup is pending), anything else is an async bug.
// NOTE(review): the retry label, errno capture and switch skeleton are
// missing from this extraction; code text is kept verbatim.
3060 signal_communication_pipe(int fd
)
3063 const uint64_t buff
= 1;
3065 const char buff
= '!';
3069 /* already opened */
3072 if ((result
= write(fd
, &buff
, sizeof(buff
))) <= 0) {
3075 case EINTR
: goto retry
;
3077 #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
3082 async_bug_fd("rb_thread_wakeup_timer_thread: write", e
, fd
);
3085 if (TT_DEBUG
) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
3093 timer_thread_wakeup_force(void)
3095 // should not use RUBY_DEBUG_LOG() because it can be called within signal handlers.
3096 signal_communication_pipe(timer_th
.comm_fds
[1]);
3100 timer_thread_wakeup_locked(rb_vm_t
*vm
)
3102 // should be locked before.
3103 ASSERT_ractor_sched_locked(vm
, NULL
);
3105 if (timer_th
.created_fork_gen
== current_fork_gen
) {
3106 if (vm
->ractor
.sched
.timeslice_wait_inf
) {
3107 RUBY_DEBUG_LOG("wakeup with fd:%d", timer_th
.comm_fds
[1]);
3108 timer_thread_wakeup_force();
3111 RUBY_DEBUG_LOG("will be wakeup...");
3117 timer_thread_wakeup(void)
3119 rb_vm_t
*vm
= GET_VM();
3121 ractor_sched_lock(vm
, NULL
);
3123 timer_thread_wakeup_locked(vm
);
3125 ractor_sched_unlock(vm
, NULL
);
// rb_thread_create_timer_thread: (re)create the timer thread.  On the first
// call, or after fork(2) (detected via created_fork_gen), tear down any
// inherited fds/locks, reinitialize the waiting list, lock and communication
// channel, then spawn the pthread running timer_thread_func.
// NOTE(review): brace/#endif lines and the MN event-fd setup comment lines
// are missing from this extraction; code text is kept verbatim.
3129 rb_thread_create_timer_thread(void)
3131 rb_serial_t created_fork_gen
= timer_th
.created_fork_gen
;
3133 RUBY_DEBUG_LOG("fork_gen create:%d current:%d", (int)created_fork_gen
, (int)current_fork_gen
);
3135 timer_th
.created_fork_gen
= current_fork_gen
;
3137 if (created_fork_gen
!= current_fork_gen
) {
// Nonzero generation means we inherited state from a forked parent.
3138 if (created_fork_gen
!= 0) {
3139 RUBY_DEBUG_LOG("forked child process");
3141 CLOSE_INVALIDATE_PAIR(timer_th
.comm_fds
);
3142 #if HAVE_SYS_EPOLL_H && USE_MN_THREADS
3143 close_invalidate(&timer_th
.event_fd
, "close event_fd");
3145 rb_native_mutex_destroy(&timer_th
.waiting_lock
);
3148 ccan_list_head_init(&timer_th
.waiting
);
3149 rb_native_mutex_initialize(&timer_th
.waiting_lock
);
3151 // open communication channel
3152 setup_communication_pipe_internal(timer_th
.comm_fds
);
3155 timer_thread_setup_mn();
3158 pthread_create(&timer_th
.pthread_id
, NULL
, timer_thread_func
, GET_VM());
3162 native_stop_timer_thread(void)
3165 stopped
= --system_working
<= 0;
3168 RUBY_DEBUG_LOG("wakeup send %d", timer_th
.comm_fds
[1]);
3169 timer_thread_wakeup_force();
3170 RUBY_DEBUG_LOG("wakeup sent");
3171 pthread_join(timer_th
.pthread_id
, NULL
);
3174 if (TT_DEBUG
) fprintf(stderr
, "stop timer thread\n");
3179 native_reset_timer_thread(void)
3184 #ifdef HAVE_SIGALTSTACK
// ruby_stack_overflowed_p: called from the SIGSEGV handler (on the alternate
// stack) to decide whether the faulting address lies within the water-mark
// guard region near the current thread's stack limit, i.e. whether the fault
// is a Ruby-level stack overflow rather than a genuine crash.  Grows-up vs
// grows-down layouts are handled via the STACK_DIR_* helpers.
// NOTE(review): declarations (base/size/rlim), #else arms and final return
// are missing from this extraction; code text is kept verbatim.
3186 ruby_stack_overflowed_p(const rb_thread_t
*th
, const void *addr
)
3190 const size_t water_mark
= 1024 * 1024;
3191 STACK_GROW_DIR_DETECTION
;
3193 #ifdef STACKADDR_AVAILABLE
3194 if (get_stack(&base
, &size
) == 0) {
// The main thread's usable stack can exceed the reported size up to the
// RLIMIT_STACK soft limit.
3196 if (pthread_equal(th
->nt
->thread_id
, native_main_thread
.id
)) {
3198 if (getrlimit(RLIMIT_STACK
, &rlim
) == 0 && rlim
.rlim_cur
> size
) {
3199 size
= (size_t)rlim
.rlim_cur
;
3203 base
= (char *)base
+ STACK_DIR_UPPER(+size
, -size
);
// Fallback: use the machine-stack bounds recorded on the execution context.
3208 size
= th
->ec
->machine
.stack_maxsize
;
3209 base
= (char *)th
->ec
->machine
.stack_start
- STACK_DIR_UPPER(0, size
);
// Shrink to the guard region: a fixed ratio of the stack, capped at 1MiB.
3214 size
/= RUBY_STACK_SPACE_RATIO
;
3215 if (size
> water_mark
) size
= water_mark
;
3216 if (IS_STACK_DIR_UPPER()) {
3217 if (size
> ~(size_t)base
+1) size
= ~(size_t)base
+1;
3218 if (addr
> base
&& addr
<= (void *)((char *)base
+ size
)) return 1;
3221 if (size
> (size_t)base
) size
= (size_t)base
;
3222 if (addr
> (void *)((char *)base
- size
) && addr
<= base
) return 1;
// rb_reserved_fd_p: true when fd is one of the runtime's internal
// descriptors (timer-thread communication fds, and the MN event fd when
// built with MN threads) that user code must not close.  The fork-generation
// check avoids false positives on fds inherited from a parent process.
// NOTE(review): the check_fork_gen label and return lines are missing from
// this extraction; code text is kept verbatim.
3229 rb_reserved_fd_p(int fd
)
3231 /* no false-positive if out-of-FD at startup */
3232 if (fd
< 0) return 0;
3234 if (fd
== timer_th
.comm_fds
[0] ||
3235 fd
== timer_th
.comm_fds
[1]
3236 #if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
3237 || fd
== timer_th
.event_fd
3240 goto check_fork_gen
;
// Reserved only if the timer thread belongs to this process (not pre-fork).
3245 if (timer_th
.created_fork_gen
== current_fork_gen
) {
3246 /* async-signal-safe */
3254 rb_nativethread_id_t
3255 rb_nativethread_self(void)
3257 return pthread_self();
3260 #if defined(USE_POLL) && !defined(HAVE_PPOLL)
3261 /* TODO: don't ignore sigmask */
3263 ruby_ppoll(struct pollfd
*fds
, nfds_t nfds
,
3264 const struct timespec
*ts
, const sigset_t
*sigmask
)
3271 if (ts
->tv_sec
> INT_MAX
/1000)
3272 timeout_ms
= INT_MAX
;
3274 tmp
= (int)(ts
->tv_sec
* 1000);
3275 /* round up 1ns to 1ms to avoid excessive wakeups for <1ms sleep */
3276 tmp2
= (int)((ts
->tv_nsec
+ 999999L) / (1000L * 1000L));
3277 if (INT_MAX
- tmp
< tmp2
)
3278 timeout_ms
= INT_MAX
;
3280 timeout_ms
= (int)(tmp
+ tmp2
);
3286 return poll(fds
, nfds
, timeout_ms
);
3288 # define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
3292 * Single CPU setups benefit from explicit sched_yield() before ppoll(),
3293 * since threads may be too starved to enter the GVL waitqueue for
3294 * us to detect contention. Instead, we want to kick other threads
3295 * so they can run and possibly prevent us from entering slow paths
3296 * in ppoll() or similar syscalls.
3298 * Confirmed on FreeBSD 11.2 and Linux 4.19.
3299 * [ruby-core:90417] [Bug #15398]
3301 #define THREAD_BLOCKING_YIELD(th) do { \
3302 const rb_thread_t *next_th; \
3303 struct rb_thread_sched *sched = TH_SCHED(th); \
3304 RB_VM_SAVE_MACHINE_CONTEXT(th); \
3305 thread_sched_to_waiting(sched, (th)); \
3306 next_th = sched->running; \
3307 rb_native_mutex_unlock(&sched->lock_); \
3308 native_thread_yield(); /* TODO: needed? */ \
3309 if (!next_th && rb_ractor_living_thread_num(th->ractor) > 1) { \
3310 native_thread_yield(); \
// native_sleep: put `th` to sleep for `rel` hrtime units (NULL = forever).
// Dedicated-NT threads sleep on their own condvar via native_cond_sleep;
// MN-scheduled threads instead park through the thread scheduler (timeout
// wait or wait-until-wakeup).
// NOTE(review): the #if USE_MN_THREADS / else structure is missing from this
// extraction; code text is kept verbatim.
3314 native_sleep(rb_thread_t
*th
, rb_hrtime_t
*rel
)
3316 struct rb_thread_sched
*sched
= TH_SCHED(th
);
3318 RUBY_DEBUG_LOG("rel:%d", rel
? (int)*rel
: 0);
3320 if (th_has_dedicated_nt(th
)) {
3321 native_cond_sleep(th
, rel
);
// MN threads: timed wait registered with the timer thread...
3324 thread_sched_wait_events(sched
, th
, -1, thread_sched_waiting_timeout
, rel
);
// ...or an untimed park until an explicit wakeup.
3328 thread_sched_to_waiting_until_wakeup(sched
, th
);
3331 RUBY_DEBUG_LOG("wakeup");
3334 // thread internal event hooks (only for pthread)
// Singly-linked list node for C-API thread event hooks; the list head
// (rb_internal_thread_event_hooks) is guarded by the rwlock below.
// NOTE(review): the user_data member line is missing from this extraction.
3336 struct rb_internal_thread_event_hook
{
3337 rb_internal_thread_event_callback callback
;
3338 rb_event_flag_t event
;
3341 struct rb_internal_thread_event_hook
*next
;
// Writers (add/remove) take the write lock; hook execution takes the read lock.
3344 static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock
= PTHREAD_RWLOCK_INITIALIZER
;
3346 rb_internal_thread_event_hook_t
*
3347 rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback
, rb_event_flag_t internal_event
, void *user_data
)
3349 rb_internal_thread_event_hook_t
*hook
= ALLOC_N(rb_internal_thread_event_hook_t
, 1);
3350 hook
->callback
= callback
;
3351 hook
->user_data
= user_data
;
3352 hook
->event
= internal_event
;
3355 if ((r
= pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock
))) {
3356 rb_bug_errno("pthread_rwlock_wrlock", r
);
3359 hook
->next
= rb_internal_thread_event_hooks
;
3360 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks
, hook
);
3362 if ((r
= pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock
))) {
3363 rb_bug_errno("pthread_rwlock_unlock", r
);
// rb_internal_thread_remove_event_hook: unlink `hook` from the global hook
// list under the write lock.  Handles both the head-of-list case (atomic
// pointer swap) and interior nodes (predecessor walk); tracks success so the
// node is only freed when actually found.
// NOTE(review): the do/while opener, success=TRUE assignments, free and
// return lines are missing from this extraction; code text is kept verbatim.
3369 rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t
* hook
)
3372 if ((r
= pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock
))) {
3373 rb_bug_errno("pthread_rwlock_wrlock", r
);
3376 bool success
= FALSE
;
// Head of list: swap the global head to hook->next.
3378 if (rb_internal_thread_event_hooks
== hook
) {
3379 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks
, hook
->next
);
// Interior node: walk predecessors until one points at `hook`.
3383 rb_internal_thread_event_hook_t
*h
= rb_internal_thread_event_hooks
;
3386 if (h
->next
== hook
) {
3387 h
->next
= hook
->next
;
3391 } while ((h
= h
->next
));
3394 if ((r
= pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock
))) {
3395 rb_bug_errno("pthread_rwlock_unlock", r
);
// rb_thread_execute_hooks: fire all registered internal thread event hooks
// whose event mask matches `event`, passing a per-call event_data describing
// `th`.  The hook list is walked under the read lock so concurrent
// add/remove (write lock) cannot race with execution.
// NOTE(review): the do-loop opener and the event_data initializer fields are
// missing from this extraction; code text is kept verbatim.
3405 rb_thread_execute_hooks(rb_event_flag_t event
, rb_thread_t
*th
)
3408 if ((r
= pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock
))) {
3409 rb_bug_errno("pthread_rwlock_rdlock", r
);
3412 if (rb_internal_thread_event_hooks
) {
3413 rb_internal_thread_event_hook_t
*h
= rb_internal_thread_event_hooks
;
3415 if (h
->event
& event
) {
3416 rb_internal_thread_event_data_t event_data
= {
3419 (*h
->callback
)(event
, &event_data
, h
->user_data
);
3421 } while((h
= h
->next
));
3423 if ((r
= pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock
))) {
3424 rb_bug_errno("pthread_rwlock_unlock", r
);
3428 // return true if the current thread acquires DNT.
3429 // return false if the current thread already acquires DNT.
// Pin the current Ruby thread to a dedicated native thread by bumping its
// dedicated count.  is_snt records whether it was a shared NT before the
// call, i.e. whether this call actually acquired the DNT.
// NOTE(review): the return statement is outside this view — presumably
// `return is_snt;` per the contract above; confirm against upstream.
3431 rb_thread_lock_native_thread(void)
3433 rb_thread_t
*th
= GET_THREAD();
3434 bool is_snt
= th
->nt
->dedicated
== 0;
3435 native_thread_dedicated_inc(th
->vm
, th
->ractor
, th
->nt
);
3440 #endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */