1 /* -*-c-*- */
2 /**********************************************************************
4 thread_pthread.c -
6 $Author$
8 Copyright (C) 2004-2007 Koichi Sasada
10 **********************************************************************/
12 #ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
14 #include "internal/gc.h"
15 #include "internal/sanitizers.h"
16 #include "rjit.h"
18 #ifdef HAVE_SYS_RESOURCE_H
19 #include <sys/resource.h>
20 #endif
21 #ifdef HAVE_THR_STKSEGMENT
22 #include <thread.h>
23 #endif
24 #if defined(HAVE_FCNTL_H)
25 #include <fcntl.h>
26 #elif defined(HAVE_SYS_FCNTL_H)
27 #include <sys/fcntl.h>
28 #endif
29 #ifdef HAVE_SYS_PRCTL_H
30 #include <sys/prctl.h>
31 #endif
32 #if defined(HAVE_SYS_TIME_H)
33 #include <sys/time.h>
34 #endif
35 #if defined(__HAIKU__)
36 #include <kernel/OS.h>
37 #endif
38 #ifdef __linux__
39 #include <sys/syscall.h> /* for SYS_gettid */
40 #endif
41 #include <time.h>
42 #include <signal.h>
44 #if defined __APPLE__
45 # include <AvailabilityMacros.h>
46 #endif
48 #if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
49 # define USE_EVENTFD (1)
50 # include <sys/eventfd.h>
51 #else
52 # define USE_EVENTFD (0)
53 #endif
55 #if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
56 defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
57 defined(HAVE_CLOCK_GETTIME)
58 static pthread_condattr_t condattr_mono;
59 static pthread_condattr_t *condattr_monotonic = &condattr_mono;
60 #else
61 static const void *const condattr_monotonic = NULL;
62 #endif
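// Note: condattr_monotonic is the condattr passed to rb_native_cond_initialize()
// below. When CLOCK_MONOTONIC condition variables are unavailable it is NULL, so
// condvars fall back to the default (realtime) clock and native_cond_timeout()
// compensates by building absolute deadlines from rb_timespec_now() instead of
// rb_hrtime_now().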
64 #include COROUTINE_H
66 #ifndef HAVE_SYS_EVENT_H
67 #define HAVE_SYS_EVENT_H 0
68 #endif
70 #ifndef HAVE_SYS_EPOLL_H
71 #define HAVE_SYS_EPOLL_H 0
72 #else
73 // force setting for debug
74 // #undef HAVE_SYS_EPOLL_H
75 // #define HAVE_SYS_EPOLL_H 0
76 #endif
78 #ifndef USE_MN_THREADS
79 #if defined(__EMSCRIPTEN__) || defined(COROUTINE_PTHREAD_CONTEXT)
80 // __EMSCRIPTEN__ provides epoll* declarations, but no implementations.
81 // with COROUTINE_PTHREAD_CONTEXT, MN threads are not worth using.
82 #define USE_MN_THREADS 0
83 #elif HAVE_SYS_EPOLL_H
84 #include <sys/epoll.h>
85 #define USE_MN_THREADS 1
86 #elif HAVE_SYS_EVENT_H
87 #include <sys/event.h>
88 #define USE_MN_THREADS 1
89 #else
90 #define USE_MN_THREADS 0
91 #endif
92 #endif
94 // native thread wrappers
96 #define NATIVE_MUTEX_LOCK_DEBUG 0
98 static void
99 mutex_debug(const char *msg, void *lock)
101 if (NATIVE_MUTEX_LOCK_DEBUG) {
102 int r;
103 static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;
105 if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
106 fprintf(stdout, "%s: %p\n", msg, lock);
107 if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
111 void
112 rb_native_mutex_lock(pthread_mutex_t *lock)
114 int r;
115 mutex_debug("lock", lock);
116 if ((r = pthread_mutex_lock(lock)) != 0) {
117 rb_bug_errno("pthread_mutex_lock", r);
121 void
122 rb_native_mutex_unlock(pthread_mutex_t *lock)
124 int r;
125 mutex_debug("unlock", lock);
126 if ((r = pthread_mutex_unlock(lock)) != 0) {
127 rb_bug_errno("pthread_mutex_unlock", r);
132 rb_native_mutex_trylock(pthread_mutex_t *lock)
134 int r;
135 mutex_debug("trylock", lock);
136 if ((r = pthread_mutex_trylock(lock)) != 0) {
137 if (r == EBUSY) {
138 return EBUSY;
140 else {
141 rb_bug_errno("pthread_mutex_trylock", r);
144 return 0;
147 void
148 rb_native_mutex_initialize(pthread_mutex_t *lock)
150 int r = pthread_mutex_init(lock, 0);
151 mutex_debug("init", lock);
152 if (r != 0) {
153 rb_bug_errno("pthread_mutex_init", r);
157 void
158 rb_native_mutex_destroy(pthread_mutex_t *lock)
160 int r = pthread_mutex_destroy(lock);
161 mutex_debug("destroy", lock);
162 if (r != 0) {
163 rb_bug_errno("pthread_mutex_destroy", r);
167 void
168 rb_native_cond_initialize(rb_nativethread_cond_t *cond)
170 int r = pthread_cond_init(cond, condattr_monotonic);
171 if (r != 0) {
172 rb_bug_errno("pthread_cond_init", r);
176 void
177 rb_native_cond_destroy(rb_nativethread_cond_t *cond)
179 int r = pthread_cond_destroy(cond);
180 if (r != 0) {
181 rb_bug_errno("pthread_cond_destroy", r);
186 * In OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast return
187 * EAGAIN after retrying 8192 times. You can see them in the following page:
189 * http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
191 * The following rb_native_cond_signal and rb_native_cond_broadcast functions
192  * need to retry until the pthread functions stop returning EAGAIN.
195 void
196 rb_native_cond_signal(rb_nativethread_cond_t *cond)
198 int r;
199 do {
200 r = pthread_cond_signal(cond);
201 } while (r == EAGAIN);
202 if (r != 0) {
203 rb_bug_errno("pthread_cond_signal", r);
207 void
208 rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
210 int r;
211 do {
212 r = pthread_cond_broadcast(cond);
213 } while (r == EAGAIN);
214 if (r != 0) {
215 rb_bug_errno("pthread_cond_broadcast", r);
219 void
220 rb_native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
222 int r = pthread_cond_wait(cond, mutex);
223 if (r != 0) {
224 rb_bug_errno("pthread_cond_wait", r);
228 static int
229 native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs)
231 int r;
232 struct timespec ts;
235  * An old Linux kernel may return EINTR, even though POSIX says
236 * "These functions shall not return an error code of [EINTR]".
237 * http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cond_timedwait.html
238 * Let's hide it from arch generic code.
240 do {
241 rb_hrtime2timespec(&ts, abs);
242 r = pthread_cond_timedwait(cond, mutex, &ts);
243 } while (r == EINTR);
245 if (r != 0 && r != ETIMEDOUT) {
246 rb_bug_errno("pthread_cond_timedwait", r);
249 return r;
252 static rb_hrtime_t
253 native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
255 if (condattr_monotonic) {
256 return rb_hrtime_add(rb_hrtime_now(), rel);
258 else {
259 struct timespec ts;
261 rb_timespec_now(&ts);
262 return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
266 void
267 rb_native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, unsigned long msec)
269 rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
270 native_cond_timedwait(cond, mutex, &hrmsec);
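// Illustrative usage of the wrappers above (an informal sketch; `example_lock`,
// `example_cond`, `example_setup` and `example_wait_ms` are hypothetical names,
// not part of this file). It shows the intended pairing: initialize once, then
// lock -> timedwait -> unlock. The timed wait uses CLOCK_MONOTONIC when
// condattr_monotonic is available, so it is unaffected by wall-clock changes.
#if 0
static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;
static rb_nativethread_cond_t example_cond;

static void
example_setup(void)
{
    rb_native_cond_initialize(&example_cond); // uses condattr_monotonic when available
}

static void
example_wait_ms(unsigned long msec)
{
    rb_native_mutex_lock(&example_lock);
    rb_native_cond_timedwait(&example_cond, &example_lock, msec); // wait up to msec milliseconds
    rb_native_mutex_unlock(&example_lock);
}
#endif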
273 // thread scheduling
275 static rb_internal_thread_event_hook_t *rb_internal_thread_event_hooks = NULL;
276 static void rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th);
278 #if 0
279 static const char *
280 event_name(rb_event_flag_t event)
282 switch (event) {
283 case RUBY_INTERNAL_THREAD_EVENT_STARTED:
284 return "STARTED";
285 case RUBY_INTERNAL_THREAD_EVENT_READY:
286 return "READY";
287 case RUBY_INTERNAL_THREAD_EVENT_RESUMED:
288 return "RESUMED";
289 case RUBY_INTERNAL_THREAD_EVENT_SUSPENDED:
290 return "SUSPENDED";
291 case RUBY_INTERNAL_THREAD_EVENT_EXITED:
292 return "EXITED";
294 return "no-event";
297 #define RB_INTERNAL_THREAD_HOOK(event, th) \
298 if (UNLIKELY(rb_internal_thread_event_hooks)) { \
299 fprintf(stderr, "[thread=%"PRIxVALUE"] %s in %s (%s:%d)\n", th->self, event_name(event), __func__, __FILE__, __LINE__); \
300 rb_thread_execute_hooks(event, th); \
302 #else
303 #define RB_INTERNAL_THREAD_HOOK(event, th) if (UNLIKELY(rb_internal_thread_event_hooks)) { rb_thread_execute_hooks(event, th); }
304 #endif
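// The hooks fired via RB_INTERNAL_THREAD_HOOK() are those registered through the
// public C API in ruby/thread.h. The sketch below is an informal illustration of
// a consumer (it assumes rb_internal_thread_add_event_hook() and the callback
// signature declared in that header; check ruby/thread.h for the exact API).
#if 0
#include <stdio.h>
#include <ruby/thread.h>

static void
example_thread_event_cb(rb_event_flag_t event,
                        const rb_internal_thread_event_data_t *event_data,
                        void *user_data)
{
    // Keep the callback minimal: it runs on internal scheduling paths.
    fprintf(stderr, "internal thread event: %u\n", (unsigned int)event);
}

static void
example_install_hook(void)
{
    rb_internal_thread_add_event_hook(example_thread_event_cb,
                                      RUBY_INTERNAL_THREAD_EVENT_STARTED |
                                      RUBY_INTERNAL_THREAD_EVENT_EXITED,
                                      NULL);
}
#endif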
306 static rb_serial_t current_fork_gen = 1; /* We can't use GET_VM()->fork_gen */
308 #if defined(SIGVTALRM) && !defined(__EMSCRIPTEN__)
309 # define USE_UBF_LIST 1
310 #endif
312 static void threadptr_trap_interrupt(rb_thread_t *);
314 #ifdef HAVE_SCHED_YIELD
315 #define native_thread_yield() (void)sched_yield()
316 #else
317 #define native_thread_yield() ((void)0)
318 #endif
320 /* 100ms. 10ms is too small for user level thread scheduling
321 * on recent Linux (tested on 2.6.35)
323 #define TIME_QUANTUM_MSEC (100)
324 #define TIME_QUANTUM_USEC (TIME_QUANTUM_MSEC * 1000)
325 #define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)
327 static void native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
328 static void native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
329 static void native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th);
331 static void ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r);
332 static void timer_thread_wakeup(void);
333 static void timer_thread_wakeup_locked(rb_vm_t *vm);
334 static void timer_thread_wakeup_force(void);
335 static void thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th);
336 static void coroutine_transfer0(struct coroutine_context *transfer_from,
337 struct coroutine_context *transfer_to, bool to_dead);
339 #define thread_sched_dump(s) thread_sched_dump_(__FILE__, __LINE__, s)
341 static bool
342 th_has_dedicated_nt(const rb_thread_t *th)
344 // TODO: th->has_dedicated_nt
345 return th->nt->dedicated > 0;
348 RBIMPL_ATTR_MAYBE_UNUSED()
349 static void
350 thread_sched_dump_(const char *file, int line, struct rb_thread_sched *sched)
352 fprintf(stderr, "@%s:%d running:%d\n", file, line, sched->running ? (int)sched->running->serial : -1);
353 rb_thread_t *th;
354 int i = 0;
355 ccan_list_for_each(&sched->readyq, th, sched.node.readyq) {
356 i++; if (i>10) rb_bug("too many");
357 fprintf(stderr, " ready:%d (%sNT:%d)\n", th->serial,
358 th->nt ? (th->nt->dedicated ? "D" : "S") : "x",
359 th->nt ? (int)th->nt->serial : -1);
363 #define ractor_sched_dump(s) ractor_sched_dump_(__FILE__, __LINE__, s)
365 RBIMPL_ATTR_MAYBE_UNUSED()
366 static void
367 ractor_sched_dump_(const char *file, int line, rb_vm_t *vm)
369 rb_ractor_t *r;
371 fprintf(stderr, "ractor_sched_dump %s:%d\n", file, line);
373 int i = 0;
374 ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
375 i++;
376 if (i>10) rb_bug("!!");
377 fprintf(stderr, " %d ready:%d\n", i, rb_ractor_id(r));
381 #define thread_sched_lock(a, b) thread_sched_lock_(a, b, __FILE__, __LINE__)
382 #define thread_sched_unlock(a, b) thread_sched_unlock_(a, b, __FILE__, __LINE__)
384 static void
385 thread_sched_lock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
387 rb_native_mutex_lock(&sched->lock_);
389 #if VM_CHECK_MODE
390 RUBY_DEBUG_LOG2(file, line, "th:%u prev_owner:%u", rb_th_serial(th), rb_th_serial(sched->lock_owner));
391 VM_ASSERT(sched->lock_owner == NULL);
392 sched->lock_owner = th;
393 #else
394 RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
395 #endif
398 static void
399 thread_sched_unlock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
401 RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
403 #if VM_CHECK_MODE
404 VM_ASSERT(sched->lock_owner == th);
405 sched->lock_owner = NULL;
406 #endif
408 rb_native_mutex_unlock(&sched->lock_);
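// Locking convention: scheduler state (sched->running, sched->readyq,
// sched->readyq_cnt, ...) is only touched between a matching
// thread_sched_lock(sched, th) / thread_sched_unlock(sched, th) pair, and under
// VM_CHECK_MODE the owning thread is recorded so that
// ASSERT_thread_sched_locked() can verify callers hold the lock.
// Typical call pattern (informal sketch):
//
//     thread_sched_lock(sched, th);
//     {
//         // ... inspect or update sched->readyq / sched->running ...
//     }
//     thread_sched_unlock(sched, th);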
411 static void
412 thread_sched_set_lock_owner(struct rb_thread_sched *sched, rb_thread_t *th)
414 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
416 #if VM_CHECK_MODE > 0
417 sched->lock_owner = th;
418 #endif
421 static void
422 ASSERT_thread_sched_locked(struct rb_thread_sched *sched, rb_thread_t *th)
424 VM_ASSERT(rb_native_mutex_trylock(&sched->lock_) == EBUSY);
426 #if VM_CHECK_MODE
427 if (th) {
428 VM_ASSERT(sched->lock_owner == th);
430 else {
431 VM_ASSERT(sched->lock_owner != NULL);
433 #endif
436 #define ractor_sched_lock(a, b) ractor_sched_lock_(a, b, __FILE__, __LINE__)
437 #define ractor_sched_unlock(a, b) ractor_sched_unlock_(a, b, __FILE__, __LINE__)
439 RBIMPL_ATTR_MAYBE_UNUSED()
440 static unsigned int
441 rb_ractor_serial(const rb_ractor_t *r) {
442 if (r) {
443 return rb_ractor_id(r);
445 else {
446 return 0;
450 static void
451 ractor_sched_set_locked(rb_vm_t *vm, rb_ractor_t *cr)
453 #if VM_CHECK_MODE > 0
454 VM_ASSERT(vm->ractor.sched.lock_owner == NULL);
455 VM_ASSERT(vm->ractor.sched.locked == false);
457 vm->ractor.sched.lock_owner = cr;
458 vm->ractor.sched.locked = true;
459 #endif
462 static void
463 ractor_sched_set_unlocked(rb_vm_t *vm, rb_ractor_t *cr)
465 #if VM_CHECK_MODE > 0
466 VM_ASSERT(vm->ractor.sched.locked);
467 VM_ASSERT(vm->ractor.sched.lock_owner == cr);
469 vm->ractor.sched.locked = false;
470 vm->ractor.sched.lock_owner = NULL;
471 #endif
474 static void
475 ractor_sched_lock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
477 rb_native_mutex_lock(&vm->ractor.sched.lock);
479 #if VM_CHECK_MODE
480 RUBY_DEBUG_LOG2(file, line, "cr:%u prev_owner:%u", rb_ractor_serial(cr), rb_ractor_serial(vm->ractor.sched.lock_owner));
481 #else
482 RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
483 #endif
485 ractor_sched_set_locked(vm, cr);
488 static void
489 ractor_sched_unlock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
491 RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
493 ractor_sched_set_unlocked(vm, cr);
494 rb_native_mutex_unlock(&vm->ractor.sched.lock);
497 static void
498 ASSERT_ractor_sched_locked(rb_vm_t *vm, rb_ractor_t *cr)
500 VM_ASSERT(rb_native_mutex_trylock(&vm->ractor.sched.lock) == EBUSY);
501 VM_ASSERT(vm->ractor.sched.locked);
502 VM_ASSERT(cr == NULL || vm->ractor.sched.lock_owner == cr);
505 RBIMPL_ATTR_MAYBE_UNUSED()
506 static bool
507 ractor_sched_running_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
509 rb_thread_t *rth;
510 ccan_list_for_each(&vm->ractor.sched.running_threads, rth, sched.node.running_threads) {
511 if (rth == th) return true;
513 return false;
516 RBIMPL_ATTR_MAYBE_UNUSED()
517 static unsigned int
518 ractor_sched_running_threads_size(rb_vm_t *vm)
520 rb_thread_t *th;
521 unsigned int i = 0;
522 ccan_list_for_each(&vm->ractor.sched.running_threads, th, sched.node.running_threads) {
523 i++;
525 return i;
528 RBIMPL_ATTR_MAYBE_UNUSED()
529 static unsigned int
530 ractor_sched_timeslice_threads_size(rb_vm_t *vm)
532 rb_thread_t *th;
533 unsigned int i = 0;
534 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
535 i++;
537 return i;
540 RBIMPL_ATTR_MAYBE_UNUSED()
541 static bool
542 ractor_sched_timeslice_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
544 rb_thread_t *rth;
545 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, rth, sched.node.timeslice_threads) {
546 if (rth == th) return true;
548 return false;
551 static void ractor_sched_barrier_join_signal_locked(rb_vm_t *vm);
552 static void ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th);
554 // maintain the running/timeslice thread lists so that the timer thread can deliver timeslice signals.
555 static void
556 thread_sched_setup_running_threads(struct rb_thread_sched *sched, rb_ractor_t *cr, rb_vm_t *vm,
557 rb_thread_t *add_th, rb_thread_t *del_th, rb_thread_t *add_timeslice_th)
559 #if USE_RUBY_DEBUG_LOG
560 unsigned int prev_running_cnt = vm->ractor.sched.running_cnt;
561 #endif
563 rb_thread_t *del_timeslice_th;
565 if (del_th && sched->is_running_timeslice) {
566 del_timeslice_th = del_th;
567 sched->is_running_timeslice = false;
569 else {
570 del_timeslice_th = NULL;
573 RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u",
574 rb_th_serial(add_th), rb_th_serial(del_th),
575 rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th));
577 ractor_sched_lock(vm, cr);
579 // update running_threads
580 if (del_th) {
581 VM_ASSERT(ractor_sched_running_threads_contain_p(vm, del_th));
582 VM_ASSERT(del_timeslice_th != NULL ||
583 !ractor_sched_timeslice_threads_contain_p(vm, del_th));
585 ccan_list_del_init(&del_th->sched.node.running_threads);
586 vm->ractor.sched.running_cnt--;
588 if (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
589 ractor_sched_barrier_join_signal_locked(vm);
591 sched->is_running = false;
594 if (add_th) {
595 if (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
596 RUBY_DEBUG_LOG("barrier-wait");
598 ractor_sched_barrier_join_signal_locked(vm);
599 ractor_sched_barrier_join_wait_locked(vm, add_th);
602 VM_ASSERT(!ractor_sched_running_threads_contain_p(vm, add_th));
603 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_th));
605 ccan_list_add(&vm->ractor.sched.running_threads, &add_th->sched.node.running_threads);
606 vm->ractor.sched.running_cnt++;
607 sched->is_running = true;
610 if (add_timeslice_th) {
611 // update timeslice threads
612 int was_empty = ccan_list_empty(&vm->ractor.sched.timeslice_threads);
613 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_timeslice_th));
614 ccan_list_add(&vm->ractor.sched.timeslice_threads, &add_timeslice_th->sched.node.timeslice_threads);
615 sched->is_running_timeslice = true;
616 if (was_empty) {
617 timer_thread_wakeup_locked(vm);
621 if (del_timeslice_th) {
622 VM_ASSERT(ractor_sched_timeslice_threads_contain_p(vm, del_timeslice_th));
623 ccan_list_del_init(&del_timeslice_th->sched.node.timeslice_threads);
626 VM_ASSERT(ractor_sched_running_threads_size(vm) == vm->ractor.sched.running_cnt);
627 VM_ASSERT(ractor_sched_timeslice_threads_size(vm) <= vm->ractor.sched.running_cnt);
629 ractor_sched_unlock(vm, cr);
631 if (add_th && !del_th && UNLIKELY(vm->ractor.sync.lock_owner != NULL)) {
632 // it can be after barrier synchronization by another ractor
633 rb_thread_t *lock_owner = NULL;
634 #if VM_CHECK_MODE
635 lock_owner = sched->lock_owner;
636 #endif
637 thread_sched_unlock(sched, lock_owner);
639 RB_VM_LOCK_ENTER();
640 RB_VM_LOCK_LEAVE();
642 thread_sched_lock(sched, lock_owner);
645 //RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u run:%u->%u",
646 // rb_th_serial(add_th), rb_th_serial(del_th),
647 // rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th),
648 RUBY_DEBUG_LOG("run:%u->%u", prev_running_cnt, vm->ractor.sched.running_cnt);
651 static void
652 thread_sched_add_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
654 ASSERT_thread_sched_locked(sched, th);
655 VM_ASSERT(sched->running == th);
657 rb_vm_t *vm = th->vm;
658 thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, ccan_list_empty(&sched->readyq) ? NULL : th);
661 static void
662 thread_sched_del_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
664 ASSERT_thread_sched_locked(sched, th);
666 rb_vm_t *vm = th->vm;
667 thread_sched_setup_running_threads(sched, th->ractor, vm, NULL, th, NULL);
670 void
671 rb_add_running_thread(rb_thread_t *th)
673 struct rb_thread_sched *sched = TH_SCHED(th);
675 thread_sched_lock(sched, th);
677 thread_sched_add_running_thread(sched, th);
679 thread_sched_unlock(sched, th);
682 void
683 rb_del_running_thread(rb_thread_t *th)
685 struct rb_thread_sched *sched = TH_SCHED(th);
687 thread_sched_lock(sched, th);
689 thread_sched_del_running_thread(sched, th);
691 thread_sched_unlock(sched, th);
694 // setup current or next running thread
695 // sched->running should be set only in this function.
697 // if th is NULL, there are no running threads.
698 static void
699 thread_sched_set_running(struct rb_thread_sched *sched, rb_thread_t *th)
701 RUBY_DEBUG_LOG("th:%u->th:%u", rb_th_serial(sched->running), rb_th_serial(th));
702 VM_ASSERT(sched->running != th);
704 sched->running = th;
707 RBIMPL_ATTR_MAYBE_UNUSED()
708 static bool
709 thread_sched_readyq_contain_p(struct rb_thread_sched *sched, rb_thread_t *th)
711 rb_thread_t *rth;
712 ccan_list_for_each(&sched->readyq, rth, sched.node.readyq) {
713 if (rth == th) return true;
715 return false;
718 // dequeue a thread from the ready queue.
719 // if the ready queue is empty, return NULL.
721 // return the dequeued thread (or NULL).
722 static rb_thread_t *
723 thread_sched_deq(struct rb_thread_sched *sched)
725 ASSERT_thread_sched_locked(sched, NULL);
726 rb_thread_t *next_th;
728 VM_ASSERT(sched->running != NULL);
730 if (ccan_list_empty(&sched->readyq)) {
731 next_th = NULL;
733 else {
734 next_th = ccan_list_pop(&sched->readyq, rb_thread_t, sched.node.readyq);
736 VM_ASSERT(sched->readyq_cnt > 0);
737 sched->readyq_cnt--;
738 ccan_list_node_init(&next_th->sched.node.readyq);
741 RUBY_DEBUG_LOG("next_th:%u readyq_cnt:%d", rb_th_serial(next_th), sched->readyq_cnt);
743 return next_th;
746 // enqueue ready thread to the ready queue.
747 static void
748 thread_sched_enq(struct rb_thread_sched *sched, rb_thread_t *ready_th)
750 ASSERT_thread_sched_locked(sched, NULL);
751 RUBY_DEBUG_LOG("ready_th:%u readyq_cnt:%d", rb_th_serial(ready_th), sched->readyq_cnt);
753 VM_ASSERT(sched->running != NULL);
754 VM_ASSERT(!thread_sched_readyq_contain_p(sched, ready_th));
756 if (sched->is_running) {
757 if (ccan_list_empty(&sched->readyq)) {
758 // add sched->running to timeslice
759 thread_sched_setup_running_threads(sched, ready_th->ractor, ready_th->vm, NULL, NULL, sched->running);
762 else {
763 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(ready_th->vm, sched->running));
766 ccan_list_add_tail(&sched->readyq, &ready_th->sched.node.readyq);
767 sched->readyq_cnt++;
770 // DNT: kick condvar
771 // SNT: TODO
772 static void
773 thread_sched_wakeup_running_thread(struct rb_thread_sched *sched, rb_thread_t *next_th, bool will_switch)
775 ASSERT_thread_sched_locked(sched, NULL);
776 VM_ASSERT(sched->running == next_th);
778 if (next_th) {
779 if (next_th->nt) {
780 if (th_has_dedicated_nt(next_th)) {
781 RUBY_DEBUG_LOG("pinning th:%u", next_th->serial);
782 rb_native_cond_signal(&next_th->nt->cond.readyq);
784 else {
785 // TODO
786 RUBY_DEBUG_LOG("th:%u is already running.", next_th->serial);
789 else {
790 if (will_switch) {
791 RUBY_DEBUG_LOG("th:%u (do nothing)", rb_th_serial(next_th));
793 else {
794 RUBY_DEBUG_LOG("th:%u (enq)", rb_th_serial(next_th));
795 ractor_sched_enq(next_th->vm, next_th->ractor);
799 else {
800 RUBY_DEBUG_LOG("no waiting threads%s", "");
804 // waiting -> ready (locked)
805 static void
806 thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th, bool wakeup, bool will_switch)
808 RUBY_DEBUG_LOG("th:%u running:%u readyq_cnt:%d", rb_th_serial(th), rb_th_serial(sched->running), sched->readyq_cnt);
810 VM_ASSERT(sched->running != th);
811 VM_ASSERT(!thread_sched_readyq_contain_p(sched, th));
812 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_READY, th);
814 if (sched->running == NULL) {
815 thread_sched_set_running(sched, th);
816 if (wakeup) thread_sched_wakeup_running_thread(sched, th, will_switch);
818 else {
819 thread_sched_enq(sched, th);
823 // waiting -> ready
825 // `th` was put into the "waiting" state by `thread_sched_to_waiting`,
826 // and `thread_sched_to_ready` enqueues `th` onto the thread ready queue.
827 RBIMPL_ATTR_MAYBE_UNUSED()
828 static void
829 thread_sched_to_ready(struct rb_thread_sched *sched, rb_thread_t *th)
831 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
833 thread_sched_lock(sched, th);
835 thread_sched_to_ready_common(sched, th, true, false);
837 thread_sched_unlock(sched, th);
840 // wait until sched->running is `th`.
841 static void
842 thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, bool can_direct_transfer)
844 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
846 ASSERT_thread_sched_locked(sched, th);
847 VM_ASSERT(th == GET_THREAD());
849 if (th != sched->running) {
850 // already deleted from running threads
851 // VM_ASSERT(!ractor_sched_running_threads_contain_p(th->vm, th)); // need locking
853 // wait for execution right
854 rb_thread_t *next_th;
855 while((next_th = sched->running) != th) {
856 if (th_has_dedicated_nt(th)) {
857 RUBY_DEBUG_LOG("(nt) sleep th:%u running:%u", rb_th_serial(th), rb_th_serial(sched->running));
859 thread_sched_set_lock_owner(sched, NULL);
861 RUBY_DEBUG_LOG("nt:%d cond:%p", th->nt->serial, &th->nt->cond.readyq);
862 rb_native_cond_wait(&th->nt->cond.readyq, &sched->lock_);
864 thread_sched_set_lock_owner(sched, th);
866 RUBY_DEBUG_LOG("(nt) wakeup %s", sched->running == th ? "success" : "failed");
867 if (th == sched->running) {
868 rb_ractor_thread_switch(th->ractor, th);
871 else {
872 // search another ready thread
873 if (can_direct_transfer &&
874 (next_th = sched->running) != NULL &&
875 !next_th->nt // next_th is running or has dedicated nt
878 RUBY_DEBUG_LOG("th:%u->%u (direct)", rb_th_serial(th), rb_th_serial(next_th));
880 thread_sched_set_lock_owner(sched, NULL);
882 rb_ractor_set_current_ec(th->ractor, NULL);
883 thread_sched_switch(th, next_th);
885 thread_sched_set_lock_owner(sched, th);
887 else {
888 // search another ready ractor
889 struct rb_native_thread *nt = th->nt;
890 native_thread_assign(NULL, th);
892 RUBY_DEBUG_LOG("th:%u->%u (ractor scheduling)", rb_th_serial(th), rb_th_serial(next_th));
894 thread_sched_set_lock_owner(sched, NULL);
896 rb_ractor_set_current_ec(th->ractor, NULL);
897 coroutine_transfer0(th->sched.context, nt->nt_context, false);
899 thread_sched_set_lock_owner(sched, th);
902 VM_ASSERT(GET_EC() == th->ec);
906 VM_ASSERT(th->nt != NULL);
907 VM_ASSERT(GET_EC() == th->ec);
908 VM_ASSERT(th->sched.waiting_reason.flags == thread_sched_waiting_none);
910 // add th to running threads
911 thread_sched_add_running_thread(sched, th);
914 // VM_ASSERT(ractor_sched_running_threads_contain_p(th->vm, th)); need locking
915 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED, th);
918 // waiting -> ready -> running (locked)
919 static void
920 thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
922 RUBY_DEBUG_LOG("th:%u dedicated:%d", rb_th_serial(th), th_has_dedicated_nt(th));
924 VM_ASSERT(sched->running != th);
925 VM_ASSERT(th_has_dedicated_nt(th));
926 VM_ASSERT(GET_THREAD() == th);
928 native_thread_dedicated_dec(th->vm, th->ractor, th->nt);
930 // waiting -> ready
931 thread_sched_to_ready_common(sched, th, false, false);
933 if (sched->running == th) {
934 thread_sched_add_running_thread(sched, th);
937 // TODO: check SNT number
938 thread_sched_wait_running_turn(sched, th, false);
941 // waiting -> ready -> running
943 // `th` was made to wait by `thread_sched_to_waiting()`
944 // and ran a dedicated task (like waitpid and so on).
945 // After the dedicated task, this function is called
946 // to rejoin normal thread scheduling.
947 static void
948 thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
950 thread_sched_lock(sched, th);
952 thread_sched_to_running_common(sched, th);
954 thread_sched_unlock(sched, th);
957 // resume the next thread in the thread ready queue.
959 // dequeue the next running thread from the ready queue and
960 // resume it if available.
962 // If the next thread has a dedicated native thread, simply signal it to resume.
963 // Otherwise, make the ractor ready so that another nt will run the ractor and the thread.
964 static void
965 thread_sched_wakeup_next_thread(struct rb_thread_sched *sched, rb_thread_t *th, bool will_switch)
967 ASSERT_thread_sched_locked(sched, th);
969 VM_ASSERT(sched->running == th);
970 VM_ASSERT(sched->running->nt != NULL);
972 rb_thread_t *next_th = thread_sched_deq(sched);
974 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
975 VM_ASSERT(th != next_th);
977 thread_sched_set_running(sched, next_th);
978 VM_ASSERT(next_th == sched->running);
979 thread_sched_wakeup_running_thread(sched, next_th, will_switch);
981 if (th != next_th) {
982 thread_sched_del_running_thread(sched, th);
986 // running -> waiting
988 // to_dead: false
989 //   th will run a dedicated task.
990 // run another ready thread.
991 // to_dead: true
992 // th will be dead.
993 // run another ready thread.
994 static void
995 thread_sched_to_waiting_common0(struct rb_thread_sched *sched, rb_thread_t *th, bool to_dead)
997 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
999 if (!to_dead) native_thread_dedicated_inc(th->vm, th->ractor, th->nt);
1001 RUBY_DEBUG_LOG("%sth:%u", to_dead ? "to_dead " : "", rb_th_serial(th));
1003 bool can_switch = to_dead ? !th_has_dedicated_nt(th) : false;
1004 thread_sched_wakeup_next_thread(sched, th, can_switch);
1007 // running -> dead (locked)
1008 static void
1009 thread_sched_to_dead_common(struct rb_thread_sched *sched, rb_thread_t *th)
1011 RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
1012 thread_sched_to_waiting_common0(sched, th, true);
1013 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_EXITED, th);
1016 // running -> dead
1017 static void
1018 thread_sched_to_dead(struct rb_thread_sched *sched, rb_thread_t *th)
1020 thread_sched_lock(sched, th);
1022 thread_sched_to_dead_common(sched, th);
1024 thread_sched_unlock(sched, th);
1027 // running -> waiting (locked)
1029 // This thread will run a dedicated task (th->nt->dedicated++).
1030 static void
1031 thread_sched_to_waiting_common(struct rb_thread_sched *sched, rb_thread_t *th)
1033 RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
1034 thread_sched_to_waiting_common0(sched, th, false);
1037 // running -> waiting
1039 // This thread will run a dedicated task.
1040 static void
1041 thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
1043 thread_sched_lock(sched, th);
1045 thread_sched_to_waiting_common(sched, th);
1047 thread_sched_unlock(sched, th);
1050 // mini utility func
1051 static void
1052 setup_ubf(rb_thread_t *th, rb_unblock_function_t *func, void *arg)
1054 rb_native_mutex_lock(&th->interrupt_lock);
1056 th->unblock.func = func;
1057 th->unblock.arg = arg;
1059 rb_native_mutex_unlock(&th->interrupt_lock);
1062 static void
1063 ubf_waiting(void *ptr)
1065 rb_thread_t *th = (rb_thread_t *)ptr;
1066 struct rb_thread_sched *sched = TH_SCHED(th);
1068 // cleared only once; this is safe because th->interrupt_lock is already acquired.
1069 th->unblock.func = NULL;
1070 th->unblock.arg = NULL;
1072 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
1074 thread_sched_lock(sched, th);
1076 if (sched->running == th) {
1077 // not sleeping yet.
1079 else {
1080 thread_sched_to_ready_common(sched, th, true, false);
1083 thread_sched_unlock(sched, th);
1086 // running -> waiting
1088 // This thread will sleep until another thread wakes it up.
1089 static void
1090 thread_sched_to_waiting_until_wakeup(struct rb_thread_sched *sched, rb_thread_t *th)
1092 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
1094 RB_VM_SAVE_MACHINE_CONTEXT(th);
1095 setup_ubf(th, ubf_waiting, (void *)th);
1097 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1099 thread_sched_lock(sched, th);
1101 if (!RUBY_VM_INTERRUPTED(th->ec)) {
1102 bool can_direct_transfer = !th_has_dedicated_nt(th);
1103 thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
1104 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1106 else {
1107 RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th));
1110 thread_sched_unlock(sched, th);
1112 setup_ubf(th, NULL, NULL);
1115 // yield to another thread in the ready queue.
1116 // continue running if there are no ready threads.
1117 static void
1118 thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
1120 RUBY_DEBUG_LOG("th:%d sched->readyq_cnt:%d", (int)th->serial, sched->readyq_cnt);
1122 thread_sched_lock(sched, th);
1124 if (!ccan_list_empty(&sched->readyq)) {
1125 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1126 thread_sched_wakeup_next_thread(sched, th, !th_has_dedicated_nt(th));
1127 bool can_direct_transfer = !th_has_dedicated_nt(th);
1128 thread_sched_to_ready_common(sched, th, false, can_direct_transfer);
1129 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1131 else {
1132 VM_ASSERT(sched->readyq_cnt == 0);
1135 thread_sched_unlock(sched, th);
1138 void
1139 rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
1141 rb_native_mutex_initialize(&sched->lock_);
1143 #if VM_CHECK_MODE
1144 sched->lock_owner = NULL;
1145 #endif
1147 ccan_list_head_init(&sched->readyq);
1148 sched->readyq_cnt = 0;
1150 #if USE_MN_THREADS
1151 if (!atfork) sched->enable_mn_threads = true; // MN is enabled on Ractors
1152 #endif
1155 static void
1156 coroutine_transfer0(struct coroutine_context *transfer_from, struct coroutine_context *transfer_to, bool to_dead)
1158 #ifdef RUBY_ASAN_ENABLED
1159 void **fake_stack = to_dead ? NULL : &transfer_from->fake_stack;
1160 __sanitizer_start_switch_fiber(fake_stack, transfer_to->stack_base, transfer_to->stack_size);
1161 #endif
1163 RBIMPL_ATTR_MAYBE_UNUSED()
1164 struct coroutine_context *returning_from = coroutine_transfer(transfer_from, transfer_to);
1166 /* if to_dead was passed, the caller is promising that this coroutine is finished and it should
1167 * never be resumed! */
1168 VM_ASSERT(!to_dead);
1169 #ifdef RUBY_ASAN_ENABLED
1170 __sanitizer_finish_switch_fiber(transfer_from->fake_stack,
1171 (const void**)&returning_from->stack_base, &returning_from->stack_size);
1172 #endif
1176 static void
1177 thread_sched_switch0(struct coroutine_context *current_cont, rb_thread_t *next_th, struct rb_native_thread *nt, bool to_dead)
1179 VM_ASSERT(!nt->dedicated);
1180 VM_ASSERT(next_th->nt == NULL);
1182 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
1184 ruby_thread_set_native(next_th);
1185 native_thread_assign(nt, next_th);
1187 coroutine_transfer0(current_cont, next_th->sched.context, to_dead);
1190 static void
1191 thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th)
1193 struct rb_native_thread *nt = cth->nt;
1194 native_thread_assign(NULL, cth);
1195 RUBY_DEBUG_LOG("th:%u->%u on nt:%d", rb_th_serial(cth), rb_th_serial(next_th), nt->serial);
1196 thread_sched_switch0(cth->sched.context, next_th, nt, cth->status == THREAD_KILLED);
1199 #if VM_CHECK_MODE > 0
1200 RBIMPL_ATTR_MAYBE_UNUSED()
1201 static unsigned int
1202 grq_size(rb_vm_t *vm, rb_ractor_t *cr)
1204 ASSERT_ractor_sched_locked(vm, cr);
1206 rb_ractor_t *r, *prev_r = NULL;
1207 unsigned int i = 0;
1209 ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
1210 i++;
1212 VM_ASSERT(r != prev_r);
1213 prev_r = r;
1215 return i;
1217 #endif
1219 static void
1220 ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r)
1222 struct rb_thread_sched *sched = &r->threads.sched;
1223 rb_ractor_t *cr = NULL; // timer thread can call this function
1225 VM_ASSERT(sched->running != NULL);
1226 VM_ASSERT(sched->running->nt == NULL);
1228 ractor_sched_lock(vm, cr);
1230 #if VM_CHECK_MODE > 0
1231 // check if grq contains r
1232 rb_ractor_t *tr;
1233 ccan_list_for_each(&vm->ractor.sched.grq, tr, threads.sched.grq_node) {
1234 VM_ASSERT(r != tr);
1236 #endif
1238 ccan_list_add_tail(&vm->ractor.sched.grq, &sched->grq_node);
1239 vm->ractor.sched.grq_cnt++;
1240 VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);
1242 RUBY_DEBUG_LOG("r:%u th:%u grq_cnt:%u", rb_ractor_id(r), rb_th_serial(sched->running), vm->ractor.sched.grq_cnt);
1244 rb_native_cond_signal(&vm->ractor.sched.cond);
1246 // ractor_sched_dump(vm);
1248 ractor_sched_unlock(vm, cr);
1252 #ifndef SNT_KEEP_SECONDS
1253 #define SNT_KEEP_SECONDS 0
1254 #endif
1256 #ifndef MINIMUM_SNT
1257 // make at least MINIMUM_SNT snts for debug.
1258 #define MINIMUM_SNT 0
1259 #endif
1261 static rb_ractor_t *
1262 ractor_sched_deq(rb_vm_t *vm, rb_ractor_t *cr)
1264 rb_ractor_t *r;
1266 ractor_sched_lock(vm, cr);
1268 RUBY_DEBUG_LOG("empty? %d", ccan_list_empty(&vm->ractor.sched.grq));
1269 // ractor_sched_dump(vm);
1271 VM_ASSERT(rb_current_execution_context(false) == NULL);
1272 VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);
1274 while ((r = ccan_list_pop(&vm->ractor.sched.grq, rb_ractor_t, threads.sched.grq_node)) == NULL) {
1275 RUBY_DEBUG_LOG("wait grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1277 #if SNT_KEEP_SECONDS > 0
1278 rb_hrtime_t abs = rb_hrtime_add(rb_hrtime_now(), RB_HRTIME_PER_SEC * SNT_KEEP_SECONDS);
1279 if (native_cond_timedwait(&vm->ractor.sched.cond, &vm->ractor.sched.lock, &abs) == ETIMEDOUT) {
1280 RUBY_DEBUG_LOG("timeout, grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1281 VM_ASSERT(r == NULL);
1282 vm->ractor.sched.snt_cnt--;
1283 vm->ractor.sched.running_cnt--;
1284 break;
1286 else {
1287 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1289 #else
1290 ractor_sched_set_unlocked(vm, cr);
1291 rb_native_cond_wait(&vm->ractor.sched.cond, &vm->ractor.sched.lock);
1292 ractor_sched_set_locked(vm, cr);
1294 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1295 #endif
1298 VM_ASSERT(rb_current_execution_context(false) == NULL);
1300 if (r) {
1301 VM_ASSERT(vm->ractor.sched.grq_cnt > 0);
1302 vm->ractor.sched.grq_cnt--;
1303 RUBY_DEBUG_LOG("r:%d grq_cnt:%u", (int)rb_ractor_id(r), vm->ractor.sched.grq_cnt);
1305 else {
1306 VM_ASSERT(SNT_KEEP_SECONDS > 0);
1307 // timeout
1310 ractor_sched_unlock(vm, cr);
1312 return r;
1315 void rb_ractor_lock_self(rb_ractor_t *r);
1316 void rb_ractor_unlock_self(rb_ractor_t *r);
1318 void
1319 rb_ractor_sched_sleep(rb_execution_context_t *ec, rb_ractor_t *cr, rb_unblock_function_t *ubf)
1321 // ractor lock of cr is acquired
1322 // cr is in sleeping status
1323 rb_thread_t *th = rb_ec_thread_ptr(ec);
1324 struct rb_thread_sched *sched = TH_SCHED(th);
1325 cr->sync.wait.waiting_thread = th; // TODO: multi-thread
1327 setup_ubf(th, ubf, (void *)cr);
1329 thread_sched_lock(sched, th);
1331 rb_ractor_unlock_self(cr);
1333 if (RUBY_VM_INTERRUPTED(th->ec)) {
1334 RUBY_DEBUG_LOG("interrupted");
1336 else if (cr->sync.wait.wakeup_status != wakeup_none) {
1337 RUBY_DEBUG_LOG("awaken:%d", (int)cr->sync.wait.wakeup_status);
1339 else {
1340 // sleep
1341 RB_VM_SAVE_MACHINE_CONTEXT(th);
1342 th->status = THREAD_STOPPED_FOREVER;
1344 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1346 bool can_direct_transfer = !th_has_dedicated_nt(th);
1347 thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
1348 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1349 th->status = THREAD_RUNNABLE;
1350 // wakeup
1354 thread_sched_unlock(sched, th);
1356 setup_ubf(th, NULL, NULL);
1358 rb_ractor_lock_self(cr);
1359 cr->sync.wait.waiting_thread = NULL;
1362 void
1363 rb_ractor_sched_wakeup(rb_ractor_t *r)
1365 rb_thread_t *r_th = r->sync.wait.waiting_thread;
1366 // ractor lock of r is acquired
1367 struct rb_thread_sched *sched = TH_SCHED(r_th);
1369 VM_ASSERT(r->sync.wait.wakeup_status != 0);
1371 thread_sched_lock(sched, r_th);
1373 if (r_th->status == THREAD_STOPPED_FOREVER) {
1374 thread_sched_to_ready_common(sched, r_th, true, false);
1377 thread_sched_unlock(sched, r_th);
1380 static bool
1381 ractor_sched_barrier_completed_p(rb_vm_t *vm)
1383 RUBY_DEBUG_LOG("run:%u wait:%u", vm->ractor.sched.running_cnt, vm->ractor.sched.barrier_waiting_cnt);
1384 VM_ASSERT(vm->ractor.sched.running_cnt - 1 >= vm->ractor.sched.barrier_waiting_cnt);
1385 return (vm->ractor.sched.running_cnt - vm->ractor.sched.barrier_waiting_cnt) == 1;
1388 void
1389 rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
1391 VM_ASSERT(cr == GET_RACTOR());
1392 VM_ASSERT(vm->ractor.sync.lock_owner == cr); // VM is locked
1393 VM_ASSERT(!vm->ractor.sched.barrier_waiting);
1394 VM_ASSERT(vm->ractor.sched.barrier_waiting_cnt == 0);
1396 RUBY_DEBUG_LOG("start serial:%u", vm->ractor.sched.barrier_serial);
1398 unsigned int lock_rec;
1400 ractor_sched_lock(vm, cr);
1402 vm->ractor.sched.barrier_waiting = true;
1404 // release VM lock
1405 lock_rec = vm->ractor.sync.lock_rec;
1406 vm->ractor.sync.lock_rec = 0;
1407 vm->ractor.sync.lock_owner = NULL;
1408 rb_native_mutex_unlock(&vm->ractor.sync.lock);
1410 // interrupts all running threads
1411 rb_thread_t *ith;
1412 ccan_list_for_each(&vm->ractor.sched.running_threads, ith, sched.node.running_threads) {
1413 if (ith->ractor != cr) {
1414 RUBY_DEBUG_LOG("barrier int:%u", rb_th_serial(ith));
1415 RUBY_VM_SET_VM_BARRIER_INTERRUPT(ith->ec);
1419 // wait for other ractors
1420 while (!ractor_sched_barrier_completed_p(vm)) {
1421 ractor_sched_set_unlocked(vm, cr);
1422 rb_native_cond_wait(&vm->ractor.sched.barrier_complete_cond, &vm->ractor.sched.lock);
1423 ractor_sched_set_locked(vm, cr);
1427 ractor_sched_unlock(vm, cr);
1429 // acquire VM lock
1430 rb_native_mutex_lock(&vm->ractor.sync.lock);
1431 vm->ractor.sync.lock_rec = lock_rec;
1432 vm->ractor.sync.lock_owner = cr;
1434 RUBY_DEBUG_LOG("completed serial:%u", vm->ractor.sched.barrier_serial);
1436 ractor_sched_lock(vm, cr);
1438 vm->ractor.sched.barrier_waiting = false;
1439 vm->ractor.sched.barrier_serial++;
1440 vm->ractor.sched.barrier_waiting_cnt = 0;
1441 rb_native_cond_broadcast(&vm->ractor.sched.barrier_release_cond);
1443 ractor_sched_unlock(vm, cr);
1446 static void
1447 ractor_sched_barrier_join_signal_locked(rb_vm_t *vm)
1449 if (ractor_sched_barrier_completed_p(vm)) {
1450 rb_native_cond_signal(&vm->ractor.sched.barrier_complete_cond);
1454 static void
1455 ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th)
1457 VM_ASSERT(vm->ractor.sched.barrier_waiting);
1459 unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
1461 while (vm->ractor.sched.barrier_serial == barrier_serial) {
1462 RUBY_DEBUG_LOG("sleep serial:%u", barrier_serial);
1463 RB_VM_SAVE_MACHINE_CONTEXT(th);
1465 rb_ractor_t *cr = th->ractor;
1466 ractor_sched_set_unlocked(vm, cr);
1467 rb_native_cond_wait(&vm->ractor.sched.barrier_release_cond, &vm->ractor.sched.lock);
1468 ractor_sched_set_locked(vm, cr);
1470 RUBY_DEBUG_LOG("wakeup serial:%u", barrier_serial);
1474 void
1475 rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)
1477 VM_ASSERT(cr->threads.sched.running != NULL); // running ractor
1478 VM_ASSERT(cr == GET_RACTOR());
1479 VM_ASSERT(vm->ractor.sync.lock_owner == NULL); // VM is locked, but owner == NULL
1480 VM_ASSERT(vm->ractor.sched.barrier_waiting); // VM needs barrier sync
1482 #if USE_RUBY_DEBUG_LOG || VM_CHECK_MODE > 0
1483 unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
1484 #endif
1486 RUBY_DEBUG_LOG("join");
1488 rb_native_mutex_unlock(&vm->ractor.sync.lock);
1490 VM_ASSERT(vm->ractor.sched.barrier_waiting); // VM needs barrier sync
1491 VM_ASSERT(vm->ractor.sched.barrier_serial == barrier_serial);
1493 ractor_sched_lock(vm, cr);
1495 // running_cnt
1496 vm->ractor.sched.barrier_waiting_cnt++;
1497 RUBY_DEBUG_LOG("waiting_cnt:%u serial:%u", vm->ractor.sched.barrier_waiting_cnt, barrier_serial);
1499 ractor_sched_barrier_join_signal_locked(vm);
1500 ractor_sched_barrier_join_wait_locked(vm, cr->threads.sched.running);
1502 ractor_sched_unlock(vm, cr);
1505 rb_native_mutex_lock(&vm->ractor.sync.lock);
1506 // VM locked here
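// Summary of the barrier protocol above (descriptive comment):
// * rb_ractor_sched_barrier_start() (the initiating ractor) sets barrier_waiting,
//   releases the VM lock, interrupts every other running thread, and sleeps on
//   barrier_complete_cond until only itself remains running
//   (running_cnt - barrier_waiting_cnt == 1).
// * rb_ractor_sched_barrier_join() (each other ractor) increments
//   barrier_waiting_cnt, signals barrier_complete_cond, and sleeps on
//   barrier_release_cond until barrier_serial changes.
// * The initiator then clears barrier_waiting, increments barrier_serial, resets
//   barrier_waiting_cnt and broadcasts barrier_release_cond, releasing the joiners.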
1509 #if 0
1510 // TODO
1512 static void clear_thread_cache_altstack(void);
1514 static void
1515 rb_thread_sched_destroy(struct rb_thread_sched *sched)
1518 * only called once at VM shutdown (not atfork), another thread
1519 * may still grab vm->gvl.lock when calling gvl_release at
1520 * the end of thread_start_func_2
1522 if (0) {
1523 rb_native_mutex_destroy(&sched->lock);
1525 clear_thread_cache_altstack();
1527 #endif
1529 #ifdef RB_THREAD_T_HAS_NATIVE_ID
1530 static int
1531 get_native_thread_id(void)
1533 #ifdef __linux__
1534 return (int)syscall(SYS_gettid);
1535 #elif defined(__FreeBSD__)
1536 return pthread_getthreadid_np();
1537 #endif
1539 #endif
1541 #if defined(HAVE_WORKING_FORK)
1542 static void
1543 thread_sched_atfork(struct rb_thread_sched *sched)
1545 current_fork_gen++;
1546 rb_thread_sched_init(sched, true);
1547 rb_thread_t *th = GET_THREAD();
1548 rb_vm_t *vm = GET_VM();
1550 if (th_has_dedicated_nt(th)) {
1551 vm->ractor.sched.snt_cnt = 0;
1553 else {
1554 vm->ractor.sched.snt_cnt = 1;
1556 vm->ractor.sched.running_cnt = 0;
1558 // rb_native_cond_destroy(&vm->ractor.sched.cond);
1559 rb_native_cond_initialize(&vm->ractor.sched.cond);
1560 rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
1561 rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);
1563 ccan_list_head_init(&vm->ractor.sched.grq);
1564 ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
1565 ccan_list_head_init(&vm->ractor.sched.running_threads);
1567 VM_ASSERT(sched->is_running);
1568 sched->is_running_timeslice = false;
1570 if (sched->running != th) {
1571 thread_sched_to_running(sched, th);
1573 else {
1574 thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, NULL);
1577 #ifdef RB_THREAD_T_HAS_NATIVE_ID
1578 if (th->nt) {
1579 th->nt->tid = get_native_thread_id();
1581 #endif
1584 #endif
1586 #ifdef RB_THREAD_LOCAL_SPECIFIER
1587 static RB_THREAD_LOCAL_SPECIFIER rb_thread_t *ruby_native_thread;
1588 #else
1589 static pthread_key_t ruby_native_thread_key;
1590 #endif
1592 static void
1593 null_func(int i)
1595 /* null */
1596 // This function can be called from a signal handler.
1597 // RUBY_DEBUG_LOG("i:%d", i);
1600 rb_thread_t *
1601 ruby_thread_from_native(void)
1603 #ifdef RB_THREAD_LOCAL_SPECIFIER
1604 return ruby_native_thread;
1605 #else
1606 return pthread_getspecific(ruby_native_thread_key);
1607 #endif
1611 ruby_thread_set_native(rb_thread_t *th)
1613 if (th) {
1614 #ifdef USE_UBF_LIST
1615 ccan_list_node_init(&th->sched.node.ubf);
1616 #endif
1619 // setup TLS
1621 if (th && th->ec) {
1622 rb_ractor_set_current_ec(th->ractor, th->ec);
1624 #ifdef RB_THREAD_LOCAL_SPECIFIER
1625 ruby_native_thread = th;
1626 return 1;
1627 #else
1628 return pthread_setspecific(ruby_native_thread_key, th) == 0;
1629 #endif
1632 static void native_thread_setup(struct rb_native_thread *nt);
1633 static void native_thread_setup_on_thread(struct rb_native_thread *nt);
1635 void
1636 Init_native_thread(rb_thread_t *main_th)
1638 #if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
1639 if (condattr_monotonic) {
1640 int r = pthread_condattr_init(condattr_monotonic);
1641 if (r == 0) {
1642 r = pthread_condattr_setclock(condattr_monotonic, CLOCK_MONOTONIC);
1644 if (r) condattr_monotonic = NULL;
1646 #endif
1648 #ifndef RB_THREAD_LOCAL_SPECIFIER
1649 if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
1650 rb_bug("pthread_key_create failed (ruby_native_thread_key)");
1652 if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
1653 rb_bug("pthread_key_create failed (ruby_current_ec_key)");
1655 #endif
1656 ruby_posix_signal(SIGVTALRM, null_func);
1658 // setup vm
1659 rb_vm_t *vm = main_th->vm;
1660 rb_native_mutex_initialize(&vm->ractor.sched.lock);
1661 rb_native_cond_initialize(&vm->ractor.sched.cond);
1662 rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
1663 rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);
1665 ccan_list_head_init(&vm->ractor.sched.grq);
1666 ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
1667 ccan_list_head_init(&vm->ractor.sched.running_threads);
1669 // setup main thread
1670 main_th->nt->thread_id = pthread_self();
1671 main_th->nt->serial = 1;
1672 #ifdef RUBY_NT_SERIAL
1673 ruby_nt_serial = 1;
1674 #endif
1675 ruby_thread_set_native(main_th);
1676 native_thread_setup(main_th->nt);
1677 native_thread_setup_on_thread(main_th->nt);
1679 TH_SCHED(main_th)->running = main_th;
1680 main_th->has_dedicated_nt = 1;
1682 thread_sched_setup_running_threads(TH_SCHED(main_th), main_th->ractor, vm, main_th, NULL, NULL);
1684 // setup main NT
1685 main_th->nt->dedicated = 1;
1686 main_th->nt->vm = vm;
1688 // setup mn
1689 vm->ractor.sched.dnt_cnt = 1;
1692 extern int ruby_mn_threads_enabled;
1694 void
1695 ruby_mn_threads_params(void)
1697 rb_vm_t *vm = GET_VM();
1698 rb_ractor_t *main_ractor = GET_RACTOR();
1700 const char *mn_threads_cstr = getenv("RUBY_MN_THREADS");
1701 bool enable_mn_threads = false;
1703 if (USE_MN_THREADS && mn_threads_cstr && (enable_mn_threads = atoi(mn_threads_cstr) > 0)) {
1704 // enabled
1705 ruby_mn_threads_enabled = 1;
1707 main_ractor->threads.sched.enable_mn_threads = enable_mn_threads;
1709 const char *max_cpu_cstr = getenv("RUBY_MAX_CPU");
1710 const int default_max_cpu = 8; // TODO: CPU num?
1711 int max_cpu = default_max_cpu;
1713 if (USE_MN_THREADS && max_cpu_cstr) {
1714 int given_max_cpu = atoi(max_cpu_cstr);
1715 if (given_max_cpu > 0) {
1716 max_cpu = given_max_cpu;
1720 vm->ractor.sched.max_cpu = max_cpu;
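// Usage sketch (based on the getenv() calls above): M:N scheduling is opted in
// from the environment, e.g.
//
//   RUBY_MN_THREADS=1 RUBY_MAX_CPU=4 ruby script.rb
//
// RUBY_MN_THREADS=1 enables MN threads for the main Ractor (non-main Ractors
// enable them regardless; see rb_thread_sched_init), and RUBY_MAX_CPU caps
// vm->ractor.sched.max_cpu (default 8).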
1723 static void
1724 native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
1726 RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated + 1);
1728 if (nt->dedicated == 0) {
1729 ractor_sched_lock(vm, cr);
1731 vm->ractor.sched.snt_cnt--;
1732 vm->ractor.sched.dnt_cnt++;
1734 ractor_sched_unlock(vm, cr);
1737 nt->dedicated++;
1740 static void
1741 native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
1743 RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated - 1);
1744 VM_ASSERT(nt->dedicated > 0);
1745 nt->dedicated--;
1747 if (nt->dedicated == 0) {
1748 ractor_sched_lock(vm, cr);
1750 nt->vm->ractor.sched.snt_cnt++;
1751 nt->vm->ractor.sched.dnt_cnt--;
1753 ractor_sched_unlock(vm, cr);
1757 static void
1758 native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th)
1760 #if USE_RUBY_DEBUG_LOG
1761 if (nt) {
1762 if (th->nt) {
1763 RUBY_DEBUG_LOG("th:%d nt:%d->%d", (int)th->serial, (int)th->nt->serial, (int)nt->serial);
1765 else {
1766 RUBY_DEBUG_LOG("th:%d nt:NULL->%d", (int)th->serial, (int)nt->serial);
1769 else {
1770 if (th->nt) {
1771 RUBY_DEBUG_LOG("th:%d nt:%d->NULL", (int)th->serial, (int)th->nt->serial);
1773 else {
1774 RUBY_DEBUG_LOG("th:%d nt:NULL->NULL", (int)th->serial);
1777 #endif
1779 th->nt = nt;
1782 static void
1783 native_thread_destroy(struct rb_native_thread *nt)
1785 if (nt) {
1786 rb_native_cond_destroy(&nt->cond.readyq);
1788 if (&nt->cond.readyq != &nt->cond.intr) {
1789 rb_native_cond_destroy(&nt->cond.intr);
1792 RB_ALTSTACK_FREE(nt->altstack);
1793 ruby_xfree(nt->nt_context);
1794 ruby_xfree(nt);
1798 #if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
1799 #define STACKADDR_AVAILABLE 1
1800 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
1801 #define STACKADDR_AVAILABLE 1
1802 #undef MAINSTACKADDR_AVAILABLE
1803 #define MAINSTACKADDR_AVAILABLE 1
1804 void *pthread_get_stackaddr_np(pthread_t);
1805 size_t pthread_get_stacksize_np(pthread_t);
1806 #elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1807 #define STACKADDR_AVAILABLE 1
1808 #elif defined HAVE_PTHREAD_GETTHRDS_NP
1809 #define STACKADDR_AVAILABLE 1
1810 #elif defined __HAIKU__
1811 #define STACKADDR_AVAILABLE 1
1812 #endif
1814 #ifndef MAINSTACKADDR_AVAILABLE
1815 # ifdef STACKADDR_AVAILABLE
1816 # define MAINSTACKADDR_AVAILABLE 1
1817 # else
1818 # define MAINSTACKADDR_AVAILABLE 0
1819 # endif
1820 #endif
1821 #if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
1822 # define get_main_stack(addr, size) get_stack(addr, size)
1823 #endif
1825 #ifdef STACKADDR_AVAILABLE
1827 * Get the initial address and size of current thread's stack
1829 static int
1830 get_stack(void **addr, size_t *size)
1832 #define CHECK_ERR(expr) \
1833 {int err = (expr); if (err) return err;}
1834 #ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
1835 pthread_attr_t attr;
1836 size_t guard = 0;
1837 STACK_GROW_DIR_DETECTION;
1838 CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
1839 # ifdef HAVE_PTHREAD_ATTR_GETSTACK
1840 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
1841 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1842 # else
1843 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
1844 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
1845 # endif
1846 # ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
1847 CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
1848 # else
1849 guard = getpagesize();
1850 # endif
1851 *size -= guard;
1852 pthread_attr_destroy(&attr);
1853 #elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
1854 pthread_attr_t attr;
1855 CHECK_ERR(pthread_attr_init(&attr));
1856 CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
1857 # ifdef HAVE_PTHREAD_ATTR_GETSTACK
1858 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
1859 # else
1860 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
1861 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
1862 # endif
1863 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1864 pthread_attr_destroy(&attr);
1865 #elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
1866 pthread_t th = pthread_self();
1867 *addr = pthread_get_stackaddr_np(th);
1868 *size = pthread_get_stacksize_np(th);
1869 #elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1870 stack_t stk;
1871 # if defined HAVE_THR_STKSEGMENT /* Solaris */
1872 CHECK_ERR(thr_stksegment(&stk));
1873 # else /* OpenBSD */
1874 CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
1875 # endif
1876 *addr = stk.ss_sp;
1877 *size = stk.ss_size;
1878 #elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
1879 pthread_t th = pthread_self();
1880 struct __pthrdsinfo thinfo;
1881 char reg[256];
1882 int regsiz=sizeof(reg);
1883 CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
1884 &thinfo, sizeof(thinfo),
1885 &reg, &regsiz));
1886 *addr = thinfo.__pi_stackaddr;
1887 /* Must not use thinfo.__pi_stacksize for size.
1888 It is around 3KB smaller than the correct size
1889 calculated by thinfo.__pi_stackend - thinfo.__pi_stackaddr. */
1890 *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
1891 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1892 #elif defined __HAIKU__
1893 thread_info info;
1894 STACK_GROW_DIR_DETECTION;
1895 CHECK_ERR(get_thread_info(find_thread(NULL), &info));
1896 *addr = info.stack_base;
1897 *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
1898 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1899 #else
1900 #error STACKADDR_AVAILABLE is defined but not implemented.
1901 #endif
1902 return 0;
1903 #undef CHECK_ERR
1905 #endif
1907 static struct {
1908 rb_nativethread_id_t id;
1909 size_t stack_maxsize;
1910 VALUE *stack_start;
1911 } native_main_thread;
1913 #ifdef STACK_END_ADDRESS
1914 extern void *STACK_END_ADDRESS;
1915 #endif
1917 enum {
1918 RUBY_STACK_SPACE_LIMIT = 1024 * 1024, /* 1024KB */
1919 RUBY_STACK_SPACE_RATIO = 5
1922 static size_t
1923 space_size(size_t stack_size)
1925 size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
1926 if (space_size > RUBY_STACK_SPACE_LIMIT) {
1927 return RUBY_STACK_SPACE_LIMIT;
1929 else {
1930 return space_size;
1934 #ifdef __linux__
1935 static __attribute__((noinline)) void
1936 reserve_stack(volatile char *limit, size_t size)
1938 # ifdef C_ALLOCA
1939 # error needs alloca()
1940 # endif
1941 struct rlimit rl;
1942 volatile char buf[0x100];
1943 enum {stack_check_margin = 0x1000}; /* for -fstack-check */
1945 STACK_GROW_DIR_DETECTION;
1947 if (!getrlimit(RLIMIT_STACK, &rl) && rl.rlim_cur == RLIM_INFINITY)
1948 return;
1950 if (size < stack_check_margin) return;
1951 size -= stack_check_margin;
1953 size -= sizeof(buf); /* margin */
1954 if (IS_STACK_DIR_UPPER()) {
1955 const volatile char *end = buf + sizeof(buf);
1956 limit += size;
1957 if (limit > end) {
1958 /* |<-bottom (=limit(a)) top->|
1959 * | .. |<-buf 256B |<-end | stack check |
1960 * | 256B | =size= | margin (4KB)|
1961 * | =size= limit(b)->| 256B | |
1962 * | | alloca(sz) | | |
1963 * | .. |<-buf |<-limit(c) [sz-1]->0> | |
1965 size_t sz = limit - end;
1966 limit = alloca(sz);
1967 limit[sz-1] = 0;
1970 else {
1971 limit -= size;
1972 if (buf > limit) {
1973 /* |<-top (=limit(a)) bottom->|
1974 * | .. | 256B buf->| | stack check |
1975 * | 256B | =size= | margin (4KB)|
1976 * | =size= limit(b)->| 256B | |
1977 * | | alloca(sz) | | |
1978 * | .. | buf->| limit(c)-><0> | |
1980 size_t sz = buf - limit;
1981 limit = alloca(sz);
1982 limit[0] = 0;
1986 #else
1987 # define reserve_stack(limit, size) ((void)(limit), (void)(size))
1988 #endif
1990 static void
1991 native_thread_init_main_thread_stack(void *addr)
1993 native_main_thread.id = pthread_self();
1994 #ifdef RUBY_ASAN_ENABLED
1995 addr = asan_get_real_stack_addr((void *)addr);
1996 #endif
1998 #if MAINSTACKADDR_AVAILABLE
1999 if (native_main_thread.stack_maxsize) return;
2001 void* stackaddr;
2002 size_t size;
2003 if (get_main_stack(&stackaddr, &size) == 0) {
2004 native_main_thread.stack_maxsize = size;
2005 native_main_thread.stack_start = stackaddr;
2006 reserve_stack(stackaddr, size);
2007 goto bound_check;
2010 #endif
2011 #ifdef STACK_END_ADDRESS
2012 native_main_thread.stack_start = STACK_END_ADDRESS;
2013 #else
2014 if (!native_main_thread.stack_start ||
2015 STACK_UPPER((VALUE *)(void *)&addr,
2016 native_main_thread.stack_start > (VALUE *)addr,
2017 native_main_thread.stack_start < (VALUE *)addr)) {
2018 native_main_thread.stack_start = (VALUE *)addr;
2020 #endif
2022 #if defined(HAVE_GETRLIMIT)
2023 #if defined(PTHREAD_STACK_DEFAULT)
2024 # if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
2025 # error "PTHREAD_STACK_DEFAULT is too small"
2026 # endif
2027 size_t size = PTHREAD_STACK_DEFAULT;
2028 #else
2029 size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
2030 #endif
2031 size_t space;
2032 int pagesize = getpagesize();
2033 struct rlimit rlim;
2034 STACK_GROW_DIR_DETECTION;
2035 if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
2036 size = (size_t)rlim.rlim_cur;
2038 addr = native_main_thread.stack_start;
2039 if (IS_STACK_DIR_UPPER()) {
2040 space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
2042 else {
2043 space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
2045 native_main_thread.stack_maxsize = space;
2046 #endif
2049 #if MAINSTACKADDR_AVAILABLE
2050 bound_check:
2051 #endif
2052 /* If addr is outside the estimated main-thread stack range, */
2053 /* it should be on a co-routine (alternative) stack. [Feature #2294] */
2055 void *start, *end;
2056 STACK_GROW_DIR_DETECTION;
2058 if (IS_STACK_DIR_UPPER()) {
2059 start = native_main_thread.stack_start;
2060 end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
2062 else {
2063 start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
2064 end = native_main_thread.stack_start;
2067 if ((void *)addr < start || (void *)addr > end) {
2068 /* out of range */
2069 native_main_thread.stack_start = (VALUE *)addr;
2070 native_main_thread.stack_maxsize = 0; /* unknown */
2075 #define CHECK_ERR(expr) \
2076 {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
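/* native_thread_init_stack() fills in th->ec->machine.stack_start and
 * stack_maxsize for the calling native thread: the main thread reuses
 * native_main_thread, while a thread with a dedicated NT queries its own
 * pthread stack via get_stack(). */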
2078 static int
2079 native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
2081 rb_nativethread_id_t curr = pthread_self();
2082 #ifdef RUBY_ASAN_ENABLED
2083 local_in_parent_frame = asan_get_real_stack_addr(local_in_parent_frame);
2084 th->ec->machine.asan_fake_stack_handle = asan_get_thread_fake_stack_handle();
2085 #endif
2087 if (!native_main_thread.id) {
2088 /* This thread is the first thread, must be the main thread -
2089 * configure the native_main_thread object */
2090 native_thread_init_main_thread_stack(local_in_parent_frame);
2093 if (pthread_equal(curr, native_main_thread.id)) {
2094 th->ec->machine.stack_start = native_main_thread.stack_start;
2095 th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
2097 else {
2098 #ifdef STACKADDR_AVAILABLE
2099 if (th_has_dedicated_nt(th)) {
2100 void *start;
2101 size_t size;
2103 if (get_stack(&start, &size) == 0) {
2104 uintptr_t diff = (uintptr_t)start - (uintptr_t)local_in_parent_frame;
2105 th->ec->machine.stack_start = local_in_parent_frame;
2106 th->ec->machine.stack_maxsize = size - diff;
2109 #else
2110 rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
2111 #endif
2114 return 0;
2117 struct nt_param {
2118 rb_vm_t *vm;
2119 struct rb_native_thread *nt;
2122 static void *
2123 nt_start(void *ptr);
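/* native_thread_create0() spawns the underlying pthread for `nt` in the
 * detached state, using the VM's configured machine stack size minus the
 * guard space computed by space_size(). */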
2125 static int
2126 native_thread_create0(struct rb_native_thread *nt)
2128 int err = 0;
2129 pthread_attr_t attr;
2131 const size_t stack_size = nt->vm->default_params.thread_machine_stack_size;
2132 const size_t space = space_size(stack_size);
2134 nt->machine_stack_maxsize = stack_size - space;
2136 #ifdef USE_SIGALTSTACK
2137 nt->altstack = rb_allocate_sigaltstack();
2138 #endif
2140 CHECK_ERR(pthread_attr_init(&attr));
2142 # ifdef PTHREAD_STACK_MIN
2143 RUBY_DEBUG_LOG("stack size: %lu", (unsigned long)stack_size);
2144 CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
2145 # endif
2147 # ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
2148 CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
2149 # endif
2150 CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
2152 err = pthread_create(&nt->thread_id, &attr, nt_start, nt);
2154 RUBY_DEBUG_LOG("nt:%d err:%d", (int)nt->serial, err);
2156 CHECK_ERR(pthread_attr_destroy(&attr));
2158 return err;
2161 static void
2162 native_thread_setup(struct rb_native_thread *nt)
2164 // init cond
2165 rb_native_cond_initialize(&nt->cond.readyq);
2167 if (&nt->cond.readyq != &nt->cond.intr) {
2168 rb_native_cond_initialize(&nt->cond.intr);
2172 static void
2173 native_thread_setup_on_thread(struct rb_native_thread *nt)
2175 // init tid
2176 #ifdef RB_THREAD_T_HAS_NATIVE_ID
2177 nt->tid = get_native_thread_id();
2178 #endif
2180 // init signal handler
2181 RB_ALTSTACK_INIT(nt->altstack, nt->altstack);
2184 static struct rb_native_thread *
2185 native_thread_alloc(void)
2187 struct rb_native_thread *nt = ZALLOC(struct rb_native_thread);
2188 native_thread_setup(nt);
2190 #if USE_MN_THREADS
2191 nt->nt_context = ruby_xmalloc(sizeof(struct coroutine_context));
2192 #endif
2194 #if USE_RUBY_DEBUG_LOG
2195 static rb_atomic_t nt_serial = 2;
2196 nt->serial = RUBY_ATOMIC_FETCH_ADD(nt_serial, 1);
2197 #endif
2198 return nt;
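/* native_thread_create_dedicated() gives `th` its own native thread (DNT):
 * it allocates the nt and a malloc'ed VM stack, marks the thread ready, and
 * then starts the pthread via native_thread_create0(). */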
2201 static int
2202 native_thread_create_dedicated(rb_thread_t *th)
2204 th->nt = native_thread_alloc();
2205 th->nt->vm = th->vm;
2206 th->nt->running_thread = th;
2207 th->nt->dedicated = 1;
2209 // vm stack
2210 size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
2211 void *vm_stack = ruby_xmalloc(vm_stack_word_size * sizeof(VALUE));
2212 th->sched.malloc_stack = true;
2213 rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);
2214 th->sched.context_stack = vm_stack;
2216 // setup
2217 thread_sched_to_ready(TH_SCHED(th), th);
2219 return native_thread_create0(th->nt);
2222 static void
2223 call_thread_start_func_2(rb_thread_t *th)
2225 /* Capture the address of a local in this stack frame to mark the beginning of the
2226 machine stack for this thread. This is required even if we can tell the real
2227 stack beginning from the pthread API in native_thread_init_stack, because
2228 glibc stores some of its own data on the stack before calling into user code
2229 on a new thread, and replacing that data on fiber-switch would break it (see
2230 bug #13887) */
2231 VALUE stack_start = 0;
2232 VALUE *stack_start_addr = asan_get_real_stack_addr(&stack_start);
2234 native_thread_init_stack(th, stack_start_addr);
2235 thread_start_func_2(th, th->ec->machine.stack_start);
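/* nt_start() is the entry point of every native thread created above.
 * A dedicated NT waits for its Ruby thread's running turn and then runs it
 * to completion; a shared NT (MN threads) loops, dequeuing ractors from the
 * global run queue and switching to their next runnable thread, and exits
 * when the dequeue times out or when it has been promoted to a DNT while
 * running. */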
2238 static void *
2239 nt_start(void *ptr)
2241 struct rb_native_thread *nt = (struct rb_native_thread *)ptr;
2242 rb_vm_t *vm = nt->vm;
2244 native_thread_setup_on_thread(nt);
2246 // init tid
2247 #ifdef RB_THREAD_T_HAS_NATIVE_ID
2248 nt->tid = get_native_thread_id();
2249 #endif
2251 #if USE_RUBY_DEBUG_LOG && defined(RUBY_NT_SERIAL)
2252 ruby_nt_serial = nt->serial;
2253 #endif
2255 RUBY_DEBUG_LOG("nt:%u", nt->serial);
2257 if (!nt->dedicated) {
2258 coroutine_initialize_main(nt->nt_context);
2261 while (1) {
2262 if (nt->dedicated) {
2263 // wait running turn
2264 rb_thread_t *th = nt->running_thread;
2265 struct rb_thread_sched *sched = TH_SCHED(th);
2267 RUBY_DEBUG_LOG("on dedicated th:%u", rb_th_serial(th));
2268 ruby_thread_set_native(th);
2270 thread_sched_lock(sched, th);
2272 if (sched->running == th) {
2273 thread_sched_add_running_thread(sched, th);
2275 thread_sched_wait_running_turn(sched, th, false);
2277 thread_sched_unlock(sched, th);
2279 // start threads
2280 call_thread_start_func_2(th);
2281 break; // TODO: allow switching to an SNT
2283 else {
2284 RUBY_DEBUG_LOG("check next");
2285 rb_ractor_t *r = ractor_sched_deq(vm, NULL);
2287 if (r) {
2288 struct rb_thread_sched *sched = &r->threads.sched;
2290 thread_sched_lock(sched, NULL);
2292 rb_thread_t *next_th = sched->running;
2294 if (next_th && next_th->nt == NULL) {
2295 RUBY_DEBUG_LOG("nt:%d next_th:%d", (int)nt->serial, (int)next_th->serial);
2296 thread_sched_switch0(nt->nt_context, next_th, nt, false);
2298 else {
2299 RUBY_DEBUG_LOG("no schedulable threads -- next_th:%p", next_th);
2302 thread_sched_unlock(sched, NULL);
2304 else {
2305 // dequeue timed out -> this shared native thread is deleted (exits).
2306 break;
2309 if (nt->dedicated) {
2310 // SNT becomes DNT while running
2311 break;
2316 return NULL;
2319 static int native_thread_create_shared(rb_thread_t *th);
2321 #if USE_MN_THREADS
2322 static void nt_free_stack(void *mstack);
2323 #endif
2325 void
2326 rb_threadptr_remove(rb_thread_t *th)
2328 #if USE_MN_THREADS
2329 if (th->sched.malloc_stack) {
2330 // dedicated
2331 return;
2333 else {
2334 rb_vm_t *vm = th->vm;
2335 th->sched.finished = false;
2337 RB_VM_LOCK_ENTER();
2339 ccan_list_add(&vm->ractor.sched.zombie_threads, &th->sched.node.zombie_threads);
2341 RB_VM_LOCK_LEAVE();
2343 #endif
2346 void
2347 rb_threadptr_sched_free(rb_thread_t *th)
2349 #if USE_MN_THREADS
2350 if (th->sched.malloc_stack) {
2351 // has dedicated
2352 ruby_xfree(th->sched.context_stack);
2353 native_thread_destroy(th->nt);
2355 else {
2356 nt_free_stack(th->sched.context_stack);
2357 // TODO: how to free nt and nt->altstack?
2360 ruby_xfree(th->sched.context);
2361 VM_ASSERT((th->sched.context = NULL) == NULL);
2362 #else
2363 ruby_xfree(th->sched.context_stack);
2364 native_thread_destroy(th->nt);
2365 #endif
2367 th->nt = NULL;
2370 void
2371 rb_thread_sched_mark_zombies(rb_vm_t *vm)
2373 if (!ccan_list_empty(&vm->ractor.sched.zombie_threads)) {
2374 rb_thread_t *zombie_th, *next_zombie_th;
2375 ccan_list_for_each_safe(&vm->ractor.sched.zombie_threads, zombie_th, next_zombie_th, sched.node.zombie_threads) {
2376 if (zombie_th->sched.finished) {
2377 ccan_list_del_init(&zombie_th->sched.node.zombie_threads);
2379 else {
2380 rb_gc_mark(zombie_th->self);
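/* native_thread_create() picks the threading model for `th`: a dedicated
 * native thread when MN threads are disabled for its ractor (or explicitly
 * requested), otherwise a shared native thread. */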
2386 static int
2387 native_thread_create(rb_thread_t *th)
2389 VM_ASSERT(th->nt == 0);
2390 RUBY_DEBUG_LOG("th:%d has_dnt:%d", th->serial, th->has_dedicated_nt);
2391 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_STARTED, th);
2393 if (!th->ractor->threads.sched.enable_mn_threads) {
2394 th->has_dedicated_nt = 1;
2397 if (th->has_dedicated_nt) {
2398 return native_thread_create_dedicated(th);
2400 else {
2401 return native_thread_create_shared(th);
2405 #if USE_NATIVE_THREAD_PRIORITY
2407 static void
2408 native_thread_apply_priority(rb_thread_t *th)
2410 #if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
2411 struct sched_param sp;
2412 int policy;
2413 int priority = 0 - th->priority;
2414 int max, min;
2415 pthread_getschedparam(th->nt->thread_id, &policy, &sp);
2416 max = sched_get_priority_max(policy);
2417 min = sched_get_priority_min(policy);
2419 if (min > priority) {
2420 priority = min;
2422 else if (max < priority) {
2423 priority = max;
2426 sp.sched_priority = priority;
2427 pthread_setschedparam(th->nt->thread_id, policy, &sp);
2428 #else
2429 /* not touched */
2430 #endif
2433 #endif /* USE_NATIVE_THREAD_PRIORITY */
2435 static int
2436 native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
2438 return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
2441 static void
2442 ubf_pthread_cond_signal(void *ptr)
2444 rb_thread_t *th = (rb_thread_t *)ptr;
2445 RUBY_DEBUG_LOG("th:%u on nt:%d", rb_th_serial(th), (int)th->nt->serial);
2446 rb_native_cond_signal(&th->nt->cond.intr);
2449 static void
2450 native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
2452 rb_nativethread_lock_t *lock = &th->interrupt_lock;
2453 rb_nativethread_cond_t *cond = &th->nt->cond.intr;
2455 /* Solaris cond_timedwait() returns EINVAL if the timeout exceeds
2456 * current_time + 100,000,000 seconds, so the relative timeout is capped
2457 * at 100,000,000 seconds. Waking up early because of this cap is treated
2458 * as a spurious wakeup; callers of native_sleep must handle spurious wakeups.
2459 *
2460 * See also [Bug #1341] [ruby-core:29702]
2461 * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
2462 */
2463 const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;
2465 THREAD_BLOCKING_BEGIN(th);
2467 rb_native_mutex_lock(lock);
2468 th->unblock.func = ubf_pthread_cond_signal;
2469 th->unblock.arg = th;
2471 if (RUBY_VM_INTERRUPTED(th->ec)) {
2472 /* interrupted; return immediately */
2473 RUBY_DEBUG_LOG("interrupted before sleep th:%u", rb_th_serial(th));
2475 else {
2476 if (!rel) {
2477 rb_native_cond_wait(cond, lock);
2479 else {
2480 rb_hrtime_t end;
2482 if (*rel > max) {
2483 *rel = max;
2486 end = native_cond_timeout(cond, *rel);
2487 native_cond_timedwait(cond, lock, &end);
2490 th->unblock.func = 0;
2492 rb_native_mutex_unlock(lock);
2494 THREAD_BLOCKING_END(th);
2496 RUBY_DEBUG_LOG("done th:%u", rb_th_serial(th));
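/* UBF list: a thread blocking in a syscall registers itself here so that the
 * timer thread keeps re-sending SIGVTALRM (ubf_wakeup_all_threads) until the
 * thread returns from the syscall and unregisters itself. */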
2499 #ifdef USE_UBF_LIST
2500 static CCAN_LIST_HEAD(ubf_list_head);
2501 static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;
2503 static void
2504 ubf_list_atfork(void)
2506 ccan_list_head_init(&ubf_list_head);
2507 rb_native_mutex_initialize(&ubf_list_lock);
2510 RBIMPL_ATTR_MAYBE_UNUSED()
2511 static bool
2512 ubf_list_contain_p(rb_thread_t *th)
2514 rb_thread_t *list_th;
2515 ccan_list_for_each(&ubf_list_head, list_th, sched.node.ubf) {
2516 if (list_th == th) return true;
2518 return false;
2521 /* Register the thread 'th' so that its unblocking keeps being retried. */
2522 static void
2523 register_ubf_list(rb_thread_t *th)
2525 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2526 struct ccan_list_node *node = &th->sched.node.ubf;
2528 VM_ASSERT(th->unblock.func != NULL);
2530 rb_native_mutex_lock(&ubf_list_lock);
2532 // add only if not already on the list
2533 if (ccan_list_empty((struct ccan_list_head*)node)) {
2534 VM_ASSERT(!ubf_list_contain_p(th));
2535 ccan_list_add(&ubf_list_head, node);
2538 rb_native_mutex_unlock(&ubf_list_lock);
2540 timer_thread_wakeup();
2543 /* The thread 'th' has been unblocked; it no longer needs to be registered. */
2544 static void
2545 unregister_ubf_list(rb_thread_t *th)
2547 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2548 struct ccan_list_node *node = &th->sched.node.ubf;
2550 /* we can't allow re-entry into ubf_list_head */
2551 VM_ASSERT(th->unblock.func == NULL);
2553 if (!ccan_list_empty((struct ccan_list_head*)node)) {
2554 rb_native_mutex_lock(&ubf_list_lock);
2556 VM_ASSERT(ubf_list_contain_p(th));
2557 ccan_list_del_init(node);
2559 rb_native_mutex_unlock(&ubf_list_lock);
2563 /*
2564 * Send a signal so that the target thread returns from a blocking syscall.
2565 * Almost any signal would do, but SIGVTALRM is used here.
2566 */
2567 static void
2568 ubf_wakeup_thread(rb_thread_t *th)
2570 RUBY_DEBUG_LOG("th:%u thread_id:%p", rb_th_serial(th), (void *)th->nt->thread_id);
2572 pthread_kill(th->nt->thread_id, SIGVTALRM);
2575 static void
2576 ubf_select(void *ptr)
2578 rb_thread_t *th = (rb_thread_t *)ptr;
2579 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
2580 ubf_wakeup_thread(th);
2581 register_ubf_list(th);
2584 static bool
2585 ubf_threads_empty(void)
2587 return ccan_list_empty(&ubf_list_head) != 0;
2590 static void
2591 ubf_wakeup_all_threads(void)
2593 if (!ubf_threads_empty()) {
2594 rb_thread_t *th;
2595 rb_native_mutex_lock(&ubf_list_lock);
2597 ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
2598 ubf_wakeup_thread(th);
2601 rb_native_mutex_unlock(&ubf_list_lock);
2605 #else /* USE_UBF_LIST */
2606 #define register_ubf_list(th) (void)(th)
2607 #define unregister_ubf_list(th) (void)(th)
2608 #define ubf_select 0
2609 static void ubf_wakeup_all_threads(void) { return; }
2610 static bool ubf_threads_empty(void) { return true; }
2611 #define ubf_list_atfork() do {} while (0)
2612 #endif /* USE_UBF_LIST */
2614 #define TT_DEBUG 0
2615 #define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
2617 void
2618 rb_thread_wakeup_timer_thread(int sig)
2620 // This function can be called from signal handlers, so
2621 // pthread_mutex_lock() must not be used here.
2623 // wakeup timer thread
2624 timer_thread_wakeup_force();
2626 // interrupt main thread if main thread is available
2627 if (system_working) {
2628 rb_vm_t *vm = GET_VM();
2629 rb_thread_t *main_th = vm->ractor.main_thread;
2631 if (main_th) {
2632 volatile rb_execution_context_t *main_th_ec = ACCESS_ONCE(rb_execution_context_t *, main_th->ec);
2634 if (main_th_ec) {
2635 RUBY_VM_SET_TRAP_INTERRUPT(main_th_ec);
2637 if (vm->ubf_async_safe && main_th->unblock.func) {
2638 (main_th->unblock.func)(main_th->unblock.arg);
2645 #define CLOSE_INVALIDATE_PAIR(expr) \
2646 close_invalidate_pair(expr,"close_invalidate: "#expr)
2647 static void
2648 close_invalidate(int *fdp, const char *msg)
2650 int fd = *fdp;
2652 *fdp = -1;
2653 if (close(fd) < 0) {
2654 async_bug_fd(msg, errno, fd);
2658 static void
2659 close_invalidate_pair(int fds[2], const char *msg)
2661 if (USE_EVENTFD && fds[0] == fds[1]) {
2662 fds[1] = -1; // disable write port first
2663 close_invalidate(&fds[0], msg);
2665 else {
2666 close_invalidate(&fds[1], msg);
2667 close_invalidate(&fds[0], msg);
2671 static void
2672 set_nonblock(int fd)
2674 int oflags;
2675 int err;
2677 oflags = fcntl(fd, F_GETFL);
2678 if (oflags == -1)
2679 rb_sys_fail(0);
2680 oflags |= O_NONBLOCK;
2681 err = fcntl(fd, F_SETFL, oflags);
2682 if (err == -1)
2683 rb_sys_fail(0);
2686 /* communication pipe with timer thread and signal handler */
2687 static void
2688 setup_communication_pipe_internal(int pipes[2])
2690 int err;
2692 if (pipes[0] > 0 || pipes[1] > 0) {
2693 VM_ASSERT(pipes[0] > 0);
2694 VM_ASSERT(pipes[1] > 0);
2695 return;
2698 /*
2699 * Don't bother with eventfd on ancient Linux 2.6.22..2.6.26, which were
2700 * missing the EFD_* flags; those kernels fall back to a pipe.
2701 */
2702 #if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
2703 pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);
2705 if (pipes[0] >= 0) {
2706 rb_update_max_fd(pipes[0]);
2707 return;
2709 #endif
2711 err = rb_cloexec_pipe(pipes);
2712 if (err != 0) {
2713 rb_bug("can not create communication pipe");
2715 rb_update_max_fd(pipes[0]);
2716 rb_update_max_fd(pipes[1]);
2717 set_nonblock(pipes[0]);
2718 set_nonblock(pipes[1]);
2721 #if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
2722 # define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
2723 #endif
2725 enum {
2726 THREAD_NAME_MAX =
2727 #if defined(__linux__)
2728 16
2729 #elif defined(__APPLE__)
2730 /* Undocumented, and main thread seems unlimited */
2731 64
2732 #else
2733 16
2734 #endif
2735 };
2737 static VALUE threadptr_invoke_proc_location(rb_thread_t *th);
2739 static void
2740 native_set_thread_name(rb_thread_t *th)
2742 #ifdef SET_CURRENT_THREAD_NAME
2743 VALUE loc;
2744 if (!NIL_P(loc = th->name)) {
2745 SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc));
2747 else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
2748 char *name, *p;
2749 char buf[THREAD_NAME_MAX];
2750 size_t len;
2751 int n;
2753 name = RSTRING_PTR(RARRAY_AREF(loc, 0));
2754 p = strrchr(name, '/'); /* show only the basename of the path. */
2755 if (p && p[1])
2756 name = p + 1;
2758 n = snprintf(buf, sizeof(buf), "%s:%d", name, NUM2INT(RARRAY_AREF(loc, 1)));
2759 RB_GC_GUARD(loc);
2761 len = (size_t)n;
2762 if (len >= sizeof(buf)) {
2763 buf[sizeof(buf)-2] = '*';
2764 buf[sizeof(buf)-1] = '\0';
2766 SET_CURRENT_THREAD_NAME(buf);
2768 #endif
2771 static void
2772 native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
2774 #if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
2775 char buf[THREAD_NAME_MAX];
2776 const char *s = "";
2777 # if !defined SET_ANOTHER_THREAD_NAME
2778 if (!pthread_equal(pthread_self(), thread_id)) return;
2779 # endif
2780 if (!NIL_P(name)) {
2781 long n;
2782 RSTRING_GETMEM(name, s, n);
2783 if (n >= (int)sizeof(buf)) {
2784 memcpy(buf, s, sizeof(buf)-1);
2785 buf[sizeof(buf)-1] = '\0';
2786 s = buf;
2789 # if defined SET_ANOTHER_THREAD_NAME
2790 SET_ANOTHER_THREAD_NAME(thread_id, s);
2791 # elif defined SET_CURRENT_THREAD_NAME
2792 SET_CURRENT_THREAD_NAME(s);
2793 # endif
2794 #endif
2797 #if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
2798 static VALUE
2799 native_thread_native_thread_id(rb_thread_t *target_th)
2801 if (!target_th->nt) return Qnil;
2803 #ifdef RB_THREAD_T_HAS_NATIVE_ID
2804 int tid = target_th->nt->tid;
2805 if (tid == 0) return Qnil;
2806 return INT2FIX(tid);
2807 #elif defined(__APPLE__)
2808 uint64_t tid;
2809 /* The first condition is needed because MAC_OS_X_VERSION_10_6
2810 is not defined on 10.5, and while __POWERPC__ takes care of ppc/ppc64,
2811 i386 will be broken without this. Note, 10.5 is supported with GCC upstream,
2812 so it has C++17 and everything needed to build modern Ruby. */
2813 # if (!defined(MAC_OS_X_VERSION_10_6) || \
2814 (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6) || \
2815 defined(__POWERPC__) /* never defined for PowerPC platforms */)
2816 const bool no_pthread_threadid_np = true;
2817 # define NO_PTHREAD_MACH_THREAD_NP 1
2818 # elif MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
2819 const bool no_pthread_threadid_np = false;
2820 # else
2821 # if !(defined(__has_attribute) && __has_attribute(availability))
2822 /* __API_AVAILABLE macro does nothing on gcc */
2823 __attribute__((weak)) int pthread_threadid_np(pthread_t, uint64_t*);
2824 # endif
2825 /* Check weakly linked symbol */
2826 const bool no_pthread_threadid_np = !&pthread_threadid_np;
2827 # endif
2828 if (no_pthread_threadid_np) {
2829 return ULL2NUM(pthread_mach_thread_np(pthread_self()));
2831 # ifndef NO_PTHREAD_MACH_THREAD_NP
2832 int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
2833 if (e != 0) rb_syserr_fail(e, "pthread_threadid_np");
2834 return ULL2NUM((unsigned long long)tid);
2835 # endif
2836 #endif
2838 # define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
2839 #else
2840 # define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0
2841 #endif
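/* Process-wide timer thread state: the fork generation it was created in,
 * its pthread id, the wakeup pipe/eventfd (comm_fds), the kernel event queue
 * fd used by MN threads, and the list of threads sleeping with a timeout,
 * protected by waiting_lock. */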
2843 static struct {
2844 rb_serial_t created_fork_gen;
2845 pthread_t pthread_id;
2847 int comm_fds[2]; // r, w
2849 #if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
2850 int event_fd; // kernel event queue fd (epoll/kqueue)
2851 #endif
2852 #if HAVE_SYS_EPOLL_H && USE_MN_THREADS
2853 #define EPOLL_EVENTS_MAX 0x10
2854 struct epoll_event finished_events[EPOLL_EVENTS_MAX];
2855 #elif HAVE_SYS_EVENT_H && USE_MN_THREADS
2856 #define KQUEUE_EVENTS_MAX 0x10
2857 struct kevent finished_events[KQUEUE_EVENTS_MAX];
2858 #endif
2860 // waiting threads list
2861 struct ccan_list_head waiting; // waiting threads in ractors
2862 pthread_mutex_t waiting_lock;
2863 } timer_th = {
2864 .created_fork_gen = 0,
2865 };
2867 #define TIMER_THREAD_CREATED_P() (timer_th.created_fork_gen == current_fork_gen)
2869 static void timer_thread_check_timeslice(rb_vm_t *vm);
2870 static int timer_thread_set_timeout(rb_vm_t *vm);
2871 static void timer_thread_wakeup_thread(rb_thread_t *th);
2873 #include "thread_pthread_mn.c"
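/* timer_thread_set_timeout() decides how long the timer thread may sleep in
 * its polling call: 10 ms while time-slicing, UBF retries, or lazy GRQ
 * dequeues are pending; otherwise until the nearest sleeper's deadline; or
 * indefinitely (-1) when there is nothing to wait for. */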
2875 static int
2876 timer_thread_set_timeout(rb_vm_t *vm)
2878 #if 0
2879 return 10; // ms
2880 #else
2881 int timeout = -1;
2883 ractor_sched_lock(vm, NULL);
2885 if ( !ccan_list_empty(&vm->ractor.sched.timeslice_threads) // (1-1) Provide time slice for active NTs
2886 || !ubf_threads_empty() // (1-3) Periodic UBF
2887 || vm->ractor.sched.grq_cnt > 0 // (1-4) Lazy GRQ deq start
2890 RUBY_DEBUG_LOG("timeslice:%d ubf:%d grq:%d",
2891 !ccan_list_empty(&vm->ractor.sched.timeslice_threads),
2892 !ubf_threads_empty(),
2893 (vm->ractor.sched.grq_cnt > 0));
2895 timeout = 10; // ms
2896 vm->ractor.sched.timeslice_wait_inf = false;
2898 else {
2899 vm->ractor.sched.timeslice_wait_inf = true;
2902 ractor_sched_unlock(vm, NULL);
2904 if (vm->ractor.sched.timeslice_wait_inf) {
2905 rb_native_mutex_lock(&timer_th.waiting_lock);
2907 rb_thread_t *th = ccan_list_top(&timer_th.waiting, rb_thread_t, sched.waiting_reason.node);
2908 if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
2909 rb_hrtime_t now = rb_hrtime_now();
2910 rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);
2912 RUBY_DEBUG_LOG("th:%u now:%lu rel:%lu", rb_th_serial(th), (unsigned long)now, (unsigned long)hrrel);
2914 // TODO: overflow?
2915 timeout = (int)((hrrel + RB_HRTIME_PER_MSEC - 1) / RB_HRTIME_PER_MSEC); // ms
2918 rb_native_mutex_unlock(&timer_th.waiting_lock);
2921 RUBY_DEBUG_LOG("timeout:%d inf:%d", timeout, (int)vm->ractor.sched.timeslice_wait_inf);
2923 // fprintf(stderr, "timeout:%d\n", timeout);
2924 return timeout;
2925 #endif
2928 static void
2929 timer_thread_check_signal(rb_vm_t *vm)
2931 // ruby_sigchld_handler(vm); TODO
2933 int signum = rb_signal_buff_size();
2934 if (UNLIKELY(signum > 0) && vm->ractor.main_thread) {
2935 RUBY_DEBUG_LOG("signum:%d", signum);
2936 threadptr_trap_interrupt(vm->ractor.main_thread);
2940 static bool
2941 timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
2943 if (abs < now) {
2944 return true;
2946 else if (abs - now < RB_HRTIME_PER_MSEC) {
2947 return true; // less than 1 ms remains; treat as already expired
2949 else {
2950 return false;
2954 static rb_thread_t *
2955 timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
2957 rb_thread_t *th = ccan_list_top(&timer_th.waiting, rb_thread_t, sched.waiting_reason.node);
2959 if (th != NULL &&
2960 (th->sched.waiting_reason.flags & thread_sched_waiting_timeout) &&
2961 timer_thread_check_exceed(th->sched.waiting_reason.data.timeout, now)) {
2963 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
2965 // delete from waiting list
2966 ccan_list_del_init(&th->sched.waiting_reason.node);
2968 // setup result
2969 th->sched.waiting_reason.flags = thread_sched_waiting_none;
2970 th->sched.waiting_reason.data.result = 0;
2972 return th;
2975 return NULL;
2978 static void
2979 timer_thread_wakeup_thread(rb_thread_t *th)
2981 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2982 struct rb_thread_sched *sched = TH_SCHED(th);
2984 thread_sched_lock(sched, th);
2986 if (sched->running != th) {
2987 thread_sched_to_ready_common(sched, th, true, false);
2989 else {
2990 // already running; it will release the execution right by itself
2993 thread_sched_unlock(sched, th);
2996 static void
2997 timer_thread_check_timeout(rb_vm_t *vm)
2999 rb_hrtime_t now = rb_hrtime_now();
3000 rb_thread_t *th;
3002 rb_native_mutex_lock(&timer_th.waiting_lock);
3004 while ((th = timer_thread_deq_wakeup(vm, now)) != NULL) {
3005 timer_thread_wakeup_thread(th);
3008 rb_native_mutex_unlock(&timer_th.waiting_lock);
3011 static void
3012 timer_thread_check_timeslice(rb_vm_t *vm)
3014 // TODO: check time
3015 rb_thread_t *th;
3016 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
3017 RUBY_DEBUG_LOG("timeslice th:%u", rb_th_serial(th));
3018 RUBY_VM_SET_TIMER_INTERRUPT(th->ec);
3022 void
3023 rb_assert_sig(void)
3025 sigset_t oldmask;
3026 pthread_sigmask(0, NULL, &oldmask);
3027 if (sigismember(&oldmask, SIGVTALRM)) {
3028 rb_bug("!!!");
3030 else {
3031 RUBY_DEBUG_LOG("ok");
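/* timer_thread_func() is the timer thread's main loop: deliver buffered
 * signals to the main thread, wake sleepers whose deadline has passed,
 * re-signal UBF-registered threads, and then wait in timer_thread_polling()
 * (defined in thread_pthread_mn.c) until woken up or the timeout chosen
 * above expires. */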
3035 static void *
3036 timer_thread_func(void *ptr)
3038 rb_vm_t *vm = (rb_vm_t *)ptr;
3039 #if defined(RUBY_NT_SERIAL)
3040 ruby_nt_serial = (rb_atomic_t)-1;
3041 #endif
3043 RUBY_DEBUG_LOG("started%s", "");
3045 while (system_working) {
3046 timer_thread_check_signal(vm);
3047 timer_thread_check_timeout(vm);
3048 ubf_wakeup_all_threads();
3050 RUBY_DEBUG_LOG("system_working:%d", system_working);
3051 timer_thread_polling(vm);
3054 RUBY_DEBUG_LOG("terminated");
3055 return NULL;
3058 /* only use signal-safe system calls here */
3059 static void
3060 signal_communication_pipe(int fd)
3062 #if USE_EVENTFD
3063 const uint64_t buff = 1;
3064 #else
3065 const char buff = '!';
3066 #endif
3067 ssize_t result;
3069 /* already opened */
3070 if (fd >= 0) {
3071 retry:
3072 if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
3073 int e = errno;
3074 switch (e) {
3075 case EINTR: goto retry;
3076 case EAGAIN:
3077 #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
3078 case EWOULDBLOCK:
3079 #endif
3080 break;
3081 default:
3082 async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
3085 if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
3087 else {
3088 // ignore wakeup
3092 static void
3093 timer_thread_wakeup_force(void)
3095 // should not use RUBY_DEBUG_LOG() because it can be called within signal handlers.
3096 signal_communication_pipe(timer_th.comm_fds[1]);
3099 static void
3100 timer_thread_wakeup_locked(rb_vm_t *vm)
3103 // the ractor scheduler lock must already be held.
3103 ASSERT_ractor_sched_locked(vm, NULL);
3105 if (timer_th.created_fork_gen == current_fork_gen) {
3106 if (vm->ractor.sched.timeslice_wait_inf) {
3107 RUBY_DEBUG_LOG("wakeup with fd:%d", timer_th.comm_fds[1]);
3108 timer_thread_wakeup_force();
3110 else {
3111 RUBY_DEBUG_LOG("will be wakeup...");
3116 static void
3117 timer_thread_wakeup(void)
3119 rb_vm_t *vm = GET_VM();
3121 ractor_sched_lock(vm, NULL);
3123 timer_thread_wakeup_locked(vm);
3125 ractor_sched_unlock(vm, NULL);
3128 static void
3129 rb_thread_create_timer_thread(void)
3131 rb_serial_t created_fork_gen = timer_th.created_fork_gen;
3133 RUBY_DEBUG_LOG("fork_gen create:%d current:%d", (int)created_fork_gen, (int)current_fork_gen);
3135 timer_th.created_fork_gen = current_fork_gen;
3137 if (created_fork_gen != current_fork_gen) {
3138 if (created_fork_gen != 0) {
3139 RUBY_DEBUG_LOG("forked child process");
3141 CLOSE_INVALIDATE_PAIR(timer_th.comm_fds);
3142 #if HAVE_SYS_EPOLL_H && USE_MN_THREADS
3143 close_invalidate(&timer_th.event_fd, "close event_fd");
3144 #endif
3145 rb_native_mutex_destroy(&timer_th.waiting_lock);
3148 ccan_list_head_init(&timer_th.waiting);
3149 rb_native_mutex_initialize(&timer_th.waiting_lock);
3151 // open communication channel
3152 setup_communication_pipe_internal(timer_th.comm_fds);
3154 // open event fd
3155 timer_thread_setup_mn();
3158 pthread_create(&timer_th.pthread_id, NULL, timer_thread_func, GET_VM());
3161 static int
3162 native_stop_timer_thread(void)
3164 int stopped;
3165 stopped = --system_working <= 0;
3167 if (stopped) {
3168 RUBY_DEBUG_LOG("wakeup send %d", timer_th.comm_fds[1]);
3169 timer_thread_wakeup_force();
3170 RUBY_DEBUG_LOG("wakeup sent");
3171 pthread_join(timer_th.pthread_id, NULL);
3174 if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
3175 return stopped;
3178 static void
3179 native_reset_timer_thread(void)
3184 #ifdef HAVE_SIGALTSTACK
3185 int
3186 ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
3188 void *base;
3189 size_t size;
3190 const size_t water_mark = 1024 * 1024;
3191 STACK_GROW_DIR_DETECTION;
3193 #ifdef STACKADDR_AVAILABLE
3194 if (get_stack(&base, &size) == 0) {
3195 # ifdef __APPLE__
3196 if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
3197 struct rlimit rlim;
3198 if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
3199 size = (size_t)rlim.rlim_cur;
3202 # endif
3203 base = (char *)base + STACK_DIR_UPPER(+size, -size);
3205 else
3206 #endif
3207 if (th) {
3208 size = th->ec->machine.stack_maxsize;
3209 base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);
3211 else {
3212 return 0;
3214 size /= RUBY_STACK_SPACE_RATIO;
3215 if (size > water_mark) size = water_mark;
3216 if (IS_STACK_DIR_UPPER()) {
3217 if (size > ~(size_t)base+1) size = ~(size_t)base+1;
3218 if (addr > base && addr <= (void *)((char *)base + size)) return 1;
3220 else {
3221 if (size > (size_t)base) size = (size_t)base;
3222 if (addr > (void *)((char *)base - size) && addr <= base) return 1;
3224 return 0;
3226 #endif
3228 int
3229 rb_reserved_fd_p(int fd)
3231 /* no false positive for invalid fds (e.g. if we ran out of FDs at startup) */
3232 if (fd < 0) return 0;
3234 if (fd == timer_th.comm_fds[0] ||
3235 fd == timer_th.comm_fds[1]
3236 #if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
3237 || fd == timer_th.event_fd
3238 #endif
3240 goto check_fork_gen;
3242 return 0;
3244 check_fork_gen:
3245 if (timer_th.created_fork_gen == current_fork_gen) {
3246 /* async-signal-safe */
3247 return 1;
3249 else {
3250 return 0;
3254 rb_nativethread_id_t
3255 rb_nativethread_self(void)
3257 return pthread_self();
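/* When ppoll() is unavailable, ruby_ppoll() below emulates it with poll():
 * the timespec is rounded up to whole milliseconds (clamped to INT_MAX) and
 * the signal mask argument is ignored. */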
3260 #if defined(USE_POLL) && !defined(HAVE_PPOLL)
3261 /* TODO: don't ignore sigmask */
3262 static int
3263 ruby_ppoll(struct pollfd *fds, nfds_t nfds,
3264 const struct timespec *ts, const sigset_t *sigmask)
3266 int timeout_ms;
3268 if (ts) {
3269 int tmp, tmp2;
3271 if (ts->tv_sec > INT_MAX/1000)
3272 timeout_ms = INT_MAX;
3273 else {
3274 tmp = (int)(ts->tv_sec * 1000);
3275 /* round up 1ns to 1ms to avoid excessive wakeups for <1ms sleep */
3276 tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
3277 if (INT_MAX - tmp < tmp2)
3278 timeout_ms = INT_MAX;
3279 else
3280 timeout_ms = (int)(tmp + tmp2);
3283 else
3284 timeout_ms = -1;
3286 return poll(fds, nfds, timeout_ms);
3288 # define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
3289 #endif
3291 /*
3292 * Single CPU setups benefit from explicit sched_yield() before ppoll(),
3293 * since threads may be too starved to enter the GVL waitqueue for
3294 * us to detect contention. Instead, we want to kick other threads
3295 * so they can run and possibly prevent us from entering slow paths
3296 * in ppoll() or similar syscalls.
3298 * Confirmed on FreeBSD 11.2 and Linux 4.19.
3299 * [ruby-core:90417] [Bug #15398]
3300 */
3301 #define THREAD_BLOCKING_YIELD(th) do { \
3302 const rb_thread_t *next_th; \
3303 struct rb_thread_sched *sched = TH_SCHED(th); \
3304 RB_VM_SAVE_MACHINE_CONTEXT(th); \
3305 thread_sched_to_waiting(sched, (th)); \
3306 next_th = sched->running; \
3307 rb_native_mutex_unlock(&sched->lock_); \
3308 native_thread_yield(); /* TODO: needed? */ \
3309 if (!next_th && rb_ractor_living_thread_num(th->ractor) > 1) { \
3310 native_thread_yield(); \
3313 static void
3314 native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
3316 struct rb_thread_sched *sched = TH_SCHED(th);
3318 RUBY_DEBUG_LOG("rel:%d", rel ? (int)*rel : 0);
3319 if (rel) {
3320 if (th_has_dedicated_nt(th)) {
3321 native_cond_sleep(th, rel);
3323 else {
3324 thread_sched_wait_events(sched, th, -1, thread_sched_waiting_timeout, rel);
3327 else {
3328 thread_sched_to_waiting_until_wakeup(sched, th);
3331 RUBY_DEBUG_LOG("wakeup");
3334 // thread internal event hooks (only for pthread)
3336 struct rb_internal_thread_event_hook {
3337 rb_internal_thread_event_callback callback;
3338 rb_event_flag_t event;
3339 void *user_data;
3341 struct rb_internal_thread_event_hook *next;
3344 static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
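/* Internal thread event hooks: callbacks registered with
 * rb_internal_thread_add_event_hook() form a singly linked list guarded by
 * rb_internal_thread_event_hooks_rw_lock; rb_thread_execute_hooks() walks
 * the list under the read lock and invokes every hook whose event mask
 * matches the fired event. */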
3346 rb_internal_thread_event_hook_t *
3347 rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)
3349 rb_internal_thread_event_hook_t *hook = ALLOC_N(rb_internal_thread_event_hook_t, 1);
3350 hook->callback = callback;
3351 hook->user_data = user_data;
3352 hook->event = internal_event;
3354 int r;
3355 if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
3356 rb_bug_errno("pthread_rwlock_wrlock", r);
3359 hook->next = rb_internal_thread_event_hooks;
3360 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook);
3362 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3363 rb_bug_errno("pthread_rwlock_unlock", r);
3365 return hook;
3368 bool
3369 rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t * hook)
3371 int r;
3372 if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
3373 rb_bug_errno("pthread_rwlock_wrlock", r);
3376 bool success = FALSE;
3378 if (rb_internal_thread_event_hooks == hook) {
3379 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook->next);
3380 success = TRUE;
3382 else {
3383 rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
3385 do {
3386 if (h->next == hook) {
3387 h->next = hook->next;
3388 success = TRUE;
3389 break;
3391 } while ((h = h->next));
3394 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3395 rb_bug_errno("pthread_rwlock_unlock", r);
3398 if (success) {
3399 ruby_xfree(hook);
3401 return success;
3404 static void
3405 rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th)
3407 int r;
3408 if ((r = pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock))) {
3409 rb_bug_errno("pthread_rwlock_rdlock", r);
3412 if (rb_internal_thread_event_hooks) {
3413 rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
3414 do {
3415 if (h->event & event) {
3416 rb_internal_thread_event_data_t event_data = {
3417 .thread = th->self,
3419 (*h->callback)(event, &event_data, h->user_data);
3421 } while((h = h->next));
3423 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3424 rb_bug_errno("pthread_rwlock_unlock", r);
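/* Usage sketch (illustrative only; the callback signature is inferred from
 * how rb_thread_execute_hooks() invokes h->callback above):
 *
 *   static void
 *   on_thread_event(rb_event_flag_t event,
 *                   const rb_internal_thread_event_data_t *event_data,
 *                   void *user_data)
 *   {
 *       // keep this minimal: hooks can fire on any thread
 *   }
 *
 *   rb_internal_thread_event_hook_t *hook =
 *       rb_internal_thread_add_event_hook(on_thread_event,
 *                                         RUBY_INTERNAL_THREAD_EVENT_STARTED,
 *                                         NULL);
 *   // ... later ...
 *   rb_internal_thread_remove_event_hook(hook);
 */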
3428 // Returns true if the current thread acquires a dedicated native thread (DNT).
3429 // Returns false if the current thread already has a DNT.
3430 bool
3431 rb_thread_lock_native_thread(void)
3433 rb_thread_t *th = GET_THREAD();
3434 bool is_snt = th->nt->dedicated == 0;
3435 native_thread_dedicated_inc(th->vm, th->ractor, th->nt);
3437 return is_snt;
3440 #endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */