* io.c (rb_open_file): encoding in mode string was ignored if perm is
[ruby-svn.git] / thread.c
blob14c2a89e423bac42bbf22cdff18812c599ba8d3a
1 /**********************************************************************
3 thread.c -
5 $Author$
7 Copyright (C) 2004-2007 Koichi Sasada
9 **********************************************************************/
12 YARV Thread Desgin
14 model 1: Userlevel Thread
15 Same as traditional ruby thread.
17 model 2: Native Thread with Giant VM lock
18 Using pthread (or Windows thread) and Ruby threads run concurrent.
20 model 3: Native Thread with fine grain lock
21 Using pthread and Ruby threads run concurrent or parallel.
23 ------------------------------------------------------------------------
25 model 2:
26 A thread has mutex (GVL: Global VM Lock) can run. When thread
27 scheduling, running thread release GVL. If running thread
28 try blocking operation, this thread must release GVL and another
29 thread can continue this flow. After blocking operation, thread
30 must check interrupt (RUBY_VM_CHECK_INTS).
32 Every VM can run parallel.
34 Ruby threads are scheduled by OS thread scheduler.
36 ------------------------------------------------------------------------
38 model 3:
39 Every threads run concurrent or parallel and to access shared object
40 exclusive access control is needed. For example, to access String
41 object or Array object, fine grain lock must be locked every time.
45 /* for model 2 */
47 #include "eval_intern.h"
48 #include "vm.h"
49 #include "gc.h"
51 #ifndef USE_NATIVE_THREAD_PRIORITY
52 #define USE_NATIVE_THREAD_PRIORITY 0
53 #define RUBY_THREAD_PRIORITY_MAX 3
54 #define RUBY_THREAD_PRIORITY_MIN -3
55 #endif
57 #ifndef THREAD_DEBUG
58 #define THREAD_DEBUG 0
59 #endif
61 VALUE rb_cMutex;
62 VALUE rb_cBarrier;
64 static void sleep_timeval(rb_thread_t *th, struct timeval time);
65 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
66 static void sleep_forever(rb_thread_t *th, int nodeadlock);
67 static double timeofday(void);
68 struct timeval rb_time_interval(VALUE);
69 static int rb_thread_dead(rb_thread_t *th);
71 static void rb_check_deadlock(rb_vm_t *vm);
73 void rb_signal_exec(rb_thread_t *th, int sig);
74 void rb_disable_interrupt(void);
76 static const VALUE eKillSignal = INT2FIX(0);
77 static const VALUE eTerminateSignal = INT2FIX(1);
78 static volatile int system_working = 1;
80 inline static void
81 st_delete_wrap(st_table *table, st_data_t key)
83 st_delete(table, &key, 0);
86 /********************************************************************************/
88 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
90 static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
91 struct rb_unblock_callback *old);
92 static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);
94 #define GVL_UNLOCK_BEGIN() do { \
95 rb_thread_t *_th_stored = GET_THREAD(); \
96 rb_gc_save_machine_context(_th_stored); \
97 native_mutex_unlock(&_th_stored->vm->global_vm_lock)
99 #define GVL_UNLOCK_END() \
100 native_mutex_lock(&_th_stored->vm->global_vm_lock); \
101 rb_thread_set_current(_th_stored); \
102 } while(0)
104 #define BLOCKING_REGION_CORE(exec) do { \
105 GVL_UNLOCK_BEGIN(); {\
106 exec; \
108 GVL_UNLOCK_END(); \
109 } while(0);
111 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
112 rb_thread_t *__th = GET_THREAD(); \
113 enum rb_thread_status __prev_status = __th->status; \
114 struct rb_unblock_callback __oldubf; \
115 set_unblock_function(__th, ubf, ubfarg, &__oldubf); \
116 __th->status = THREAD_STOPPED; \
117 thread_debug("enter blocking region (%p)\n", __th); \
118 BLOCKING_REGION_CORE(exec); \
119 thread_debug("leave blocking region (%p)\n", __th); \
120 remove_signal_thread_list(__th); \
121 reset_unblock_function(__th, &__oldubf); \
122 if (__th->status == THREAD_STOPPED) { \
123 __th->status = __prev_status; \
125 RUBY_VM_CHECK_INTS(); \
126 } while(0)
128 #if THREAD_DEBUG
129 #ifdef HAVE_VA_ARGS_MACRO
130 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
131 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
132 #define POSITION_FORMAT "%s:%d:"
133 #define POSITION_ARGS ,file, line
134 #else
135 void rb_thread_debug(const char *fmt, ...);
136 #define thread_debug rb_thread_debug
137 #define POSITION_FORMAT
138 #define POSITION_ARGS
139 #endif
141 # if THREAD_DEBUG < 0
142 static int rb_thread_debug_enabled;
144 static VALUE
145 rb_thread_s_debug(void)
147 return INT2NUM(rb_thread_debug_enabled);
150 static VALUE
151 rb_thread_s_debug_set(VALUE self, VALUE val)
153 rb_thread_debug_enabled = RTEST(val);
154 return val;
156 # else
157 # define rb_thread_debug_enabled THREAD_DEBUG
158 # endif
159 #else
160 #define thread_debug if(0)printf
161 #endif
163 #ifndef __ia64
164 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
165 #endif
166 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
167 VALUE *register_stack_start));
168 static void timer_thread_function(void *);
170 #if defined(_WIN32)
171 #include "thread_win32.c"
173 #define DEBUG_OUT() \
174 WaitForSingleObject(&debug_mutex, INFINITE); \
175 printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
176 fflush(stdout); \
177 ReleaseMutex(&debug_mutex);
179 #elif defined(HAVE_PTHREAD_H)
180 #include "thread_pthread.c"
182 #define DEBUG_OUT() \
183 pthread_mutex_lock(&debug_mutex); \
184 printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
185 fflush(stdout); \
186 pthread_mutex_unlock(&debug_mutex);
188 #else
189 #error "unsupported thread type"
190 #endif
192 #if THREAD_DEBUG
193 static int debug_mutex_initialized = 1;
194 static rb_thread_lock_t debug_mutex;
196 void
197 rb_thread_debug(
198 #ifdef HAVE_VA_ARGS_MACRO
199 const char *file, int line,
200 #endif
201 const char *fmt, ...)
203 va_list args;
204 char buf[BUFSIZ];
206 if (!rb_thread_debug_enabled) return;
208 if (debug_mutex_initialized == 1) {
209 debug_mutex_initialized = 0;
210 native_mutex_initialize(&debug_mutex);
213 va_start(args, fmt);
214 vsnprintf(buf, BUFSIZ, fmt, args);
215 va_end(args);
217 DEBUG_OUT();
219 #endif
222 static void
223 set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
224 struct rb_unblock_callback *old)
226 check_ints:
227 RUBY_VM_CHECK_INTS(); /* check signal or so */
228 native_mutex_lock(&th->interrupt_lock);
229 if (th->interrupt_flag) {
230 native_mutex_unlock(&th->interrupt_lock);
231 goto check_ints;
233 else {
234 if (old) *old = th->unblock;
235 th->unblock.func = func;
236 th->unblock.arg = arg;
238 native_mutex_unlock(&th->interrupt_lock);
241 static void
242 reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
244 native_mutex_lock(&th->interrupt_lock);
245 th->unblock = *old;
246 native_mutex_unlock(&th->interrupt_lock);
249 static void
250 rb_thread_interrupt(rb_thread_t *th)
252 native_mutex_lock(&th->interrupt_lock);
253 RUBY_VM_SET_INTERRUPT(th);
254 if (th->unblock.func) {
255 (th->unblock.func)(th->unblock.arg);
257 else {
258 /* none */
260 native_mutex_unlock(&th->interrupt_lock);
264 static int
265 terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
267 VALUE thval = key;
268 rb_thread_t *th;
269 GetThreadPtr(thval, th);
271 if (th != main_thread) {
272 thread_debug("terminate_i: %p\n", th);
273 rb_thread_interrupt(th);
274 th->thrown_errinfo = eTerminateSignal;
275 th->status = THREAD_TO_KILL;
277 else {
278 thread_debug("terminate_i: main thread (%p)\n", th);
280 return ST_CONTINUE;
283 typedef struct rb_mutex_struct
285 rb_thread_lock_t lock;
286 rb_thread_cond_t cond;
287 struct rb_thread_struct volatile *th;
288 volatile int cond_waiting, cond_notified;
289 struct rb_mutex_struct *next_mutex;
290 } mutex_t;
292 static void rb_mutex_unlock_all(mutex_t *mutex);
294 void
295 rb_thread_terminate_all(void)
297 rb_thread_t *th = GET_THREAD(); /* main thread */
298 rb_vm_t *vm = th->vm;
299 if (vm->main_thread != th) {
300 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)", vm->main_thread, th);
303 /* unlock all locking mutexes */
304 if (th->keeping_mutexes) {
305 rb_mutex_unlock_all(th->keeping_mutexes);
308 thread_debug("rb_thread_terminate_all (main thread: %p)\n", th);
309 st_foreach(vm->living_threads, terminate_i, (st_data_t)th);
311 while (!rb_thread_alone()) {
312 PUSH_TAG();
313 if (EXEC_TAG() == 0) {
314 rb_thread_schedule();
316 else {
317 /* ignore exception */
319 POP_TAG();
321 system_working = 0;
324 static void
325 thread_cleanup_func_before_exec(void *th_ptr)
327 rb_thread_t *th = th_ptr;
328 th->status = THREAD_KILLED;
329 th->machine_stack_start = th->machine_stack_end = 0;
330 #ifdef __ia64
331 th->machine_register_stack_start = th->machine_register_stack_end = 0;
332 #endif
335 static void
336 thread_cleanup_func(void *th_ptr)
338 rb_thread_t *th = th_ptr;
339 thread_cleanup_func_before_exec(th_ptr);
340 native_thread_destroy(th);
343 extern void ruby_error_print(void);
344 static VALUE rb_thread_raise(int, VALUE *, rb_thread_t *);
345 void rb_thread_recycle_stack_release(VALUE *);
347 void
348 ruby_thread_init_stack(rb_thread_t *th)
350 native_thread_init_stack(th);
353 static int
354 thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
356 int state;
357 VALUE args = th->first_args;
358 rb_proc_t *proc;
359 rb_thread_t *join_th;
360 rb_thread_t *main_th;
361 VALUE errinfo = Qnil;
363 th->machine_stack_start = stack_start;
364 #ifdef __ia64
365 th->machine_register_stack_start = register_stack_start;
366 #endif
367 thread_debug("thread start: %p\n", th);
369 native_mutex_lock(&th->vm->global_vm_lock);
371 thread_debug("thread start (get lock): %p\n", th);
372 rb_thread_set_current(th);
374 TH_PUSH_TAG(th);
375 if ((state = EXEC_TAG()) == 0) {
376 SAVE_ROOT_JMPBUF(th, {
377 if (th->first_proc) {
378 GetProcPtr(th->first_proc, proc);
379 th->errinfo = Qnil;
380 th->local_lfp = proc->block.lfp;
381 th->local_svar = Qnil;
382 th->value = vm_invoke_proc(th, proc, proc->block.self,
383 RARRAY_LEN(args), RARRAY_PTR(args), 0);
385 else {
386 th->value = (*th->first_func)((void *)th->first_args);
390 else {
391 errinfo = th->errinfo;
392 if (NIL_P(errinfo)) errinfo = rb_errinfo();
393 if (state == TAG_FATAL) {
394 /* fatal error within this thread, need to stop whole script */
396 else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
397 if (th->safe_level >= 4) {
398 th->errinfo = rb_exc_new3(rb_eSecurityError,
399 rb_sprintf("Insecure exit at level %d", th->safe_level));
400 errinfo = Qnil;
403 else if (th->safe_level < 4 &&
404 (th->vm->thread_abort_on_exception ||
405 th->abort_on_exception || RTEST(ruby_debug))) {
406 /* exit on main_thread */
408 else {
409 errinfo = Qnil;
411 th->value = Qnil;
414 th->status = THREAD_KILLED;
415 thread_debug("thread end: %p\n", th);
417 main_th = th->vm->main_thread;
418 if (th != main_th) {
419 if (TYPE(errinfo) == T_OBJECT) {
420 /* treat with normal error object */
421 rb_thread_raise(1, &errinfo, main_th);
424 TH_POP_TAG();
426 /* locking_mutex must be Qfalse */
427 if (th->locking_mutex != Qfalse) {
428 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
429 th, th->locking_mutex);
432 /* unlock all locking mutexes */
433 if (th->keeping_mutexes) {
434 rb_mutex_unlock_all(th->keeping_mutexes);
435 th->keeping_mutexes = NULL;
438 /* delete self from living_threads */
439 st_delete_wrap(th->vm->living_threads, th->self);
441 /* wake up joinning threads */
442 join_th = th->join_list_head;
443 while (join_th) {
444 if (join_th == main_th) errinfo = Qnil;
445 rb_thread_interrupt(join_th);
446 switch (join_th->status) {
447 case THREAD_STOPPED: case THREAD_STOPPED_FOREVER:
448 join_th->status = THREAD_RUNNABLE;
449 default: break;
451 join_th = join_th->join_list_next;
453 if (th != main_th) rb_check_deadlock(th->vm);
455 if (!th->root_fiber) {
456 rb_thread_recycle_stack_release(th->stack);
457 th->stack = 0;
460 thread_cleanup_func(th);
461 native_mutex_unlock(&th->vm->global_vm_lock);
463 return 0;
466 static VALUE
467 thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
469 rb_thread_t *th;
471 if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
472 rb_raise(rb_eThreadError,
473 "can't start a new thread (frozen ThreadGroup)");
475 GetThreadPtr(thval, th);
477 /* setup thread environment */
478 th->first_func = fn;
479 th->first_proc = fn ? Qfalse : rb_block_proc();
480 th->first_args = args; /* GC: shouldn't put before above line */
482 th->priority = GET_THREAD()->priority;
483 th->thgroup = GET_THREAD()->thgroup;
485 native_mutex_initialize(&th->interrupt_lock);
486 /* kick thread */
487 st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
488 native_thread_create(th);
489 return thval;
492 static VALUE
493 thread_s_new(int argc, VALUE *argv, VALUE klass)
495 rb_thread_t *th;
496 VALUE thread = rb_thread_alloc(klass);
497 rb_obj_call_init(thread, argc, argv);
498 GetThreadPtr(thread, th);
499 if (!th->first_args) {
500 rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
501 rb_class2name(klass));
503 return thread;
507 * call-seq:
508 * Thread.start([args]*) {|args| block } => thread
509 * Thread.fork([args]*) {|args| block } => thread
511 * Basically the same as <code>Thread::new</code>. However, if class
512 * <code>Thread</code> is subclassed, then calling <code>start</code> in that
513 * subclass will not invoke the subclass's <code>initialize</code> method.
516 static VALUE
517 thread_start(VALUE klass, VALUE args)
519 return thread_create_core(rb_thread_alloc(klass), args, 0);
522 static VALUE
523 thread_initialize(VALUE thread, VALUE args)
525 rb_thread_t *th;
526 if (!rb_block_given_p()) {
527 rb_raise(rb_eThreadError, "must be called with a block");
529 GetThreadPtr(thread, th);
530 if (th->first_args) {
531 VALUE rb_proc_location(VALUE self);
532 VALUE proc = th->first_proc, line, loc;
533 const char *file;
534 if (!proc || !RTEST(loc = rb_proc_location(proc))) {
535 rb_raise(rb_eThreadError, "already initialized thread");
537 file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
538 if (NIL_P(line = RARRAY_PTR(loc)[1])) {
539 rb_raise(rb_eThreadError, "already initialized thread - %s",
540 file);
542 rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
543 file, NUM2INT(line));
545 return thread_create_core(thread, args, 0);
548 VALUE
549 rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
551 return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
555 /* +infty, for this purpose */
556 #define DELAY_INFTY 1E30
558 struct join_arg {
559 rb_thread_t *target, *waiting;
560 double limit;
561 int forever;
564 static VALUE
565 remove_from_join_list(VALUE arg)
567 struct join_arg *p = (struct join_arg *)arg;
568 rb_thread_t *target_th = p->target, *th = p->waiting;
570 if (target_th->status != THREAD_KILLED) {
571 rb_thread_t **pth = &target_th->join_list_head;
573 while (*pth) {
574 if (*pth == th) {
575 *pth = th->join_list_next;
576 break;
578 pth = &(*pth)->join_list_next;
582 return Qnil;
585 static VALUE
586 thread_join_sleep(VALUE arg)
588 struct join_arg *p = (struct join_arg *)arg;
589 rb_thread_t *target_th = p->target, *th = p->waiting;
590 double now, limit = p->limit;
592 while (target_th->status != THREAD_KILLED) {
593 if (p->forever) {
594 sleep_forever(th, 1);
596 else {
597 now = timeofday();
598 if (now > limit) {
599 thread_debug("thread_join: timeout (thid: %p)\n",
600 (void *)target_th->thread_id);
601 return Qfalse;
603 sleep_wait_for_interrupt(th, limit - now);
605 thread_debug("thread_join: interrupted (thid: %p)\n",
606 (void *)target_th->thread_id);
608 return Qtrue;
611 static VALUE
612 thread_join(rb_thread_t *target_th, double delay)
614 rb_thread_t *th = GET_THREAD();
615 struct join_arg arg;
617 arg.target = target_th;
618 arg.waiting = th;
619 arg.limit = timeofday() + delay;
620 arg.forever = delay == DELAY_INFTY;
622 thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);
624 if (target_th->status != THREAD_KILLED) {
625 th->join_list_next = target_th->join_list_head;
626 target_th->join_list_head = th;
627 if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
628 remove_from_join_list, (VALUE)&arg)) {
629 return Qnil;
633 thread_debug("thread_join: success (thid: %p)\n",
634 (void *)target_th->thread_id);
636 if (target_th->errinfo != Qnil) {
637 VALUE err = target_th->errinfo;
639 if (FIXNUM_P(err)) {
640 /* */
642 else if (TYPE(target_th->errinfo) == T_NODE) {
643 rb_exc_raise(vm_make_jump_tag_but_local_jump(
644 GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
646 else {
647 /* normal exception */
648 rb_exc_raise(err);
651 return target_th->self;
655 * call-seq:
656 * thr.join => thr
657 * thr.join(limit) => thr
659 * The calling thread will suspend execution and run <i>thr</i>. Does not
660 * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
661 * the time limit expires, <code>nil</code> will be returned, otherwise
662 * <i>thr</i> is returned.
664 * Any threads not joined will be killed when the main program exits. If
665 * <i>thr</i> had previously raised an exception and the
666 * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
667 * (so the exception has not yet been processed) it will be processed at this
668 * time.
670 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
671 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
672 * x.join # Let x thread finish, a will be killed on exit.
674 * <em>produces:</em>
676 * axyz
678 * The following example illustrates the <i>limit</i> parameter.
680 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
681 * puts "Waiting" until y.join(0.15)
683 * <em>produces:</em>
685 * tick...
686 * Waiting
687 * tick...
688 * Waitingtick...
691 * tick...
694 static VALUE
695 thread_join_m(int argc, VALUE *argv, VALUE self)
697 rb_thread_t *target_th;
698 double delay = DELAY_INFTY;
699 VALUE limit;
701 GetThreadPtr(self, target_th);
703 rb_scan_args(argc, argv, "01", &limit);
704 if (!NIL_P(limit)) {
705 delay = rb_num2dbl(limit);
708 return thread_join(target_th, delay);
712 * call-seq:
713 * thr.value => obj
715 * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
716 * its value.
718 * a = Thread.new { 2 + 2 }
719 * a.value #=> 4
722 static VALUE
723 thread_value(VALUE self)
725 rb_thread_t *th;
726 GetThreadPtr(self, th);
727 thread_join(th, DELAY_INFTY);
728 return th->value;
732 * Thread Scheduling
/* Convert a second count D into a struct timeval.
 * tv_usec is normalized into [0, 1e6): a negative fraction borrows one
 * second from tv_sec, so the result always represents floor semantics.
 * Note: the previous version cast through (int), which truncated tv_sec
 * and overflowed for large delays (e.g. DELAY_INFTY used by join paths
 * once added to timeofday()); use time_t/long instead. */
static struct timeval
double2timeval(double d)
{
    struct timeval time;

    time.tv_sec = (time_t)d;
    time.tv_usec = (long)((d - (double)time.tv_sec) * 1e6);
    if (time.tv_usec < 0) {
        time.tv_usec += (long)1e6;
        time.tv_sec -= 1;
    }
    return time;
}
749 static void
750 sleep_forever(rb_thread_t *th, int deadlockable)
752 enum rb_thread_status prev_status = th->status;
754 th->status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
755 do {
756 if (deadlockable) {
757 th->vm->sleeper++;
758 rb_check_deadlock(th->vm);
760 native_sleep(th, 0);
761 if (deadlockable) {
762 th->vm->sleeper--;
764 RUBY_VM_CHECK_INTS();
765 } while (th->status == THREAD_STOPPED_FOREVER);
766 th->status = prev_status;
/* Fill *TP with the current time, preferring the monotonic clock
 * (immune to wall-clock steps) and falling back to gettimeofday(). */
static void
getclockofday(struct timeval *tp)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
        tp->tv_sec = ts.tv_sec;
        tp->tv_usec = ts.tv_nsec / 1000;
        return;
    }
#endif
    gettimeofday(tp, NULL);
}
785 static void
786 sleep_timeval(rb_thread_t *th, struct timeval tv)
788 struct timeval to, tvn;
789 enum rb_thread_status prev_status = th->status;
791 getclockofday(&to);
792 to.tv_sec += tv.tv_sec;
793 if ((to.tv_usec += tv.tv_usec) >= 1000000) {
794 to.tv_sec++;
795 to.tv_usec -= 1000000;
798 th->status = THREAD_STOPPED;
799 do {
800 native_sleep(th, &tv);
801 RUBY_VM_CHECK_INTS();
802 getclockofday(&tvn);
803 if (to.tv_sec < tvn.tv_sec) break;
804 if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
805 thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
806 (long)to.tv_sec, to.tv_usec,
807 (long)tvn.tv_sec, tvn.tv_usec);
808 tv.tv_sec = to.tv_sec - tvn.tv_sec;
809 if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
810 --tv.tv_sec;
811 tv.tv_usec += 1000000;
813 } while (th->status == THREAD_STOPPED);
814 th->status = prev_status;
/* Put the current thread to sleep with no timeout and no deadlock
 * accounting.  (Was declared with empty parens; use a proper (void)
 * prototype, consistent with the rest of the file.) */
void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), 0);
}
/* Like rb_thread_sleep_forever(), but participates in deadlock
 * detection (the sleeper count).  (void) prototype added. */
static void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), 1);
}
/* Current time as a double (seconds).  Uses the monotonic clock when
 * available so interval arithmetic survives wall-clock adjustments. */
static double
timeofday(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec tp;

    if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
        return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
    }
#endif
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
    }
}
848 static void
849 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
851 sleep_timeval(th, double2timeval(sleepsec));
854 static void
855 sleep_for_polling(rb_thread_t *th)
857 struct timeval time;
858 time.tv_sec = 0;
859 time.tv_usec = 100 * 1000; /* 0.1 sec */
860 sleep_timeval(th, time);
/* Public API: sleep the current thread for the given interval. */
void
rb_thread_wait_for(struct timeval time)
{
    sleep_timeval(GET_THREAD(), time);
}
/* Check interrupts, and if other threads exist, yield 0.1 s to them. */
void
rb_thread_polling(void)
{
    RUBY_VM_CHECK_INTS();
    if (!rb_thread_alone()) {
        sleep_for_polling(GET_THREAD());
    }
}
881 * CAUTION: This function causes thread switching.
882 * rb_thread_check_ints() check ruby's interrupts.
883 * some interrupt needs thread switching/invoke handlers,
884 * and so on.
/* Public wrapper over RUBY_VM_CHECK_INTS().
 * CAUTION: may switch threads and run interrupt handlers. */
void
rb_thread_check_ints(void)
{
    RUBY_VM_CHECK_INTS();
}
893 struct timeval rb_time_timeval();
/* Sleep the current thread for SEC whole seconds. */
void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}
901 void
902 rb_thread_schedule(void)
904 thread_debug("rb_thread_schedule\n");
905 if (!rb_thread_alone()) {
906 rb_thread_t *th = GET_THREAD();
908 thread_debug("rb_thread_schedule/switch start\n");
910 rb_gc_save_machine_context(th);
911 native_mutex_unlock(&th->vm->global_vm_lock);
913 native_thread_yield();
915 native_mutex_lock(&th->vm->global_vm_lock);
917 rb_thread_set_current(th);
918 thread_debug("rb_thread_schedule/switch done\n");
920 RUBY_VM_CHECK_INTS();
924 int rb_thread_critical; /* TODO: dummy variable */
926 VALUE
927 rb_thread_blocking_region(
928 rb_blocking_function_t *func, void *data1,
929 rb_unblock_function_t *ubf, void *data2)
931 VALUE val;
932 rb_thread_t *th = GET_THREAD();
934 if (ubf == RB_UBF_DFL) {
935 ubf = ubf_select;
936 data2 = th;
939 BLOCKING_REGION({
940 val = func(data1);
941 }, ubf, data2);
943 return val;
947 * call-seq:
948 * Thread.pass => nil
950 * Invokes the thread scheduler to pass execution to another thread.
952 * a = Thread.new { print "a"; Thread.pass;
953 * print "b"; Thread.pass;
954 * print "c" }
955 * b = Thread.new { print "x"; Thread.pass;
956 * print "y"; Thread.pass;
957 * print "z" }
958 * a.join
959 * b.join
961 * <em>produces:</em>
963 * axbycz
966 static VALUE
967 thread_s_pass(VALUE klass)
969 rb_thread_schedule();
970 return Qnil;
977 void
978 rb_thread_execute_interrupts(rb_thread_t *th)
980 if (th->raised_flag) return;
982 while (th->interrupt_flag) {
983 enum rb_thread_status status = th->status;
984 int timer_interrupt = th->interrupt_flag & 0x01;
985 int finalizer_interrupt = th->interrupt_flag & 0x04;
987 th->status = THREAD_RUNNABLE;
988 th->interrupt_flag = 0;
990 /* signal handling */
991 if (th->exec_signal) {
992 int sig = th->exec_signal;
993 th->exec_signal = 0;
994 rb_signal_exec(th, sig);
997 /* exception from another thread */
998 if (th->thrown_errinfo) {
999 VALUE err = th->thrown_errinfo;
1000 th->thrown_errinfo = 0;
1001 thread_debug("rb_thread_execute_interrupts: %ld\n", err);
1003 if (err == eKillSignal || err == eTerminateSignal) {
1004 th->errinfo = INT2FIX(TAG_FATAL);
1005 TH_JUMP_TAG(th, TAG_FATAL);
1007 else {
1008 rb_exc_raise(err);
1011 th->status = status;
1013 if (finalizer_interrupt) {
1014 rb_gc_finalize_deferred();
1017 if (timer_interrupt) {
1018 EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);
1020 if (th->slice > 0) {
1021 th->slice--;
1023 else {
1024 reschedule:
1025 rb_thread_schedule();
1026 if (th->slice < 0) {
1027 th->slice++;
1028 goto reschedule;
1030 else {
1031 th->slice = th->priority;
/* Kept only for compatibility; thread marking happens elsewhere now
 * (original note: TODO: remove). */
void
rb_gc_mark_threads(void)
{
}
1045 /*****************************************************/
1047 static void
1048 rb_thread_ready(rb_thread_t *th)
1050 rb_thread_interrupt(th);
1053 static VALUE
1054 rb_thread_raise(int argc, VALUE *argv, rb_thread_t *th)
1056 VALUE exc;
1058 again:
1059 if (rb_thread_dead(th)) {
1060 return Qnil;
1063 if (th->thrown_errinfo != 0 || th->raised_flag) {
1064 rb_thread_schedule();
1065 goto again;
1068 exc = rb_make_exception(argc, argv);
1069 th->thrown_errinfo = exc;
1070 rb_thread_ready(th);
1071 return Qnil;
1074 void
1075 rb_thread_signal_raise(void *thptr, int sig)
1077 VALUE argv[2];
1078 rb_thread_t *th = thptr;
1080 argv[0] = rb_eSignal;
1081 argv[1] = INT2FIX(sig);
1082 rb_thread_raise(2, argv, th->vm->main_thread);
1085 void
1086 rb_thread_signal_exit(void *thptr)
1088 VALUE argv[2];
1089 rb_thread_t *th = thptr;
1091 argv[0] = rb_eSystemExit;
1092 argv[1] = rb_str_new2("exit");
1093 rb_thread_raise(2, argv, th->vm->main_thread);
1097 rb_thread_set_raised(rb_thread_t *th)
1099 if (th->raised_flag & RAISED_EXCEPTION) {
1100 return 1;
1102 th->raised_flag |= RAISED_EXCEPTION;
1103 return 0;
1107 rb_thread_reset_raised(rb_thread_t *th)
1109 if (!(th->raised_flag & RAISED_EXCEPTION)) {
1110 return 0;
1112 th->raised_flag &= ~RAISED_EXCEPTION;
1113 return 1;
/* Intentionally a no-op for now: waking threads blocked on FD is not
 * implemented yet (original note: TODO: fix me). */
void
rb_thread_fd_close(int fd)
{
}
1123 * call-seq:
1124 * thr.raise(exception)
1126 * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
1127 * caller does not have to be <i>thr</i>.
1129 * Thread.abort_on_exception = true
1130 * a = Thread.new { sleep(200) }
1131 * a.raise("Gotcha")
1133 * <em>produces:</em>
1135 * prog.rb:3: Gotcha (RuntimeError)
1136 * from prog.rb:2:in `initialize'
1137 * from prog.rb:2:in `new'
1138 * from prog.rb:2
1141 static VALUE
1142 thread_raise_m(int argc, VALUE *argv, VALUE self)
1144 rb_thread_t *th;
1145 GetThreadPtr(self, th);
1146 rb_thread_raise(argc, argv, th);
1147 return Qnil;
1152 * call-seq:
1153 * thr.exit => thr or nil
1154 * thr.kill => thr or nil
1155 * thr.terminate => thr or nil
1157 * Terminates <i>thr</i> and schedules another thread to be run. If this thread
1158 * is already marked to be killed, <code>exit</code> returns the
1159 * <code>Thread</code>. If this is the main thread, or the last thread, exits
1160 * the process.
1163 VALUE
1164 rb_thread_kill(VALUE thread)
1166 rb_thread_t *th;
1168 GetThreadPtr(thread, th);
1170 if (th != GET_THREAD() && th->safe_level < 4) {
1171 rb_secure(4);
1173 if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
1174 return thread;
1176 if (th == th->vm->main_thread) {
1177 rb_exit(EXIT_SUCCESS);
1180 thread_debug("rb_thread_kill: %p (%p)\n", th, (void *)th->thread_id);
1182 rb_thread_interrupt(th);
1183 th->thrown_errinfo = eKillSignal;
1184 th->status = THREAD_TO_KILL;
1186 return thread;
1191 * call-seq:
1192 * Thread.kill(thread) => thread
1194 * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
1196 * count = 0
1197 * a = Thread.new { loop { count += 1 } }
1198 * sleep(0.1) #=> 0
1199 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
1200 * count #=> 93947
1201 * a.alive? #=> false
1204 static VALUE
1205 rb_thread_s_kill(VALUE obj, VALUE th)
1207 return rb_thread_kill(th);
1212 * call-seq:
1213 * Thread.exit => thread
1215 * Terminates the currently running thread and schedules another thread to be
1216 * run. If this thread is already marked to be killed, <code>exit</code>
1217 * returns the <code>Thread</code>. If this is the main thread, or the last
1218 * thread, exit the process.
1221 static VALUE
1222 rb_thread_exit(void)
1224 return rb_thread_kill(GET_THREAD()->self);
1229 * call-seq:
1230 * thr.wakeup => thr
1232 * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
1233 * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
1235 * c = Thread.new { Thread.stop; puts "hey!" }
1236 * c.wakeup
1238 * <em>produces:</em>
1240 * hey!
1243 VALUE
1244 rb_thread_wakeup(VALUE thread)
1246 rb_thread_t *th;
1247 GetThreadPtr(thread, th);
1249 if (th->status == THREAD_KILLED) {
1250 rb_raise(rb_eThreadError, "killed thread");
1252 rb_thread_ready(th);
1253 if (th->status != THREAD_TO_KILL) {
1254 th->status = THREAD_RUNNABLE;
1256 return thread;
1261 * call-seq:
1262 * thr.run => thr
1264 * Wakes up <i>thr</i>, making it eligible for scheduling.
1266 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
1267 * Thread.pass
1268 * puts "Got here"
1269 * a.run
1270 * a.join
1272 * <em>produces:</em>
1275 * Got here
1279 VALUE
1280 rb_thread_run(VALUE thread)
1282 rb_thread_wakeup(thread);
1283 rb_thread_schedule();
1284 return thread;
1289 * call-seq:
1290 * Thread.stop => nil
1292 * Stops execution of the current thread, putting it into a ``sleep'' state,
1293 * and schedules execution of another thread.
1295 * a = Thread.new { print "a"; Thread.stop; print "c" }
1296 * Thread.pass
1297 * print "b"
1298 * a.run
1299 * a.join
1301 * <em>produces:</em>
1303 * abc
1306 VALUE
1307 rb_thread_stop(void)
1309 if (rb_thread_alone()) {
1310 rb_raise(rb_eThreadError,
1311 "stopping only thread\n\tnote: use sleep to stop forever");
1313 rb_thread_sleep_deadly();
1314 return Qnil;
1317 static int
1318 thread_list_i(st_data_t key, st_data_t val, void *data)
1320 VALUE ary = (VALUE)data;
1321 rb_thread_t *th;
1322 GetThreadPtr((VALUE)key, th);
1324 switch (th->status) {
1325 case THREAD_RUNNABLE:
1326 case THREAD_STOPPED:
1327 case THREAD_STOPPED_FOREVER:
1328 case THREAD_TO_KILL:
1329 rb_ary_push(ary, th->self);
1330 default:
1331 break;
1333 return ST_CONTINUE;
1336 /********************************************************************/
1339 * call-seq:
1340 * Thread.list => array
1342 * Returns an array of <code>Thread</code> objects for all threads that are
1343 * either runnable or stopped.
1345 * Thread.new { sleep(200) }
1346 * Thread.new { 1000000.times {|i| i*i } }
1347 * Thread.new { Thread.stop }
1348 * Thread.list.each {|t| p t}
1350 * <em>produces:</em>
1352 * #<Thread:0x401b3e84 sleep>
1353 * #<Thread:0x401b3f38 run>
1354 * #<Thread:0x401b3fb0 sleep>
1355 * #<Thread:0x401bdf4c run>
1358 VALUE
1359 rb_thread_list(void)
1361 VALUE ary = rb_ary_new();
1362 st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
1363 return ary;
1366 VALUE
1367 rb_thread_current(void)
1369 return GET_THREAD()->self;
1373 * call-seq:
1374 * Thread.current => thread
1376 * Returns the currently executing thread.
1378 * Thread.current #=> #<Thread:0x401bdf4c run>
1381 static VALUE
1382 thread_s_current(VALUE klass)
1384 return rb_thread_current();
1387 VALUE
1388 rb_thread_main(void)
1390 return GET_THREAD()->vm->main_thread->self;
1393 static VALUE
1394 rb_thread_s_main(VALUE klass)
1396 return rb_thread_main();
1401 * call-seq:
1402 * Thread.abort_on_exception => true or false
1404 * Returns the status of the global ``abort on exception'' condition. The
1405 * default is <code>false</code>. When set to <code>true</code>, or if the
1406 * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
1407 * command line option <code>-d</code> was specified) all threads will abort
1408 * (the process will <code>exit(0)</code>) if an exception is raised in any
1409 * thread. See also <code>Thread::abort_on_exception=</code>.
1412 static VALUE
1413 rb_thread_s_abort_exc(void)
1415 return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
1420 * call-seq:
1421 * Thread.abort_on_exception= boolean => true or false
1423 * When set to <code>true</code>, all threads will abort if an exception is
1424 * raised. Returns the new state.
1426 * Thread.abort_on_exception = true
1427 * t1 = Thread.new do
1428 * puts "In new thread"
1429 * raise "Exception from thread"
1430 * end
1431 * sleep(1)
1432 * puts "not reached"
1434 * <em>produces:</em>
1436 * In new thread
1437 * prog.rb:4: Exception from thread (RuntimeError)
1438 * from prog.rb:2:in `initialize'
1439 * from prog.rb:2:in `new'
1440 * from prog.rb:2
1443 static VALUE
1444 rb_thread_s_abort_exc_set(VALUE self, VALUE val)
1446 rb_secure(4);
1447 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
1448 return val;
1453 * call-seq:
1454 * thr.abort_on_exception => true or false
1456 * Returns the status of the thread-local ``abort on exception'' condition for
1457 * <i>thr</i>. The default is <code>false</code>. See also
1458 * <code>Thread::abort_on_exception=</code>.
1461 static VALUE
1462 rb_thread_abort_exc(VALUE thread)
1464 rb_thread_t *th;
1465 GetThreadPtr(thread, th);
1466 return th->abort_on_exception ? Qtrue : Qfalse;
1471 * call-seq:
1472 * thr.abort_on_exception= boolean => true or false
1474 * When set to <code>true</code>, causes all threads (including the main
1475 * program) to abort if an exception is raised in <i>thr</i>. The process will
1476 * effectively <code>exit(0)</code>.
1479 static VALUE
1480 rb_thread_abort_exc_set(VALUE thread, VALUE val)
1482 rb_thread_t *th;
1483 rb_secure(4);
1485 GetThreadPtr(thread, th);
1486 th->abort_on_exception = RTEST(val);
1487 return val;
1492 * call-seq:
1493 * thr.group => thgrp or nil
1495 * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
1496 * the thread is not a member of any group.
1498 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
1501 VALUE
1502 rb_thread_group(VALUE thread)
1504 rb_thread_t *th;
1505 VALUE group;
1506 GetThreadPtr(thread, th);
1507 group = th->thgroup;
1509 if (!group) {
1510 group = Qnil;
1512 return group;
1515 static const char *
1516 thread_status_name(enum rb_thread_status status)
1518 switch (status) {
1519 case THREAD_RUNNABLE:
1520 return "run";
1521 case THREAD_STOPPED:
1522 case THREAD_STOPPED_FOREVER:
1523 return "sleep";
1524 case THREAD_TO_KILL:
1525 return "aborting";
1526 case THREAD_KILLED:
1527 return "dead";
1528 default:
1529 return "unknown";
1533 static int
1534 rb_thread_dead(rb_thread_t *th)
1536 return th->status == THREAD_KILLED;
1541 * call-seq:
1542 * thr.status => string, false or nil
1544 * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
1545 * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
1546 * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
1547 * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
1548 * terminated with an exception.
1550 * a = Thread.new { raise("die now") }
1551 * b = Thread.new { Thread.stop }
1552 * c = Thread.new { Thread.exit }
1553 * d = Thread.new { sleep }
1554 * d.kill #=> #<Thread:0x401b3678 aborting>
1555 * a.status #=> nil
1556 * b.status #=> "sleep"
1557 * c.status #=> false
1558 * d.status #=> "aborting"
1559 * Thread.current.status #=> "run"
1562 static VALUE
1563 rb_thread_status(VALUE thread)
1565 rb_thread_t *th;
1566 GetThreadPtr(thread, th);
1568 if (rb_thread_dead(th)) {
1569 if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
1570 /* TODO */ ) {
1571 return Qnil;
1573 return Qfalse;
1575 return rb_str_new2(thread_status_name(th->status));
1580 * call-seq:
1581 * thr.alive? => true or false
1583 * Returns <code>true</code> if <i>thr</i> is running or sleeping.
1585 * thr = Thread.new { }
1586 * thr.join #=> #<Thread:0x401b3fb0 dead>
1587 * Thread.current.alive? #=> true
1588 * thr.alive? #=> false
1591 static VALUE
1592 rb_thread_alive_p(VALUE thread)
1594 rb_thread_t *th;
1595 GetThreadPtr(thread, th);
1597 if (rb_thread_dead(th))
1598 return Qfalse;
1599 return Qtrue;
1603 * call-seq:
1604 * thr.stop? => true or false
1606 * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
1608 * a = Thread.new { Thread.stop }
1609 * b = Thread.current
1610 * a.stop? #=> true
1611 * b.stop? #=> false
1614 static VALUE
1615 rb_thread_stop_p(VALUE thread)
1617 rb_thread_t *th;
1618 GetThreadPtr(thread, th);
1620 if (rb_thread_dead(th))
1621 return Qtrue;
1622 if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
1623 return Qtrue;
1624 return Qfalse;
1628 * call-seq:
1629 * thr.safe_level => integer
1631 * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
1632 * levels can help when implementing sandboxes which run insecure code.
1634 * thr = Thread.new { $SAFE = 3; sleep }
1635 * Thread.current.safe_level #=> 0
1636 * thr.safe_level #=> 3
1639 static VALUE
1640 rb_thread_safe_level(VALUE thread)
1642 rb_thread_t *th;
1643 GetThreadPtr(thread, th);
1645 return INT2NUM(th->safe_level);
1649 * call-seq:
1650 * thr.inspect => string
1652 * Dump the name, id, and status of _thr_ to a string.
1655 static VALUE
1656 rb_thread_inspect(VALUE thread)
1658 const char *cname = rb_obj_classname(thread);
1659 rb_thread_t *th;
1660 const char *status;
1661 VALUE str;
1663 GetThreadPtr(thread, th);
1664 status = thread_status_name(th->status);
1665 str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
1666 OBJ_INFECT(str, thread);
1668 return str;
1671 VALUE
1672 rb_thread_local_aref(VALUE thread, ID id)
1674 rb_thread_t *th;
1675 VALUE val;
1677 GetThreadPtr(thread, th);
1678 if (rb_safe_level() >= 4 && th != GET_THREAD()) {
1679 rb_raise(rb_eSecurityError, "Insecure: thread locals");
1681 if (!th->local_storage) {
1682 return Qnil;
1684 if (st_lookup(th->local_storage, id, &val)) {
1685 return val;
1687 return Qnil;
1691 * call-seq:
1692 * thr[sym] => obj or nil
1694 * Attribute Reference---Returns the value of a thread-local variable, using
1695 * either a symbol or a string name. If the specified variable does not exist,
1696 * returns <code>nil</code>.
1698 * a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
1699 * b = Thread.new { Thread.current[:name] = "B"; Thread.stop }
1700 * c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
1701 * Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
1703 * <em>produces:</em>
1705 * #<Thread:0x401b3b3c sleep>: C
1706 * #<Thread:0x401b3bc8 sleep>: B
1707 * #<Thread:0x401b3c68 sleep>: A
1708 * #<Thread:0x401bdf4c run>:
1711 static VALUE
1712 rb_thread_aref(VALUE thread, VALUE id)
1714 return rb_thread_local_aref(thread, rb_to_id(id));
1717 VALUE
1718 rb_thread_local_aset(VALUE thread, ID id, VALUE val)
1720 rb_thread_t *th;
1721 GetThreadPtr(thread, th);
1723 if (rb_safe_level() >= 4 && th != GET_THREAD()) {
1724 rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
1726 if (OBJ_FROZEN(thread)) {
1727 rb_error_frozen("thread locals");
1729 if (!th->local_storage) {
1730 th->local_storage = st_init_numtable();
1732 if (NIL_P(val)) {
1733 st_delete_wrap(th->local_storage, id);
1734 return Qnil;
1736 st_insert(th->local_storage, id, val);
1737 return val;
1741 * call-seq:
1742 * thr[sym] = obj => obj
1744 * Attribute Assignment---Sets or creates the value of a thread-local variable,
1745 * using either a symbol or a string. See also <code>Thread#[]</code>.
1748 static VALUE
1749 rb_thread_aset(VALUE self, ID id, VALUE val)
1751 return rb_thread_local_aset(self, rb_to_id(id), val);
1755 * call-seq:
1756 * thr.key?(sym) => true or false
1758 * Returns <code>true</code> if the given string (or symbol) exists as a
1759 * thread-local variable.
1761 * me = Thread.current
1762 * me[:oliver] = "a"
1763 * me.key?(:oliver) #=> true
1764 * me.key?(:stanley) #=> false
1767 static VALUE
1768 rb_thread_key_p(VALUE self, VALUE key)
1770 rb_thread_t *th;
1771 ID id = rb_to_id(key);
1773 GetThreadPtr(self, th);
1775 if (!th->local_storage) {
1776 return Qfalse;
1778 if (st_lookup(th->local_storage, id, 0)) {
1779 return Qtrue;
1781 return Qfalse;
1784 static int
1785 thread_keys_i(ID key, VALUE value, VALUE ary)
1787 rb_ary_push(ary, ID2SYM(key));
1788 return ST_CONTINUE;
1791 static int
1792 vm_living_thread_num(rb_vm_t *vm)
1794 return vm->living_threads->num_entries;
1798 rb_thread_alone()
1800 int num = 1;
1801 if (GET_THREAD()->vm->living_threads) {
1802 num = vm_living_thread_num(GET_THREAD()->vm);
1803 thread_debug("rb_thread_alone: %d\n", num);
1805 return num == 1;
1809 * call-seq:
1810 * thr.keys => array
1812 * Returns an an array of the names of the thread-local variables (as Symbols).
1814 * thr = Thread.new do
1815 * Thread.current[:cat] = 'meow'
1816 * Thread.current["dog"] = 'woof'
1817 * end
1818 * thr.join #=> #<Thread:0x401b3f10 dead>
1819 * thr.keys #=> [:dog, :cat]
1822 static VALUE
1823 rb_thread_keys(VALUE self)
1825 rb_thread_t *th;
1826 VALUE ary = rb_ary_new();
1827 GetThreadPtr(self, th);
1829 if (th->local_storage) {
1830 st_foreach(th->local_storage, thread_keys_i, ary);
1832 return ary;
1836 * call-seq:
1837 * thr.priority => integer
1839 * Returns the priority of <i>thr</i>. Default is inherited from the
1840 * current thread which creating the new thread, or zero for the
1841 * initial main thread; higher-priority threads will run before
1842 * lower-priority threads.
1844 * Thread.current.priority #=> 0
1847 static VALUE
1848 rb_thread_priority(VALUE thread)
1850 rb_thread_t *th;
1851 GetThreadPtr(thread, th);
1852 return INT2NUM(th->priority);
1857 * call-seq:
1858 * thr.priority= integer => thr
1860 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
1861 * will run before lower-priority threads.
1863 * count1 = count2 = 0
1864 * a = Thread.new do
1865 * loop { count1 += 1 }
1866 * end
1867 * a.priority = -1
1869 * b = Thread.new do
1870 * loop { count2 += 1 }
1871 * end
1872 * b.priority = -2
1873 * sleep 1 #=> 1
1874 * count1 #=> 622504
1875 * count2 #=> 5832
1878 static VALUE
1879 rb_thread_priority_set(VALUE thread, VALUE prio)
1881 rb_thread_t *th;
1882 int priority;
1883 GetThreadPtr(thread, th);
1885 rb_secure(4);
1887 #if USE_NATIVE_THREAD_PRIORITY
1888 th->priority = NUM2INT(prio);
1889 native_thread_apply_priority(th);
1890 #else
1891 priority = NUM2INT(prio);
1892 if (priority > RUBY_THREAD_PRIORITY_MAX) {
1893 priority = RUBY_THREAD_PRIORITY_MAX;
1895 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
1896 priority = RUBY_THREAD_PRIORITY_MIN;
1898 th->priority = priority;
1899 th->slice = priority;
1900 #endif
1901 return INT2NUM(th->priority);
1904 /* for IO */
1906 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
1907 void
1908 rb_fd_init(volatile rb_fdset_t *fds)
1910 fds->maxfd = 0;
1911 fds->fdset = ALLOC(fd_set);
1912 FD_ZERO(fds->fdset);
1915 void
1916 rb_fd_term(rb_fdset_t *fds)
1918 if (fds->fdset) xfree(fds->fdset);
1919 fds->maxfd = 0;
1920 fds->fdset = 0;
1923 void
1924 rb_fd_zero(rb_fdset_t *fds)
1926 if (fds->fdset) {
1927 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
1928 FD_ZERO(fds->fdset);
1932 static void
1933 rb_fd_resize(int n, rb_fdset_t *fds)
1935 int m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
1936 int o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
1938 if (m < sizeof(fd_set)) m = sizeof(fd_set);
1939 if (o < sizeof(fd_set)) o = sizeof(fd_set);
1941 if (m > o) {
1942 fds->fdset = realloc(fds->fdset, m);
1943 memset((char *)fds->fdset + o, 0, m - o);
1945 if (n >= fds->maxfd) fds->maxfd = n + 1;
1948 void
1949 rb_fd_set(int n, rb_fdset_t *fds)
1951 rb_fd_resize(n, fds);
1952 FD_SET(n, fds->fdset);
1955 void
1956 rb_fd_clr(int n, rb_fdset_t *fds)
1958 if (n >= fds->maxfd) return;
1959 FD_CLR(n, fds->fdset);
1963 rb_fd_isset(int n, const rb_fdset_t *fds)
1965 if (n >= fds->maxfd) return 0;
1966 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
1969 void
1970 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
1972 int size = howmany(max, NFDBITS) * sizeof(fd_mask);
1974 if (size < sizeof(fd_set)) size = sizeof(fd_set);
1975 dst->maxfd = max;
1976 dst->fdset = realloc(dst->fdset, size);
1977 memcpy(dst->fdset, src, size);
1981 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
1983 fd_set *r = NULL, *w = NULL, *e = NULL;
1984 if (readfds) {
1985 rb_fd_resize(n - 1, readfds);
1986 r = rb_fd_ptr(readfds);
1988 if (writefds) {
1989 rb_fd_resize(n - 1, writefds);
1990 w = rb_fd_ptr(writefds);
1992 if (exceptfds) {
1993 rb_fd_resize(n - 1, exceptfds);
1994 e = rb_fd_ptr(exceptfds);
1996 return select(n, r, w, e, timeout);
1999 #undef FD_ZERO
2000 #undef FD_SET
2001 #undef FD_CLR
2002 #undef FD_ISSET
2004 #define FD_ZERO(f) rb_fd_zero(f)
2005 #define FD_SET(i, f) rb_fd_set(i, f)
2006 #define FD_CLR(i, f) rb_fd_clr(i, f)
2007 #define FD_ISSET(i, f) rb_fd_isset(i, f)
2009 #endif
2011 #if defined(__CYGWIN__) || defined(_WIN32)
/* Compares two timevals: <0, 0, >0 like strcmp, seconds first. */
static long
cmp_tv(const struct timeval *a, const struct timeval *b)
{
    long d = (a->tv_sec - b->tv_sec);
    return (d != 0) ? d : (a->tv_usec - b->tv_usec);
}

/*
 * rest -= wait, normalizing usec by borrowing from sec.
 * Returns 0 (and leaves rest partially modified) when wait >= rest,
 * 1 when time remains.
 */
static int
subtract_tv(struct timeval *rest, const struct timeval *wait)
{
    while (rest->tv_usec < wait->tv_usec) {
	if (rest->tv_sec <= wait->tv_sec) {
	    return 0;   /* nothing (or less than nothing) left */
	}
	rest->tv_sec -= 1;
	rest->tv_usec += 1000 * 1000;
    }
    rest->tv_sec -= wait->tv_sec;
    rest->tv_usec -= wait->tv_usec;
    return 1;
}
2033 #endif
2035 static int
2036 do_select(int n, fd_set *read, fd_set *write, fd_set *except,
2037 struct timeval *timeout)
2039 int result, lerrno;
2040 fd_set orig_read, orig_write, orig_except;
2042 #ifndef linux
2043 double limit = 0;
2044 struct timeval wait_rest;
2045 # if defined(__CYGWIN__) || defined(_WIN32)
2046 struct timeval start_time;
2047 # endif
2049 if (timeout) {
2050 # if defined(__CYGWIN__) || defined(_WIN32)
2051 gettimeofday(&start_time, NULL);
2052 limit = (double)start_time.tv_sec + (double)start_time.tv_usec*1e-6;
2053 # else
2054 limit = timeofday();
2055 # endif
2056 limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
2057 wait_rest = *timeout;
2058 timeout = &wait_rest;
2060 #endif
2062 if (read) orig_read = *read;
2063 if (write) orig_write = *write;
2064 if (except) orig_except = *except;
2066 retry:
2067 lerrno = 0;
2069 #if defined(__CYGWIN__) || defined(_WIN32)
2071 int finish = 0;
2072 /* polling duration: 100ms */
2073 struct timeval wait_100ms, *wait;
2074 wait_100ms.tv_sec = 0;
2075 wait_100ms.tv_usec = 100 * 1000; /* 100 ms */
2077 do {
2078 wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) > 0) ? &wait_100ms : timeout;
2079 BLOCKING_REGION({
2080 do {
2081 result = select(n, read, write, except, wait);
2082 if (result < 0) lerrno = errno;
2083 if (result != 0) break;
2085 if (read) *read = orig_read;
2086 if (write) *write = orig_write;
2087 if (except) *except = orig_except;
2088 wait = &wait_100ms;
2089 if (timeout) {
2090 struct timeval elapsed;
2091 gettimeofday(&elapsed, NULL);
2092 subtract_tv(&elapsed, &start_time);
2093 if (!subtract_tv(timeout, &elapsed)) {
2094 finish = 1;
2095 break;
2097 if (cmp_tv(&wait_100ms, timeout) < 0) wait = timeout;
2099 } while (__th->interrupt_flag == 0);
2100 }, 0, 0);
2101 } while (result == 0 && !finish);
2103 #else
2104 BLOCKING_REGION({
2105 result = select(n, read, write, except, timeout);
2106 if (result < 0) lerrno = errno;
2107 }, ubf_select, GET_THREAD());
2108 #endif
2110 errno = lerrno;
2112 if (result < 0) {
2113 switch (errno) {
2114 case EINTR:
2115 #ifdef ERESTART
2116 case ERESTART:
2117 #endif
2118 if (read) *read = orig_read;
2119 if (write) *write = orig_write;
2120 if (except) *except = orig_except;
2121 #ifndef linux
2122 if (timeout) {
2123 double d = limit - timeofday();
2125 wait_rest.tv_sec = (unsigned int)d;
2126 wait_rest.tv_usec = (long)((d-(double)wait_rest.tv_sec)*1e6);
2127 if (wait_rest.tv_sec < 0) wait_rest.tv_sec = 0;
2128 if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
2130 #endif
2131 goto retry;
2132 default:
2133 break;
2136 return result;
2139 static void
2140 rb_thread_wait_fd_rw(int fd, int read)
2142 int result = 0;
2143 thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
2145 if (fd < 0) {
2146 rb_raise(rb_eIOError, "closed stream");
2148 while (result <= 0) {
2149 rb_fdset_t set;
2150 rb_fd_init(&set);
2151 FD_SET(fd, &set);
2153 if (read) {
2154 result = do_select(fd + 1, rb_fd_ptr(&set), 0, 0, 0);
2156 else {
2157 result = do_select(fd + 1, 0, rb_fd_ptr(&set), 0, 0);
2160 rb_fd_term(&set);
2162 if (result < 0) {
2163 rb_sys_fail(0);
2167 thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
2170 void
2171 rb_thread_wait_fd(int fd)
2173 rb_thread_wait_fd_rw(fd, 1);
2177 rb_thread_fd_writable(int fd)
2179 rb_thread_wait_fd_rw(fd, 0);
2180 return Qtrue;
2184 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
2185 struct timeval *timeout)
2187 if (!read && !write && !except) {
2188 if (!timeout) {
2189 rb_thread_sleep_forever();
2190 return 0;
2192 rb_thread_wait_for(*timeout);
2193 return 0;
2195 else {
2196 return do_select(max, read, write, except, timeout);
2202 * for GC
2205 #ifdef USE_CONSERVATIVE_STACK_END
2206 void
2207 rb_gc_set_stack_end(VALUE **stack_end_p)
2209 VALUE stack_end;
2210 *stack_end_p = &stack_end;
2212 #endif
2214 void
2215 rb_gc_save_machine_context(rb_thread_t *th)
2217 SET_MACHINE_STACK_END(&th->machine_stack_end);
2218 FLUSH_REGISTER_WINDOWS;
2219 #ifdef __ia64
2220 th->machine_register_stack_end = rb_ia64_bsp();
2221 #endif
2222 setjmp(th->machine_regs);
2229 int rb_get_next_signal(rb_vm_t *vm);
2231 static void
2232 timer_thread_function(void *arg)
2234 rb_vm_t *vm = arg; /* TODO: fix me for Multi-VM */
2236 /* for time slice */
2237 RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);
2239 /* check signal */
2240 if (vm->buffered_signal_size && vm->main_thread->exec_signal == 0) {
2241 rb_thread_t *mth = vm->main_thread;
2242 enum rb_thread_status prev_status = mth->status;
2243 mth->exec_signal = rb_get_next_signal(vm);
2244 thread_debug("main_thread: %s\n", thread_status_name(prev_status));
2245 thread_debug("buffered_signal_size: %ld, sig: %d\n",
2246 (long)vm->buffered_signal_size, vm->main_thread->exec_signal);
2247 if (mth->status != THREAD_KILLED) mth->status = THREAD_RUNNABLE;
2248 rb_thread_interrupt(mth);
2249 mth->status = prev_status;
2252 #if 0
2253 /* prove profiler */
2254 if (vm->prove_profile.enable) {
2255 rb_thread_t *th = vm->running_thread;
2257 if (vm->during_gc) {
2258 /* GC prove profiling */
2261 #endif
2264 void
2265 rb_thread_stop_timer_thread(void)
2267 if (timer_thread_id) {
2268 system_working = 0;
2269 native_thread_join(timer_thread_id);
2270 timer_thread_id = 0;
2274 void
2275 rb_thread_reset_timer_thread(void)
2277 timer_thread_id = 0;
2280 void
2281 rb_thread_start_timer_thread(void)
2283 rb_thread_create_timer_thread();
2286 static int
2287 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
2289 int i;
2290 VALUE lines = (VALUE)val;
2292 for (i = 0; i < RARRAY_LEN(lines); i++) {
2293 if (RARRAY_PTR(lines)[i] != Qnil) {
2294 RARRAY_PTR(lines)[i] = INT2FIX(0);
2297 return ST_CONTINUE;
2300 static void
2301 clear_coverage(void)
2303 extern VALUE rb_get_coverages(void);
2304 VALUE coverages = rb_get_coverages();
2305 if (RTEST(coverages)) {
2306 st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
2310 static int
2311 terminate_atfork_i(st_data_t key, st_data_t val, rb_thread_t *current_th)
2313 VALUE thval = key;
2314 rb_thread_t *th;
2315 GetThreadPtr(thval, th);
2317 if (th != current_th) {
2318 thread_cleanup_func(th);
2320 return ST_CONTINUE;
2323 void
2324 rb_thread_atfork(void)
2326 rb_thread_t *th = GET_THREAD();
2327 rb_vm_t *vm = th->vm;
2328 VALUE thval = th->self;
2329 vm->main_thread = th;
2331 st_foreach(vm->living_threads, terminate_atfork_i, (st_data_t)th);
2332 st_clear(vm->living_threads);
2333 st_insert(vm->living_threads, thval, (st_data_t) th->thread_id);
2334 vm->sleeper = 0;
2335 clear_coverage();
2336 rb_reset_random_seed();
2339 static int
2340 terminate_atfork_before_exec_i(st_data_t key, st_data_t val, rb_thread_t *current_th)
2342 VALUE thval = key;
2343 rb_thread_t *th;
2344 GetThreadPtr(thval, th);
2346 if (th != current_th) {
2347 thread_cleanup_func_before_exec(th);
2349 return ST_CONTINUE;
2352 void
2353 rb_thread_atfork_before_exec(void)
2355 rb_thread_t *th = GET_THREAD();
2356 rb_vm_t *vm = th->vm;
2357 VALUE thval = th->self;
2358 vm->main_thread = th;
2360 st_foreach(vm->living_threads, terminate_atfork_before_exec_i, (st_data_t)th);
2361 st_clear(vm->living_threads);
2362 st_insert(vm->living_threads, thval, (st_data_t) th->thread_id);
2363 vm->sleeper = 0;
2364 clear_coverage();
2367 struct thgroup {
2368 int enclosed;
2369 VALUE group;
2373 * Document-class: ThreadGroup
2375 * <code>ThreadGroup</code> provides a means of keeping track of a number of
2376 * threads as a group. A <code>Thread</code> can belong to only one
2377 * <code>ThreadGroup</code> at a time; adding a thread to a new group will
2378 * remove it from any previous group.
2380 * Newly created threads belong to the same group as the thread from which they
2381 * were created.
2384 static VALUE thgroup_s_alloc(VALUE);
2385 static VALUE
2386 thgroup_s_alloc(VALUE klass)
2388 VALUE group;
2389 struct thgroup *data;
2391 group = Data_Make_Struct(klass, struct thgroup, 0, -1, data);
2392 data->enclosed = 0;
2393 data->group = group;
2395 return group;
2398 struct thgroup_list_params {
2399 VALUE ary;
2400 VALUE group;
2403 static int
2404 thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
2406 VALUE thread = (VALUE)key;
2407 VALUE ary = ((struct thgroup_list_params *)data)->ary;
2408 VALUE group = ((struct thgroup_list_params *)data)->group;
2409 rb_thread_t *th;
2410 GetThreadPtr(thread, th);
2412 if (th->thgroup == group) {
2413 rb_ary_push(ary, thread);
2415 return ST_CONTINUE;
2419 * call-seq:
2420 * thgrp.list => array
2422 * Returns an array of all existing <code>Thread</code> objects that belong to
2423 * this group.
2425 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
2428 static VALUE
2429 thgroup_list(VALUE group)
2431 VALUE ary = rb_ary_new();
2432 struct thgroup_list_params param;
2434 param.ary = ary;
2435 param.group = group;
2436 st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
2437 return ary;
2442 * call-seq:
2443 * thgrp.enclose => thgrp
2445 * Prevents threads from being added to or removed from the receiving
2446 * <code>ThreadGroup</code>. New threads can still be started in an enclosed
2447 * <code>ThreadGroup</code>.
2449 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
2450 * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
2451 * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
2452 * tg.add thr
2454 * <em>produces:</em>
2456 * ThreadError: can't move from the enclosed thread group
2459 VALUE
2460 thgroup_enclose(VALUE group)
2462 struct thgroup *data;
2464 Data_Get_Struct(group, struct thgroup, data);
2465 data->enclosed = 1;
2467 return group;
2472 * call-seq:
2473 * thgrp.enclosed? => true or false
2475 * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
2476 * ThreadGroup#enclose.
2479 static VALUE
2480 thgroup_enclosed_p(VALUE group)
2482 struct thgroup *data;
2484 Data_Get_Struct(group, struct thgroup, data);
2485 if (data->enclosed)
2486 return Qtrue;
2487 return Qfalse;
2492 * call-seq:
2493 * thgrp.add(thread) => thgrp
2495 * Adds the given <em>thread</em> to this group, removing it from any other
2496 * group to which it may have previously belonged.
2498 * puts "Initial group is #{ThreadGroup::Default.list}"
2499 * tg = ThreadGroup.new
2500 * t1 = Thread.new { sleep }
2501 * t2 = Thread.new { sleep }
2502 * puts "t1 is #{t1}"
2503 * puts "t2 is #{t2}"
2504 * tg.add(t1)
2505 * puts "Initial group now #{ThreadGroup::Default.list}"
2506 * puts "tg group now #{tg.list}"
2508 * <em>produces:</em>
2510 * Initial group is #<Thread:0x401bdf4c>
2511 * t1 is #<Thread:0x401b3c90>
2512 * t2 is #<Thread:0x401b3c18>
2513 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
2514 * tg group now #<Thread:0x401b3c90>
2517 static VALUE
2518 thgroup_add(VALUE group, VALUE thread)
2520 rb_thread_t *th;
2521 struct thgroup *data;
2523 rb_secure(4);
2524 GetThreadPtr(thread, th);
2526 if (OBJ_FROZEN(group)) {
2527 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
2529 Data_Get_Struct(group, struct thgroup, data);
2530 if (data->enclosed) {
2531 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
2534 if (!th->thgroup) {
2535 return Qnil;
2538 if (OBJ_FROZEN(th->thgroup)) {
2539 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
2541 Data_Get_Struct(th->thgroup, struct thgroup, data);
2542 if (data->enclosed) {
2543 rb_raise(rb_eThreadError,
2544 "can't move from the enclosed thread group");
2547 th->thgroup = group;
2548 return group;
2553 * Document-class: Mutex
2555 * Mutex implements a simple semaphore that can be used to coordinate access to
2556 * shared data from multiple concurrent threads.
2558 * Example:
2560 * require 'thread'
2561 * semaphore = Mutex.new
2563 * a = Thread.new {
2564 * semaphore.synchronize {
2565 * # access shared resource
2569 * b = Thread.new {
2570 * semaphore.synchronize {
2571 * # access shared resource
2577 #define GetMutexPtr(obj, tobj) \
2578 Data_Get_Struct(obj, mutex_t, tobj)
2580 static const char *mutex_unlock(mutex_t *mutex);
2582 static void
2583 mutex_free(void *ptr)
2585 if (ptr) {
2586 mutex_t *mutex = ptr;
2587 if (mutex->th) {
2588 /* rb_warn("free locked mutex"); */
2589 mutex_unlock(mutex);
2591 native_mutex_destroy(&mutex->lock);
2592 native_cond_destroy(&mutex->cond);
2594 ruby_xfree(ptr);
2597 static VALUE
2598 mutex_alloc(VALUE klass)
2600 VALUE volatile obj;
2601 mutex_t *mutex;
2603 obj = Data_Make_Struct(klass, mutex_t, NULL, mutex_free, mutex);
2604 native_mutex_initialize(&mutex->lock);
2605 native_cond_initialize(&mutex->cond);
2606 return obj;
2610 * call-seq:
2611 * Mutex.new => mutex
2613 * Creates a new Mutex
2615 static VALUE
2616 mutex_initialize(VALUE self)
2618 return self;
2621 VALUE
2622 rb_mutex_new(void)
2624 return mutex_alloc(rb_cMutex);
2628 * call-seq:
2629 * mutex.locked? => true or false
2631 * Returns +true+ if this lock is currently held by some thread.
2633 VALUE
2634 rb_mutex_locked_p(VALUE self)
2636 mutex_t *mutex;
2637 GetMutexPtr(self, mutex);
2638 return mutex->th ? Qtrue : Qfalse;
2641 static void
2642 mutex_locked(rb_thread_t *th, VALUE self)
2644 mutex_t *mutex;
2645 GetMutexPtr(self, mutex);
2647 if (th->keeping_mutexes) {
2648 mutex->next_mutex = th->keeping_mutexes;
2650 th->keeping_mutexes = mutex;
2654 * call-seq:
2655 * mutex.try_lock => true or false
2657 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
2658 * lock was granted.
2660 VALUE
2661 rb_mutex_trylock(VALUE self)
2663 mutex_t *mutex;
2664 VALUE locked = Qfalse;
2665 GetMutexPtr(self, mutex);
2667 if (mutex->th == GET_THREAD()) {
2668 rb_raise(rb_eThreadError, "deadlock; recursive locking");
2671 native_mutex_lock(&mutex->lock);
2672 if (mutex->th == 0) {
2673 mutex->th = GET_THREAD();
2674 locked = Qtrue;
2676 mutex_locked(GET_THREAD(), self);
2678 native_mutex_unlock(&mutex->lock);
2680 return locked;
2683 static int
2684 lock_func(rb_thread_t *th, mutex_t *mutex, int last_thread)
2686 int interrupted = 0;
2687 #if 0 /* for debug */
2688 native_thread_yield();
2689 #endif
2691 native_mutex_lock(&mutex->lock);
2692 th->transition_for_lock = 0;
2693 while (mutex->th || (mutex->th = th, 0)) {
2694 if (last_thread) {
2695 interrupted = 2;
2696 break;
2699 mutex->cond_waiting++;
2700 native_cond_wait(&mutex->cond, &mutex->lock);
2701 mutex->cond_notified--;
2703 if (RUBY_VM_INTERRUPTED(th)) {
2704 interrupted = 1;
2705 break;
2708 th->transition_for_lock = 1;
2709 native_mutex_unlock(&mutex->lock);
2711 if (interrupted == 2) native_thread_yield();
2712 #if 0 /* for debug */
2713 native_thread_yield();
2714 #endif
2716 return interrupted;
2719 static void
2720 lock_interrupt(void *ptr)
2722 mutex_t *mutex = (mutex_t *)ptr;
2723 native_mutex_lock(&mutex->lock);
2724 if (mutex->cond_waiting > 0) {
2725 native_cond_broadcast(&mutex->cond);
2726 mutex->cond_notified = mutex->cond_waiting;
2727 mutex->cond_waiting = 0;
2729 native_mutex_unlock(&mutex->lock);
2733 * call-seq:
2734 * mutex.lock => true or false
2736 * Attempts to grab the lock and waits if it isn't available.
2737 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
2739 VALUE
2740 rb_mutex_lock(VALUE self)
2742 if (rb_mutex_trylock(self) == Qfalse) {
2743 mutex_t *mutex;
2744 rb_thread_t *th = GET_THREAD();
2745 GetMutexPtr(self, mutex);
2747 while (mutex->th != th) {
2748 int interrupted;
2749 enum rb_thread_status prev_status = th->status;
2750 int last_thread = 0;
2751 struct rb_unblock_callback oldubf;
2753 set_unblock_function(th, lock_interrupt, mutex, &oldubf);
2754 th->status = THREAD_STOPPED_FOREVER;
2755 th->vm->sleeper++;
2756 th->locking_mutex = self;
2757 if (vm_living_thread_num(th->vm) == th->vm->sleeper) {
2758 last_thread = 1;
2761 th->transition_for_lock = 1;
2762 BLOCKING_REGION_CORE({
2763 interrupted = lock_func(th, mutex, last_thread);
2765 th->transition_for_lock = 0;
2766 remove_signal_thread_list(th);
2767 reset_unblock_function(th, &oldubf);
2769 th->locking_mutex = Qfalse;
2770 if (mutex->th && interrupted == 2) {
2771 rb_check_deadlock(th->vm);
2773 if (th->status == THREAD_STOPPED_FOREVER) {
2774 th->status = prev_status;
2776 th->vm->sleeper--;
2778 if (mutex->th == th) mutex_locked(th, self);
2780 if (interrupted) {
2781 RUBY_VM_CHECK_INTS();
2785 return self;
2788 static const char *
2789 mutex_unlock(mutex_t *mutex)
2791 const char *err = NULL;
2792 rb_thread_t *th = GET_THREAD();
2793 mutex_t *th_mutex;
2795 native_mutex_lock(&mutex->lock);
2797 if (mutex->th == 0) {
2798 err = "Attempt to unlock a mutex which is not locked";
2800 else if (mutex->th != GET_THREAD()) {
2801 err = "Attempt to unlock a mutex which is locked by another thread";
2803 else {
2804 mutex->th = 0;
2805 if (mutex->cond_waiting > 0) {
2806 /* waiting thread */
2807 native_cond_signal(&mutex->cond);
2808 mutex->cond_waiting--;
2809 mutex->cond_notified++;
2813 native_mutex_unlock(&mutex->lock);
2815 if (!err) {
2816 th_mutex = th->keeping_mutexes;
2817 if (th_mutex == mutex) {
2818 th->keeping_mutexes = mutex->next_mutex;
2820 else {
2821 while (1) {
2822 mutex_t *tmp_mutex;
2823 tmp_mutex = th_mutex->next_mutex;
2824 if (tmp_mutex == mutex) {
2825 th_mutex->next_mutex = tmp_mutex->next_mutex;
2826 break;
2828 th_mutex = tmp_mutex;
2831 mutex->next_mutex = NULL;
2834 return err;
2838 * call-seq:
2839 * mutex.unlock => self
2841 * Releases the lock.
2842 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
2844 VALUE
2845 rb_mutex_unlock(VALUE self)
2847 const char *err;
2848 mutex_t *mutex;
2849 GetMutexPtr(self, mutex);
2851 err = mutex_unlock(mutex);
2852 if (err) rb_raise(rb_eThreadError, err);
2854 return self;
2857 static void
2858 rb_mutex_unlock_all(mutex_t *mutexes)
2860 const char *err;
2861 mutex_t *mutex;
2863 while (mutexes) {
2864 mutex = mutexes;
2865 /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
2866 mutexes); */
2867 mutexes = mutex->next_mutex;
2868 err = mutex_unlock(mutex);
2869 if (err) rb_bug("invalid keeping_mutexes: %s", err);
2873 static VALUE
2874 rb_mutex_sleep_forever(VALUE time)
2876 rb_thread_sleep_deadly();
2877 return Qnil;
2880 static VALUE
2881 rb_mutex_wait_for(VALUE time)
2883 const struct timeval *t = (struct timeval *)time;
2884 rb_thread_wait_for(*t);
2885 return Qnil;
2888 VALUE
2889 rb_mutex_sleep(VALUE self, VALUE timeout)
2891 time_t beg, end;
2892 struct timeval t;
2894 if (!NIL_P(timeout)) {
2895 t = rb_time_interval(timeout);
2897 rb_mutex_unlock(self);
2898 beg = time(0);
2899 if (NIL_P(timeout)) {
2900 rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
2902 else {
2903 rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
2905 end = time(0) - beg;
2906 return INT2FIX(end);
2910 * call-seq:
2911 * mutex.sleep(timeout = nil) => number
2913 * Releases the lock and sleeps +timeout+ seconds if it is given and
2914 * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
2915 * the current thread.
2917 static VALUE
2918 mutex_sleep(int argc, VALUE *argv, VALUE self)
2920 VALUE timeout;
2922 rb_scan_args(argc, argv, "01", &timeout);
2923 return rb_mutex_sleep(self, timeout);
2927 * call-seq:
2928 * mutex.synchronize { ... } => result of the block
2930 * Obtains a lock, runs the block, and releases the lock when the block
2931 * completes. See the example under +Mutex+.
2934 VALUE
2935 rb_thread_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
2937 rb_mutex_lock(mutex);
2938 return rb_ensure(func, arg, rb_mutex_unlock, mutex);
2942 * Document-class: Barrier
2944 typedef struct rb_thread_list_struct rb_thread_list_t;
2946 struct rb_thread_list_struct {
2947 rb_thread_t *th;
2948 rb_thread_list_t *next;
2951 static void
2952 thlist_mark(void *ptr)
2954 rb_thread_list_t *q = ptr;
2956 for (; q; q = q->next) {
2957 rb_gc_mark(q->th->self);
2961 static void
2962 thlist_free(void *ptr)
2964 rb_thread_list_t *q = ptr, *next;
2966 for (; q; q = next) {
2967 next = q->next;
2968 ruby_xfree(q);
2972 static int
2973 thlist_signal(rb_thread_list_t **list, unsigned int maxth, rb_thread_t **woken_thread)
2975 int woken = 0;
2976 rb_thread_list_t *q;
2978 while ((q = *list) != NULL) {
2979 rb_thread_t *th = q->th;
2981 *list = q->next;
2982 ruby_xfree(q);
2983 if (th->status != THREAD_KILLED) {
2984 rb_thread_ready(th);
2985 if (!woken && woken_thread) *woken_thread = th;
2986 if (++woken >= maxth && maxth) break;
2989 return woken;
2992 typedef struct {
2993 rb_thread_t *owner;
2994 rb_thread_list_t *waiting, **tail;
2995 } rb_barrier_t;
2997 static void
2998 barrier_mark(void *ptr)
3000 rb_barrier_t *b = ptr;
3002 if (b->owner) rb_gc_mark(b->owner->self);
3003 thlist_mark(b->waiting);
3006 static void
3007 barrier_free(void *ptr)
3009 rb_barrier_t *b = ptr;
3011 b->owner = 0;
3012 thlist_free(b->waiting);
3013 b->waiting = 0;
3014 ruby_xfree(ptr);
3017 static VALUE
3018 barrier_alloc(VALUE klass)
3020 VALUE volatile obj;
3021 rb_barrier_t *barrier;
3023 obj = Data_Make_Struct(klass, rb_barrier_t, barrier_mark, barrier_free, barrier);
3024 barrier->owner = GET_THREAD();
3025 barrier->waiting = 0;
3026 barrier->tail = &barrier->waiting;
3027 return obj;
3030 VALUE
3031 rb_barrier_new(void)
3033 return barrier_alloc(rb_cBarrier);
3036 VALUE
3037 rb_barrier_wait(VALUE self)
3039 rb_barrier_t *barrier;
3040 rb_thread_list_t *q;
3042 Data_Get_Struct(self, rb_barrier_t, barrier);
3043 if (!barrier->owner || barrier->owner->status == THREAD_KILLED) {
3044 barrier->owner = 0;
3045 if (thlist_signal(&barrier->waiting, 1, &barrier->owner)) return Qfalse;
3046 return Qtrue;
3048 else if (barrier->owner == GET_THREAD()) {
3049 return Qfalse;
3051 else {
3052 *barrier->tail = q = ALLOC(rb_thread_list_t);
3053 q->th = GET_THREAD();
3054 q->next = 0;
3055 barrier->tail = &q->next;
3056 rb_thread_sleep_forever();
3057 return barrier->owner == GET_THREAD() ? Qtrue : Qfalse;
3061 VALUE
3062 rb_barrier_release(VALUE self)
3064 rb_barrier_t *barrier;
3065 unsigned int n;
3067 Data_Get_Struct(self, rb_barrier_t, barrier);
3068 if (barrier->owner != GET_THREAD()) {
3069 rb_raise(rb_eThreadError, "not owned");
3071 n = thlist_signal(&barrier->waiting, 0, &barrier->owner);
3072 return n ? UINT2NUM(n) : Qfalse;
3075 /* variables for recursive traversals */
3076 static ID recursive_key;
3078 static VALUE
3079 recursive_check(VALUE hash, VALUE obj)
3081 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3082 return Qfalse;
3084 else {
3085 VALUE list = rb_hash_aref(hash, ID2SYM(rb_frame_this_func()));
3087 if (NIL_P(list) || TYPE(list) != T_HASH)
3088 return Qfalse;
3089 if (NIL_P(rb_hash_lookup(list, obj)))
3090 return Qfalse;
3091 return Qtrue;
3095 static VALUE
3096 recursive_push(VALUE hash, VALUE obj)
3098 VALUE list, sym;
3100 sym = ID2SYM(rb_frame_this_func());
3101 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3102 hash = rb_hash_new();
3103 rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
3104 list = Qnil;
3106 else {
3107 list = rb_hash_aref(hash, sym);
3109 if (NIL_P(list) || TYPE(list) != T_HASH) {
3110 list = rb_hash_new();
3111 rb_hash_aset(hash, sym, list);
3113 rb_hash_aset(list, obj, Qtrue);
3114 return hash;
3117 static void
3118 recursive_pop(VALUE hash, VALUE obj)
3120 VALUE list, sym;
3122 sym = ID2SYM(rb_frame_this_func());
3123 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3124 VALUE symname;
3125 VALUE thrname;
3126 symname = rb_inspect(sym);
3127 thrname = rb_inspect(rb_thread_current());
3129 rb_raise(rb_eTypeError, "invalid inspect_tbl hash for %s in %s",
3130 StringValuePtr(symname), StringValuePtr(thrname));
3132 list = rb_hash_aref(hash, sym);
3133 if (NIL_P(list) || TYPE(list) != T_HASH) {
3134 VALUE symname = rb_inspect(sym);
3135 VALUE thrname = rb_inspect(rb_thread_current());
3136 rb_raise(rb_eTypeError, "invalid inspect_tbl list for %s in %s",
3137 StringValuePtr(symname), StringValuePtr(thrname));
3139 rb_hash_delete(list, obj);
3142 VALUE
3143 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
3145 VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
3146 VALUE objid = rb_obj_id(obj);
3148 if (recursive_check(hash, objid)) {
3149 return (*func) (obj, arg, Qtrue);
3151 else {
3152 VALUE result = Qundef;
3153 int state;
3155 hash = recursive_push(hash, objid);
3156 PUSH_TAG();
3157 if ((state = EXEC_TAG()) == 0) {
3158 result = (*func) (obj, arg, Qfalse);
3160 POP_TAG();
3161 recursive_pop(hash, objid);
3162 if (state)
3163 JUMP_TAG(state);
3164 return result;
3168 /* tracer */
3170 static rb_event_hook_t *
3171 alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3173 rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
3174 hook->func = func;
3175 hook->flag = events;
3176 hook->data = data;
3177 return hook;
3180 static void
3181 thread_reset_event_flags(rb_thread_t *th)
3183 rb_event_hook_t *hook = th->event_hooks;
3184 rb_event_flag_t flag = th->event_flags & RUBY_EVENT_VM;
3186 while (hook) {
3187 flag |= hook->flag;
3188 hook = hook->next;
3192 void
3193 rb_thread_add_event_hook(rb_thread_t *th,
3194 rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3196 rb_event_hook_t *hook = alloc_event_hook(func, events, data);
3197 hook->next = th->event_hooks;
3198 th->event_hooks = hook;
3199 thread_reset_event_flags(th);
3202 static int
3203 set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
3205 VALUE thval = key;
3206 rb_thread_t *th;
3207 GetThreadPtr(thval, th);
3209 if (flag) {
3210 th->event_flags |= RUBY_EVENT_VM;
3212 else {
3213 th->event_flags &= (~RUBY_EVENT_VM);
3215 return ST_CONTINUE;
3218 static void
3219 set_threads_event_flags(int flag)
3221 st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
3224 void
3225 rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3227 rb_event_hook_t *hook = alloc_event_hook(func, events, data);
3228 rb_vm_t *vm = GET_VM();
3230 hook->next = vm->event_hooks;
3231 vm->event_hooks = hook;
3233 set_threads_event_flags(1);
3236 static int
3237 remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
3239 rb_event_hook_t *prev = NULL, *hook = *root, *next;
3241 while (hook) {
3242 next = hook->next;
3243 if (func == 0 || hook->func == func) {
3244 if (prev) {
3245 prev->next = hook->next;
3247 else {
3248 *root = hook->next;
3250 xfree(hook);
3252 else {
3253 prev = hook;
3255 hook = next;
3257 return -1;
3261 rb_thread_remove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
3263 int ret = remove_event_hook(&th->event_hooks, func);
3264 thread_reset_event_flags(th);
3265 return ret;
3269 rb_remove_event_hook(rb_event_hook_func_t func)
3271 rb_vm_t *vm = GET_VM();
3272 rb_event_hook_t *hook = vm->event_hooks;
3273 int ret = remove_event_hook(&vm->event_hooks, func);
3275 if (hook != NULL && vm->event_hooks == NULL) {
3276 set_threads_event_flags(0);
3279 return ret;
3282 static int
3283 clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
3285 rb_thread_t *th;
3286 GetThreadPtr((VALUE)key, th);
3287 rb_thread_remove_event_hook(th, 0);
3288 return ST_CONTINUE;
3291 void
3292 rb_clear_trace_func(void)
3294 st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
3295 rb_remove_event_hook(0);
3298 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
3301 * call-seq:
3302 * set_trace_func(proc) => proc
3303 * set_trace_func(nil) => nil
3305 * Establishes _proc_ as the handler for tracing, or disables
3306 * tracing if the parameter is +nil+. _proc_ takes up
3307 * to six parameters: an event name, a filename, a line number, an
3308 * object id, a binding, and the name of a class. _proc_ is
3309 * invoked whenever an event occurs. Events are: <code>c-call</code>
3310 * (call a C-language routine), <code>c-return</code> (return from a
3311 * C-language routine), <code>call</code> (call a Ruby method),
3312 * <code>class</code> (start a class or module definition),
3313 * <code>end</code> (finish a class or module definition),
3314 * <code>line</code> (execute code on a new line), <code>raise</code>
3315 * (raise an exception), and <code>return</code> (return from a Ruby
3316 * method). Tracing is disabled within the context of _proc_.
3318 * class Test
3319 * def test
3320 * a = 1
3321 * b = 2
3322 * end
3323 * end
3325 * set_trace_func proc { |event, file, line, id, binding, classname|
3326 * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
3328 * t = Test.new
3329 * t.test
3331 * line prog.rb:11 false
3332 * c-call prog.rb:11 new Class
3333 * c-call prog.rb:11 initialize Object
3334 * c-return prog.rb:11 initialize Object
3335 * c-return prog.rb:11 new Class
3336 * line prog.rb:12 false
3337 * call prog.rb:2 test Test
3338 * line prog.rb:3 test Test
3339 * line prog.rb:4 test Test
3340 * return prog.rb:4 test Test
3343 static VALUE
3344 set_trace_func(VALUE obj, VALUE trace)
3346 rb_remove_event_hook(call_trace_func);
3348 if (NIL_P(trace)) {
3349 return Qnil;
3352 if (!rb_obj_is_proc(trace)) {
3353 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
3356 rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
3357 return trace;
3360 static void
3361 thread_add_trace_func(rb_thread_t *th, VALUE trace)
3363 if (!rb_obj_is_proc(trace)) {
3364 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
3367 rb_thread_add_event_hook(th, call_trace_func, RUBY_EVENT_ALL, trace);
3370 static VALUE
3371 thread_add_trace_func_m(VALUE obj, VALUE trace)
3373 rb_thread_t *th;
3374 GetThreadPtr(obj, th);
3375 thread_add_trace_func(th, trace);
3376 return trace;
3379 static VALUE
3380 thread_set_trace_func_m(VALUE obj, VALUE trace)
3382 rb_thread_t *th;
3383 GetThreadPtr(obj, th);
3384 rb_thread_remove_event_hook(th, call_trace_func);
3386 if (NIL_P(trace)) {
3387 return Qnil;
3389 thread_add_trace_func(th, trace);
3390 return trace;
3393 static const char *
3394 get_event_name(rb_event_flag_t event)
3396 switch (event) {
3397 case RUBY_EVENT_LINE:
3398 return "line";
3399 case RUBY_EVENT_CLASS:
3400 return "class";
3401 case RUBY_EVENT_END:
3402 return "end";
3403 case RUBY_EVENT_CALL:
3404 return "call";
3405 case RUBY_EVENT_RETURN:
3406 return "return";
3407 case RUBY_EVENT_C_CALL:
3408 return "c-call";
3409 case RUBY_EVENT_C_RETURN:
3410 return "c-return";
3411 case RUBY_EVENT_RAISE:
3412 return "raise";
3413 default:
3414 return "unknown";
3418 VALUE ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always);
3420 struct call_trace_func_args {
3421 rb_event_flag_t event;
3422 VALUE proc;
3423 VALUE self;
3424 ID id;
3425 VALUE klass;
3428 static VALUE
3429 call_trace_proc(VALUE args, int tracing)
3431 struct call_trace_func_args *p = (struct call_trace_func_args *)args;
3432 VALUE eventname = rb_str_new2(get_event_name(p->event));
3433 VALUE filename = rb_str_new2(rb_sourcefile());
3434 VALUE argv[6];
3435 int line = rb_sourceline();
3436 ID id = 0;
3437 VALUE klass = 0;
3439 if (p->event == RUBY_EVENT_C_CALL ||
3440 p->event == RUBY_EVENT_C_RETURN) {
3441 id = p->id;
3442 klass = p->klass;
3444 else {
3445 rb_thread_method_id_and_class(GET_THREAD(), &id, &klass);
3447 if (id == ID_ALLOCATOR)
3448 return Qnil;
3449 if (klass) {
3450 if (TYPE(klass) == T_ICLASS) {
3451 klass = RBASIC(klass)->klass;
3453 else if (FL_TEST(klass, FL_SINGLETON)) {
3454 klass = rb_iv_get(klass, "__attached__");
3458 argv[0] = eventname;
3459 argv[1] = filename;
3460 argv[2] = INT2FIX(line);
3461 argv[3] = id ? ID2SYM(id) : Qnil;
3462 argv[4] = p->self ? rb_binding_new() : Qnil;
3463 argv[5] = klass ? klass : Qnil;
3465 return rb_proc_call_with_block(p->proc, 6, argv, Qnil);
3468 static void
3469 call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
3471 struct call_trace_func_args args;
3473 args.event = event;
3474 args.proc = proc;
3475 args.self = self;
3476 args.id = id;
3477 args.klass = klass;
3478 ruby_suppress_tracing(call_trace_proc, (VALUE)&args, Qfalse);
3481 VALUE
3482 ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
3484 rb_thread_t *th = GET_THREAD();
3485 int state, raised, tracing;
3486 VALUE result = Qnil;
3488 if ((tracing = th->tracing) != 0 && !always) {
3489 return Qnil;
3491 else {
3492 th->tracing = 1;
3495 raised = rb_thread_reset_raised(th);
3497 PUSH_TAG();
3498 if ((state = EXEC_TAG()) == 0) {
3499 result = (*func)(arg, tracing);
3502 if (raised) {
3503 rb_thread_set_raised(th);
3505 POP_TAG();
3507 th->tracing = tracing;
3508 if (state) {
3509 JUMP_TAG(state);
3512 return result;
3516 * +Thread+ encapsulates the behavior of a thread of
3517 * execution, including the main thread of the Ruby script.
3519 * In the descriptions of the methods in this class, the parameter _sym_
3520 * refers to a symbol, which is either a quoted string or a
3521 * +Symbol+ (such as <code>:name</code>).
3524 void
3525 Init_Thread(void)
3527 #undef rb_intern
3528 #define rb_intern(str) rb_intern_const(str)
3530 VALUE cThGroup;
3532 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
3533 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
3534 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
3535 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
3536 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
3537 rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
3538 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
3539 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
3540 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
3541 rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
3542 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
3543 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
3544 #if THREAD_DEBUG < 0
3545 rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
3546 rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
3547 #endif
3549 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
3550 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
3551 rb_define_method(rb_cThread, "join", thread_join_m, -1);
3552 rb_define_method(rb_cThread, "value", thread_value, 0);
3553 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
3554 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
3555 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
3556 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
3557 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
3558 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
3559 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
3560 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
3561 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
3562 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
3563 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
3564 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
3565 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
3566 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
3567 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
3568 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
3569 rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
3570 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
3572 rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);
3574 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
3575 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
3576 rb_define_method(cThGroup, "list", thgroup_list, 0);
3577 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
3578 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
3579 rb_define_method(cThGroup, "add", thgroup_add, 1);
3582 rb_thread_t *th = GET_THREAD();
3583 th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
3584 rb_define_const(cThGroup, "Default", th->thgroup);
3587 rb_cMutex = rb_define_class("Mutex", rb_cObject);
3588 rb_define_alloc_func(rb_cMutex, mutex_alloc);
3589 rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
3590 rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
3591 rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
3592 rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
3593 rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
3594 rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
3596 recursive_key = rb_intern("__recursive_key__");
3597 rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
3599 /* trace */
3600 rb_define_global_function("set_trace_func", set_trace_func, 1);
3601 rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
3602 rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);
3604 /* init thread core */
3605 Init_native_thread();
3607 /* main thread setting */
3609 /* acquire global interpreter lock */
3610 rb_thread_lock_t *lp = &GET_THREAD()->vm->global_vm_lock;
3611 native_mutex_initialize(lp);
3612 native_mutex_lock(lp);
3613 native_mutex_initialize(&GET_THREAD()->interrupt_lock);
3617 rb_thread_create_timer_thread();
3619 (void)native_mutex_trylock;
3620 (void)ruby_thread_set_native;
3624 ruby_native_thread_p(void)
3626 rb_thread_t *th = ruby_thread_from_native();
3628 return th ? Qtrue : Qfalse;
3631 static int
3632 check_deadlock_i(st_data_t key, st_data_t val, int *found)
3634 VALUE thval = key;
3635 rb_thread_t *th;
3636 GetThreadPtr(thval, th);
3638 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th) || th->transition_for_lock) {
3639 *found = 1;
3641 else if (th->locking_mutex) {
3642 mutex_t *mutex;
3643 GetMutexPtr(th->locking_mutex, mutex);
3645 native_mutex_lock(&mutex->lock);
3646 if (mutex->th == th || (!mutex->th && mutex->cond_notified)) {
3647 *found = 1;
3649 native_mutex_unlock(&mutex->lock);
3652 return (*found) ? ST_STOP : ST_CONTINUE;
#if 0 /* for debug */
/* dump per-thread scheduling state; debug aid, normally compiled out */
static int
debug_i(st_data_t key, st_data_t val, int *found)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    printf("th:%p %d %d %d", th, th->status, th->interrupt_flag, th->transition_for_lock);
    if (th->locking_mutex) {
        mutex_t *mutex;
        GetMutexPtr(th->locking_mutex, mutex);

        native_mutex_lock(&mutex->lock);
        printf(" %p %d\n", mutex->th, mutex->cond_notified);
        native_mutex_unlock(&mutex->lock);
    }
    else puts("");

    return ST_CONTINUE;
}
#endif
3678 static void
3679 rb_check_deadlock(rb_vm_t *vm)
3681 int found = 0;
3683 if (vm_living_thread_num(vm) > vm->sleeper) return;
3684 if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
3686 st_foreach(vm->living_threads, check_deadlock_i, (st_data_t)&found);
3688 if (!found) {
3689 VALUE argv[2];
3690 argv[0] = rb_eFatal;
3691 argv[1] = rb_str_new2("deadlock detected");
3692 #if 0 /* for debug */
3693 printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
3694 st_foreach(vm->living_threads, debug_i, (st_data_t)0);
3695 #endif
3696 rb_thread_raise(2, argv, vm->main_thread);
3700 static void
3701 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
3703 VALUE coverage = GET_THREAD()->cfp->iseq->coverage;
3704 if (coverage && RBASIC(coverage)->klass == 0) {
3705 long line = rb_sourceline() - 1;
3706 long count;
3707 if (RARRAY_PTR(coverage)[line] == Qnil) {
3708 rb_bug("bug");
3710 count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
3711 if (POSFIXABLE(count)) {
3712 RARRAY_PTR(coverage)[line] = LONG2FIX(count);
3717 VALUE
3718 rb_get_coverages(void)
3720 return GET_VM()->coverages;
3723 void
3724 rb_set_coverages(VALUE coverages)
3726 GET_VM()->coverages = coverages;
3727 rb_add_event_hook(update_coverage, RUBY_EVENT_COVERAGE, Qnil);
3730 void
3731 rb_reset_coverages(void)
3733 GET_VM()->coverages = Qfalse;
3734 rb_remove_event_hook(update_coverage);