/**********************************************************************

  created at: Tue Aug 14 19:37:09 2012

  Copyright (C) 1993-2012 Yukihiro Matsumoto

**********************************************************************/
/*
 * This file includes two parts:
 *
 * (1) set_trace_func internal mechanisms
 *
 * (2) Ruby level API
 *  (2-1) set_trace_func API
 *  (2-2) TracePoint API (not yet)
 *
 */
24 #include "eval_intern.h"
26 #include "internal/hash.h"
27 #include "internal/symbol.h"
30 #include "ruby/debug.h"
32 #include "ruby/ractor.h"
/* (1) trace mechanisms */
typedef struct rb_event_hook_struct {
    rb_event_hook_flag_t hook_flags;
    rb_event_flag_t events;
    rb_event_hook_func_t func;
    VALUE data;
    struct rb_event_hook_struct *next;

    struct {
        rb_thread_t *th;
        unsigned int target_line;
    } filter;
} rb_event_hook_t;
typedef void (*rb_event_hook_raw_arg_func_t)(VALUE data, const rb_trace_arg_t *arg);
#define MAX_EVENT_NUM 32
void
rb_hook_list_mark(rb_hook_list_t *hooks)
{
    rb_event_hook_t *hook = hooks->hooks;

    while (hook) {
        rb_gc_mark(hook->data);
        hook = hook->next;
    }
}
static void clean_hooks(const rb_execution_context_t *ec, rb_hook_list_t *list);
void
rb_hook_list_free(rb_hook_list_t *hooks)
{
    hooks->need_clean = true;

    if (hooks->running == 0) {
        clean_hooks(GET_EC(), hooks);
    }
}
/* ruby_vm_event_flags management */

void rb_clear_attr_ccs(void);
static void
update_global_event_hook(rb_event_flag_t prev_events, rb_event_flag_t new_events)
{
    rb_event_flag_t new_iseq_events = new_events & ISEQ_TRACE_EVENTS;
    rb_event_flag_t enabled_iseq_events = ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS;

    if (new_iseq_events & ~enabled_iseq_events) {
        // :class events are triggered only in ISEQ_TYPE_CLASS, but mjit_target_iseq_p ignores such iseqs.
        // Thus we don't need to cancel JIT-ed code for :class events.
        if (new_iseq_events != RUBY_EVENT_CLASS) {
            // Stop calling all JIT-ed code. We can't rewrite existing JIT-ed code to trace_ insns for now.
            mjit_cancel_all("TracePoint is enabled");
        }

        /* write all ISeqs if and only if new events are added */
        rb_iseq_trace_set_all(new_iseq_events | enabled_iseq_events);
    }

    // if c_call or c_return is activated:
    if (((prev_events & RUBY_EVENT_C_CALL) == 0 && (new_events & RUBY_EVENT_C_CALL)) ||
        ((prev_events & RUBY_EVENT_C_RETURN) == 0 && (new_events & RUBY_EVENT_C_RETURN))) {
        rb_clear_attr_ccs();
    }

    ruby_vm_event_flags = new_events;
    ruby_vm_event_enabled_global_flags |= new_events;
    rb_objspace_set_event_hook(new_events);

    if (new_events & RUBY_EVENT_TRACEPOINT_ALL) {
        // Invalidate all code if listening for any TracePoint event.
        // Internal events fire inside C routines so don't need special handling.
        // Do this last so other ractors see updated vm events when they wake up.
        rb_yjit_tracing_invalidate_all();
    }
}
/* add/remove hooks */
static rb_event_hook_t *
alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
{
    rb_event_hook_t *hook;

    if ((events & RUBY_INTERNAL_EVENT_MASK) && (events & ~RUBY_INTERNAL_EVENT_MASK)) {
        rb_raise(rb_eTypeError, "Can not specify normal event and internal event simultaneously.");
    }

    hook = ALLOC(rb_event_hook_t);
    hook->hook_flags = hook_flags;
    hook->events = events;
    hook->func = func;
    hook->data = data;

    /* no filters */
    hook->filter.th = NULL;
    hook->filter.target_line = 0;

    return hook;
}
static void
hook_list_connect(VALUE list_owner, rb_hook_list_t *list, rb_event_hook_t *hook, int global_p)
{
    rb_event_flag_t prev_events = list->events;
    hook->next = list->hooks;
    list->hooks = hook;
    list->events |= hook->events;

    if (global_p) {
        /* global hooks are root objects at GC mark. */
        update_global_event_hook(prev_events, list->events);
    }
    else {
        RB_OBJ_WRITTEN(list_owner, Qundef, hook->data);
    }
}
static void
connect_event_hook(const rb_execution_context_t *ec, rb_event_hook_t *hook)
{
    rb_hook_list_t *list = rb_ec_ractor_hooks(ec);
    hook_list_connect(Qundef, list, hook, TRUE);
}
static void
rb_threadptr_add_event_hook(const rb_execution_context_t *ec, rb_thread_t *th,
                            rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
{
    rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
    hook->filter.th = th;
    connect_event_hook(ec, hook);
}
void
rb_thread_add_event_hook(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
}

void
rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_add_event_hook2(func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
}

void
rb_thread_add_event_hook2(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
{
    rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, hook_flags);
}

void
rb_add_event_hook2(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
{
    rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
    connect_event_hook(GET_EC(), hook);
}
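
/*
 * Usage sketch (not part of upstream vm_trace.c): a C extension would
 * register a global hook through the public API above. The callback name
 * and the logging it performs are illustrative assumptions.
 *
 *   static void
 *   my_call_logger(rb_event_flag_t event, VALUE data, VALUE self, ID mid, VALUE klass)
 *   {
 *       fprintf(stderr, "call: %s\n", mid ? rb_id2name(mid) : "(unknown)");
 *   }
 *
 *   // in the extension's Init function:
 *   //   rb_add_event_hook(my_call_logger, RUBY_EVENT_CALL, Qnil);
 *   // later, remove every hook registered with this function pointer:
 *   //   rb_remove_event_hook(my_call_logger);
 */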
static void
clean_hooks(const rb_execution_context_t *ec, rb_hook_list_t *list)
{
    rb_event_hook_t *hook, **nextp = &list->hooks;
    rb_event_flag_t prev_events = list->events;

    VM_ASSERT(list->running == 0);
    VM_ASSERT(list->need_clean == true);

    list->events = 0;
    list->need_clean = false;

    while ((hook = *nextp) != 0) {
        if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
            *nextp = hook->next;
            xfree(hook);
        }
        else {
            list->events |= hook->events; /* update active events */
            nextp = &hook->next;
        }
    }

    if (list->is_local) {
        if (list->events == 0) {
            /* local events */
            ruby_xfree(list);
        }
    }
    else {
        update_global_event_hook(prev_events, list->events);
    }
}
static void
clean_hooks_check(const rb_execution_context_t *ec, rb_hook_list_t *list)
{
    if (UNLIKELY(list->need_clean)) {
        if (list->running == 0) {
            clean_hooks(ec, list);
        }
    }
}
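
/*
 * Deferred deletion: remove_event_hook() only marks entries with
 * RUBY_EVENT_HOOK_FLAG_DELETED and raises need_clean; the list is actually
 * swept by clean_hooks() once list->running drops back to zero, so a hook
 * is never unlinked while an invocation is still walking the list.
 */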
#define MATCH_ANY_FILTER_TH ((rb_thread_t *)1)

/* if func is 0, then clear all funcs */
static int
remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
{
    rb_hook_list_t *list = rb_ec_ractor_hooks(ec);
    int ret = 0;
    rb_event_hook_t *hook = list->hooks;

    while (hook) {
        if (func == 0 || hook->func == func) {
            if (hook->filter.th == filter_th || filter_th == MATCH_ANY_FILTER_TH) {
                if (data == Qundef || hook->data == data) {
                    hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
                    ret++;
                    list->need_clean = true;
                }
            }
        }
        hook = hook->next;
    }

    clean_hooks_check(ec, list);
    return ret;
}
int
rb_threadptr_remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
{
    return remove_event_hook(ec, filter_th, func, data);
}

int
rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
{
    return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, Qundef);
}

int
rb_thread_remove_event_hook_with_data(VALUE thval, rb_event_hook_func_t func, VALUE data)
{
    return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, data);
}

int
rb_remove_event_hook(rb_event_hook_func_t func)
{
    return remove_event_hook(GET_EC(), NULL, func, Qundef);
}

int
rb_remove_event_hook_with_data(rb_event_hook_func_t func, VALUE data)
{
    return remove_event_hook(GET_EC(), NULL, func, data);
}

void
rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec)
{
    rb_threadptr_remove_event_hook(ec, rb_ec_thread_ptr(ec), 0, Qundef);
}

void
rb_ec_clear_all_trace_func(const rb_execution_context_t *ec)
{
    rb_threadptr_remove_event_hook(ec, MATCH_ANY_FILTER_TH, 0, Qundef);
}
static void
exec_hooks_body(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
    rb_event_hook_t *hook;

    for (hook = list->hooks; hook; hook = hook->next) {
        if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) &&
            (trace_arg->event & hook->events) &&
            (LIKELY(hook->filter.th == 0) || hook->filter.th == rb_ec_thread_ptr(ec)) &&
            (LIKELY(hook->filter.target_line == 0) || (hook->filter.target_line == (unsigned int)rb_vm_get_sourceline(ec->cfp)))) {
            if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_RAW_ARG)) {
                (*hook->func)(trace_arg->event, hook->data, trace_arg->self, trace_arg->id, trace_arg->klass);
            }
            else {
                (*((rb_event_hook_raw_arg_func_t)hook->func))(hook->data, trace_arg);
            }
        }
    }
}
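
/*
 * Sketch of the two callback conventions dispatched above (the callback
 * name is an illustrative assumption). A hook registered with
 * RUBY_EVENT_HOOK_FLAG_RAW_ARG has its function pointer cast back to
 * rb_event_hook_raw_arg_func_t and receives the rb_trace_arg_t directly,
 * instead of the unpacked (event, data, self, id, klass) tuple:
 *
 *   static void
 *   raw_hook(VALUE data, const rb_trace_arg_t *trace_arg)
 *   {
 *       rb_event_flag_t ev = rb_tracearg_event_flag((rb_trace_arg_t *)trace_arg);
 *       fprintf(stderr, "event flag: 0x%x\n", (unsigned)ev);
 *   }
 *
 *   rb_add_event_hook2((rb_event_hook_func_t)raw_hook, RUBY_EVENT_LINE, Qnil,
 *                      RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
 */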
static int
exec_hooks_precheck(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
    if (list->events & trace_arg->event) {
        list->running++;
        return TRUE;
    }
    else {
        return FALSE;
    }
}
static void
exec_hooks_postcheck(const rb_execution_context_t *ec, rb_hook_list_t *list)
{
    list->running--;
    clean_hooks_check(ec, list);
}
static void
exec_hooks_unprotected(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
    if (exec_hooks_precheck(ec, list, trace_arg) == 0) return;
    exec_hooks_body(ec, list, trace_arg);
    exec_hooks_postcheck(ec, list);
}
static int
exec_hooks_protected(rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
    enum ruby_tag_type state;
    volatile int raised;

    if (exec_hooks_precheck(ec, list, trace_arg) == 0) return 0;

    raised = rb_ec_reset_raised(ec);

    /* TODO: Support !RUBY_EVENT_HOOK_FLAG_SAFE hooks */

    EC_PUSH_TAG(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        exec_hooks_body(ec, list, trace_arg);
    }
    EC_POP_TAG();

    exec_hooks_postcheck(ec, list);

    if (raised) {
        rb_ec_set_raised(ec);
    }

    return state;
}
MJIT_FUNC_EXPORTED void
rb_exec_event_hooks(rb_trace_arg_t *trace_arg, rb_hook_list_t *hooks, int pop_p)
{
    rb_execution_context_t *ec = trace_arg->ec;

    if (UNLIKELY(trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
        if (ec->trace_arg && (ec->trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
            /* skip hooks because this thread is doing INTERNAL_EVENT */
        }
        else {
            rb_trace_arg_t *prev_trace_arg = ec->trace_arg;

            ec->trace_arg = trace_arg;
            /* only global hooks */
            exec_hooks_unprotected(ec, rb_ec_ractor_hooks(ec), trace_arg);
            ec->trace_arg = prev_trace_arg;
        }
    }
    else {
        if (ec->trace_arg == NULL && /* check reentrant */
            trace_arg->self != rb_mRubyVMFrozenCore /* skip special methods. TODO: remove it. */) {
            const VALUE errinfo = ec->errinfo;
            const VALUE old_recursive = ec->local_storage_recursive_hash;
            enum ruby_tag_type state = 0;

            /* setup */
            ec->local_storage_recursive_hash = ec->local_storage_recursive_hash_for_trace;
            ec->errinfo = Qnil;
            ec->trace_arg = trace_arg;

            /* kick hooks */
            if ((state = exec_hooks_protected(ec, hooks, trace_arg)) == TAG_NONE) {
                ec->errinfo = errinfo;
            }

            /* cleanup */
            ec->trace_arg = NULL;
            ec->local_storage_recursive_hash_for_trace = ec->local_storage_recursive_hash;
            ec->local_storage_recursive_hash = old_recursive;

            if (state) {
                if (pop_p) {
                    if (VM_FRAME_FINISHED_P(ec->cfp)) {
                        ec->tag = ec->tag->prev;
                    }
                    rb_vm_pop_frame(ec);
                }
                EC_JUMP_TAG(ec, state);
            }
        }
    }
}
VALUE
rb_suppress_tracing(VALUE (*func)(VALUE), VALUE arg)
{
    volatile int raised;
    volatile VALUE result = Qnil;
    rb_execution_context_t *const ec = GET_EC();
    rb_vm_t *const vm = rb_ec_vm_ptr(ec);
    enum ruby_tag_type state;
    rb_trace_arg_t dummy_trace_arg;
    dummy_trace_arg.event = 0;

    if (!ec->trace_arg) {
        ec->trace_arg = &dummy_trace_arg;
    }

    raised = rb_ec_reset_raised(ec);

    EC_PUSH_TAG(ec);
    if (LIKELY((state = EC_EXEC_TAG()) == TAG_NONE)) {
        result = (*func)(arg);
    }
    else {
        (void)*&vm; /* suppress "clobbered" warning */
    }
    EC_POP_TAG();

    if (raised) {
        rb_ec_set_raised(ec);
    }

    if (ec->trace_arg == &dummy_trace_arg) {
        ec->trace_arg = NULL;
    }

    if (state) {
#if defined RUBY_USE_SETJMPEX && RUBY_USE_SETJMPEX
        RB_GC_GUARD(result);
#endif
        EC_JUMP_TAG(ec, state);
    }

    return result;
}
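
/*
 * Usage sketch: code that must not fire hooks recursively (for example,
 * formatting a value from inside a trace callback) can be wrapped in
 * rb_suppress_tracing(). rb_inspect already matches the VALUE (*)(VALUE)
 * signature, so it can be passed directly:
 *
 *   VALUE str = rb_suppress_tracing(rb_inspect, obj);
 *
 * While the wrapped function runs, ec->trace_arg points at the dummy
 * argument, so rb_exec_event_hooks() treats it as reentrant and skips hooks.
 */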
static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
/* (2-1) set_trace_func (old API) */
/*
 *  call-seq:
 *     set_trace_func(proc) -> proc
 *     set_trace_func(nil)  -> nil
 *
 *  Establishes _proc_ as the handler for tracing, or disables
 *  tracing if the parameter is +nil+.
 *
 *  *Note:* this method is obsolete, please use TracePoint instead.
 *
 *  _proc_ takes up to six parameters:
 *
 *  *   an event name
 *  *   a filename
 *  *   a line number
 *  *   an object id
 *  *   a binding
 *  *   the name of a class
 *
 *  _proc_ is invoked whenever an event occurs.
 *
 *  Events are:
 *
 *  +c-call+:: call a C-language routine
 *  +c-return+:: return from a C-language routine
 *  +call+:: call a Ruby method
 *  +class+:: start a class or module definition
 *  +end+:: finish a class or module definition
 *  +line+:: execute code on a new line
 *  +raise+:: raise an exception
 *  +return+:: return from a Ruby method
 *
 *  Tracing is disabled within the context of _proc_.
 *
 *      class Test
 *        def test
 *          a = 1
 *          b = 2
 *        end
 *      end
 *
 *      set_trace_func proc { |event, file, line, id, binding, classname|
 *        printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
 *      }
 *      t = Test.new
 *      t.test
 *
 *          line prog.rb:11               false
 *        c-call prog.rb:11        new    Class
 *        c-call prog.rb:11 initialize   Object
 *      c-return prog.rb:11 initialize   Object
 *      c-return prog.rb:11        new    Class
 *          line prog.rb:12               false
 *          call prog.rb:2        test     Test
 *          line prog.rb:3        test     Test
 *          line prog.rb:4        test     Test
 *        return prog.rb:4        test     Test
 *
 *  Note that for +c-call+ and +c-return+ events, the binding returned is the
 *  binding of the nearest Ruby method calling the C method, since C methods
 *  themselves do not have bindings.
 */
static VALUE
set_trace_func(VALUE obj, VALUE trace)
{
    rb_remove_event_hook(call_trace_func);

    if (NIL_P(trace)) {
        return Qnil;
    }

    if (!rb_obj_is_proc(trace)) {
        rb_raise(rb_eTypeError, "trace_func needs to be Proc");
    }

    rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
    return trace;
}
static void
thread_add_trace_func(rb_execution_context_t *ec, rb_thread_t *filter_th, VALUE trace)
{
    if (!rb_obj_is_proc(trace)) {
        rb_raise(rb_eTypeError, "trace_func needs to be Proc");
    }

    rb_threadptr_add_event_hook(ec, filter_th, call_trace_func, RUBY_EVENT_ALL, trace, RUBY_EVENT_HOOK_FLAG_SAFE);
}
/*
 *  call-seq:
 *     thr.add_trace_func(proc) -> proc
 *
 *  Adds _proc_ as a handler for tracing.
 *
 *  See Thread#set_trace_func and Kernel#set_trace_func.
 */

static VALUE
thread_add_trace_func_m(VALUE obj, VALUE trace)
{
    thread_add_trace_func(GET_EC(), rb_thread_ptr(obj), trace);
    return trace;
}
/*
 *  call-seq:
 *     thr.set_trace_func(proc) -> proc
 *     thr.set_trace_func(nil)  -> nil
 *
 *  Establishes _proc_ on _thr_ as the handler for tracing, or
 *  disables tracing if the parameter is +nil+.
 *
 *  See Kernel#set_trace_func.
 */

static VALUE
thread_set_trace_func_m(VALUE target_thread, VALUE trace)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *target_th = rb_thread_ptr(target_thread);

    rb_threadptr_remove_event_hook(ec, target_th, call_trace_func, Qundef);

    if (NIL_P(trace)) {
        return Qnil;
    }
    else {
        thread_add_trace_func(ec, target_th, trace);
        return trace;
    }
}
static const char *
get_event_name(rb_event_flag_t event)
{
    switch (event) {
      case RUBY_EVENT_LINE:     return "line";
      case RUBY_EVENT_CLASS:    return "class";
      case RUBY_EVENT_END:      return "end";
      case RUBY_EVENT_CALL:     return "call";
      case RUBY_EVENT_RETURN:   return "return";
      case RUBY_EVENT_C_CALL:   return "c-call";
      case RUBY_EVENT_C_RETURN: return "c-return";
      case RUBY_EVENT_RAISE:    return "raise";
      default:
        return "unknown";
    }
}
static ID
get_event_id(rb_event_flag_t event)
{
    ID id;

    switch (event) {
#define C(name, NAME) case RUBY_EVENT_##NAME: CONST_ID(id, #name); return id;
        C(line, LINE);
        C(class, CLASS);
        C(end, END);
        C(call, CALL);
        C(return, RETURN);
        C(c_call, C_CALL);
        C(c_return, C_RETURN);
        C(raise, RAISE);
        C(b_call, B_CALL);
        C(b_return, B_RETURN);
        C(thread_begin, THREAD_BEGIN);
        C(thread_end, THREAD_END);
        C(fiber_switch, FIBER_SWITCH);
        C(script_compiled, SCRIPT_COMPILED);
#undef C
      default:
        return 0;
    }
}
static void
get_path_and_lineno(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, rb_event_flag_t event, VALUE *pathp, int *linep)
{
    cfp = rb_vm_get_ruby_level_next_cfp(ec, cfp);

    if (cfp) {
        const rb_iseq_t *iseq = cfp->iseq;
        *pathp = rb_iseq_path(iseq);

        if (event & (RUBY_EVENT_CLASS |
                     RUBY_EVENT_CALL |
                     RUBY_EVENT_B_CALL)) {
            *linep = FIX2INT(rb_iseq_first_lineno(iseq));
        }
        else {
            *linep = rb_vm_get_sourceline(cfp);
        }
    }
    else {
        *pathp = Qnil;
        *linep = 0;
    }
}
static void
call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
{
    int line;
    VALUE filename;
    VALUE eventname = rb_str_new2(get_event_name(event));
    VALUE argv[6];
    const rb_execution_context_t *ec = GET_EC();

    get_path_and_lineno(ec, ec->cfp, event, &filename, &line);

    if (!id) {
        rb_ec_frame_method_id_and_class(ec, &id, 0, &klass);
    }

    if (klass) {
        if (RB_TYPE_P(klass, T_ICLASS)) {
            klass = RBASIC(klass)->klass;
        }
        else if (FL_TEST(klass, FL_SINGLETON)) {
            klass = rb_ivar_get(klass, id__attached__);
        }
    }

    argv[0] = eventname;
    argv[1] = filename;
    argv[2] = INT2FIX(line);
    argv[3] = id ? ID2SYM(id) : Qnil;
    argv[4] = (self && (filename != Qnil)) ? rb_binding_new() : Qnil;
    argv[5] = klass ? klass : Qnil;

    rb_proc_call_with_block(proc, 6, argv, Qnil);
}
/* (2-2) TracePoint API */

static VALUE rb_cTracePoint;
typedef struct rb_tp_struct {
    rb_event_flag_t events;
    int tracing; /* bool */
    rb_thread_t *target_th;
    VALUE local_target_set; /* Hash: target ->
                             * Qtrue (if target is iseq) or
                             * Qfalse (if target is bmethod)
                             */
    void (*func)(VALUE tpval, void *data);
    void *data;
    VALUE proc;
    rb_ractor_t *ractor;
    VALUE self;
} rb_tp_t;
static void
tp_mark(void *ptr)
{
    rb_tp_t *tp = ptr;
    rb_gc_mark(tp->proc);
    rb_gc_mark(tp->local_target_set);
    if (tp->target_th) rb_gc_mark(tp->target_th->self);
}
static size_t
tp_memsize(const void *ptr)
{
    return sizeof(rb_tp_t);
}

static const rb_data_type_t tp_data_type = {
    "tracepoint",
    {tp_mark, RUBY_TYPED_DEFAULT_FREE, tp_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};

static VALUE
tp_alloc(VALUE klass)
{
    rb_tp_t *tp;
    return TypedData_Make_Struct(klass, rb_tp_t, &tp_data_type, tp);
}
static rb_event_flag_t
symbol2event_flag(VALUE v)
{
    ID id;
    VALUE sym = rb_to_symbol_type(v);
    const rb_event_flag_t RUBY_EVENT_A_CALL =
        RUBY_EVENT_CALL | RUBY_EVENT_B_CALL | RUBY_EVENT_C_CALL;
    const rb_event_flag_t RUBY_EVENT_A_RETURN =
        RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN | RUBY_EVENT_C_RETURN;

#define C(name, NAME) CONST_ID(id, #name); if (sym == ID2SYM(id)) return RUBY_EVENT_##NAME
    C(line, LINE);
    C(class, CLASS);
    C(end, END);
    C(call, CALL);
    C(return, RETURN);
    C(c_call, C_CALL);
    C(c_return, C_RETURN);
    C(raise, RAISE);
    C(b_call, B_CALL);
    C(b_return, B_RETURN);
    C(thread_begin, THREAD_BEGIN);
    C(thread_end, THREAD_END);
    C(fiber_switch, FIBER_SWITCH);
    C(script_compiled, SCRIPT_COMPILED);

    /* joke */
    C(a_call, A_CALL);
    C(a_return, A_RETURN);
#undef C
    rb_raise(rb_eArgError, "unknown event: %"PRIsVALUE, rb_sym2str(sym));
}
static rb_tp_t *
tpptr(VALUE tpval)
{
    rb_tp_t *tp;
    TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);
    return tp;
}

static rb_trace_arg_t *
get_trace_arg(void)
{
    rb_trace_arg_t *trace_arg = GET_EC()->trace_arg;
    if (trace_arg == 0) {
        rb_raise(rb_eRuntimeError, "access from outside");
    }
    return trace_arg;
}

struct rb_trace_arg_struct *
rb_tracearg_from_tracepoint(VALUE tpval)
{
    return get_trace_arg();
}

rb_event_flag_t
rb_tracearg_event_flag(rb_trace_arg_t *trace_arg)
{
    return trace_arg->event;
}

VALUE
rb_tracearg_event(rb_trace_arg_t *trace_arg)
{
    return ID2SYM(get_event_id(trace_arg->event));
}

static void
fill_path_and_lineno(rb_trace_arg_t *trace_arg)
{
    if (trace_arg->path == Qundef) {
        get_path_and_lineno(trace_arg->ec, trace_arg->cfp, trace_arg->event, &trace_arg->path, &trace_arg->lineno);
    }
}

VALUE
rb_tracearg_lineno(rb_trace_arg_t *trace_arg)
{
    fill_path_and_lineno(trace_arg);
    return INT2FIX(trace_arg->lineno);
}

VALUE
rb_tracearg_path(rb_trace_arg_t *trace_arg)
{
    fill_path_and_lineno(trace_arg);
    return trace_arg->path;
}
static void
fill_id_and_klass(rb_trace_arg_t *trace_arg)
{
    if (!trace_arg->klass_solved) {
        if (!trace_arg->klass) {
            rb_vm_control_frame_id_and_class(trace_arg->cfp, &trace_arg->id, &trace_arg->called_id, &trace_arg->klass);
        }

        if (trace_arg->klass) {
            if (RB_TYPE_P(trace_arg->klass, T_ICLASS)) {
                trace_arg->klass = RBASIC(trace_arg->klass)->klass;
            }
        }
        else {
            trace_arg->klass = Qnil;
        }

        trace_arg->klass_solved = 1;
    }
}
VALUE
rb_tracearg_parameters(rb_trace_arg_t *trace_arg)
{
    switch (trace_arg->event) {
      case RUBY_EVENT_CALL:
      case RUBY_EVENT_RETURN:
      case RUBY_EVENT_B_CALL:
      case RUBY_EVENT_B_RETURN: {
        const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(trace_arg->ec, trace_arg->cfp);
        if (cfp) {
            int is_proc = 0;
            if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_BLOCK && !VM_FRAME_LAMBDA_P(cfp)) {
                is_proc = 1;
            }
            return rb_iseq_parameters(cfp->iseq, is_proc);
        }
        break;
      }
      case RUBY_EVENT_C_CALL:
      case RUBY_EVENT_C_RETURN: {
        fill_id_and_klass(trace_arg);
        if (trace_arg->klass && trace_arg->id) {
            const rb_method_entry_t *me;
            VALUE iclass = Qnil;
            me = rb_method_entry_without_refinements(trace_arg->klass, trace_arg->id, &iclass);
            return rb_unnamed_parameters(rb_method_entry_arity(me));
        }
        break;
      }
      case RUBY_EVENT_RAISE:
      case RUBY_EVENT_LINE:
      case RUBY_EVENT_CLASS:
      case RUBY_EVENT_END:
      case RUBY_EVENT_SCRIPT_COMPILED:
        rb_raise(rb_eRuntimeError, "not supported by this event");
        break;
    }
    return Qnil;
}
VALUE
rb_tracearg_method_id(rb_trace_arg_t *trace_arg)
{
    fill_id_and_klass(trace_arg);
    return trace_arg->id ? ID2SYM(trace_arg->id) : Qnil;
}

VALUE
rb_tracearg_callee_id(rb_trace_arg_t *trace_arg)
{
    fill_id_and_klass(trace_arg);
    return trace_arg->called_id ? ID2SYM(trace_arg->called_id) : Qnil;
}

VALUE
rb_tracearg_defined_class(rb_trace_arg_t *trace_arg)
{
    fill_id_and_klass(trace_arg);
    return trace_arg->klass;
}

VALUE
rb_tracearg_binding(rb_trace_arg_t *trace_arg)
{
    rb_control_frame_t *cfp;
    cfp = rb_vm_get_binding_creatable_next_cfp(trace_arg->ec, trace_arg->cfp);

    if (cfp) {
        return rb_vm_make_binding(trace_arg->ec, cfp);
    }
    else {
        return Qnil;
    }
}

VALUE
rb_tracearg_self(rb_trace_arg_t *trace_arg)
{
    return trace_arg->self;
}
VALUE
rb_tracearg_return_value(rb_trace_arg_t *trace_arg)
{
    if (trace_arg->event & (RUBY_EVENT_RETURN | RUBY_EVENT_C_RETURN | RUBY_EVENT_B_RETURN)) {
        /* ok */
    }
    else {
        rb_raise(rb_eRuntimeError, "not supported by this event");
    }
    if (trace_arg->data == Qundef) {
        rb_bug("rb_tracearg_return_value: unreachable");
    }
    return trace_arg->data;
}
VALUE
rb_tracearg_raised_exception(rb_trace_arg_t *trace_arg)
{
    if (trace_arg->event & (RUBY_EVENT_RAISE)) {
        /* ok */
    }
    else {
        rb_raise(rb_eRuntimeError, "not supported by this event");
    }
    if (trace_arg->data == Qundef) {
        rb_bug("rb_tracearg_raised_exception: unreachable");
    }
    return trace_arg->data;
}
VALUE
rb_tracearg_eval_script(rb_trace_arg_t *trace_arg)
{
    VALUE data = trace_arg->data;

    if (trace_arg->event & (RUBY_EVENT_SCRIPT_COMPILED)) {
        /* ok */
    }
    else {
        rb_raise(rb_eRuntimeError, "not supported by this event");
    }
    if (data == Qundef) {
        rb_bug("rb_tracearg_eval_script: unreachable");
    }
    if (rb_obj_is_iseq(data)) {
        return Qnil;
    }
    else {
        VM_ASSERT(RB_TYPE_P(data, T_ARRAY));
        /* [src, iseq] */
        return RARRAY_AREF(data, 0);
    }
}
VALUE
rb_tracearg_instruction_sequence(rb_trace_arg_t *trace_arg)
{
    VALUE data = trace_arg->data;

    if (trace_arg->event & (RUBY_EVENT_SCRIPT_COMPILED)) {
        /* ok */
    }
    else {
        rb_raise(rb_eRuntimeError, "not supported by this event");
    }
    if (data == Qundef) {
        rb_bug("rb_tracearg_instruction_sequence: unreachable");
    }

    if (rb_obj_is_iseq(data)) {
        return rb_iseqw_new((const rb_iseq_t *)data);
    }
    else {
        VM_ASSERT(RB_TYPE_P(data, T_ARRAY));
        VM_ASSERT(rb_obj_is_iseq(RARRAY_AREF(data, 1)));

        return rb_iseqw_new((const rb_iseq_t *)RARRAY_AREF(data, 1));
    }
}
VALUE
rb_tracearg_object(rb_trace_arg_t *trace_arg)
{
    if (trace_arg->event & (RUBY_INTERNAL_EVENT_NEWOBJ | RUBY_INTERNAL_EVENT_FREEOBJ)) {
        /* ok */
    }
    else {
        rb_raise(rb_eRuntimeError, "not supported by this event");
    }
    if (trace_arg->data == Qundef) {
        rb_bug("rb_tracearg_object: unreachable");
    }
    return trace_arg->data;
}
static VALUE
tracepoint_attr_event(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_event(get_trace_arg());
}

static VALUE
tracepoint_attr_lineno(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_lineno(get_trace_arg());
}

static VALUE
tracepoint_attr_path(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_path(get_trace_arg());
}

static VALUE
tracepoint_attr_parameters(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_parameters(get_trace_arg());
}

static VALUE
tracepoint_attr_method_id(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_method_id(get_trace_arg());
}

static VALUE
tracepoint_attr_callee_id(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_callee_id(get_trace_arg());
}

static VALUE
tracepoint_attr_defined_class(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_defined_class(get_trace_arg());
}

static VALUE
tracepoint_attr_binding(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_binding(get_trace_arg());
}

static VALUE
tracepoint_attr_self(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_self(get_trace_arg());
}

static VALUE
tracepoint_attr_return_value(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_return_value(get_trace_arg());
}

static VALUE
tracepoint_attr_raised_exception(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_raised_exception(get_trace_arg());
}

static VALUE
tracepoint_attr_eval_script(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_eval_script(get_trace_arg());
}

static VALUE
tracepoint_attr_instruction_sequence(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_instruction_sequence(get_trace_arg());
}
static void
tp_call_trace(VALUE tpval, rb_trace_arg_t *trace_arg)
{
    rb_tp_t *tp = tpptr(tpval);

    if (tp->func) {
        (*tp->func)(tpval, tp->data);
    }
    else {
        if (tp->ractor == NULL || tp->ractor == GET_RACTOR()) {
            rb_proc_call_with_block((VALUE)tp->proc, 1, &tpval, Qnil);
        }
    }
}
VALUE
rb_tracepoint_enable(VALUE tpval)
{
    rb_tp_t *tp;
    tp = tpptr(tpval);

    if (tp->local_target_set != Qfalse) {
        rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
    }

    if (tp->target_th) {
        rb_thread_add_event_hook2(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
                                  RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
    }
    else {
        rb_add_event_hook2((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
                           RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
    }
    tp->tracing = 1;
    return Qundef;
}
static const rb_iseq_t *
iseq_of(VALUE target)
{
    VALUE iseqv = rb_funcall(rb_cISeq, rb_intern("of"), 1, target);
    if (NIL_P(iseqv)) {
        rb_raise(rb_eArgError, "specified target is not supported");
    }
    else {
        return rb_iseqw_to_iseq(iseqv);
    }
}
const rb_method_definition_t *rb_method_def(VALUE method); /* proc.c */
static VALUE
rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
{
    rb_tp_t *tp = tpptr(tpval);
    const rb_iseq_t *iseq = iseq_of(target);
    int n = 0;
    unsigned int line = 0;
    bool target_bmethod = false;

    if (tp->tracing > 0) {
        rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
    }

    if (!NIL_P(target_line)) {
        if ((tp->events & RUBY_EVENT_LINE) == 0) {
            rb_raise(rb_eArgError, "target_line is specified, but line event is not specified");
        }
        else {
            line = NUM2UINT(target_line);
        }
    }

    VM_ASSERT(tp->local_target_set == Qfalse);
    tp->local_target_set = rb_obj_hide(rb_ident_hash_new());

    /* bmethod */
    if (rb_obj_is_method(target)) {
        rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
        if (def->type == VM_METHOD_TYPE_BMETHOD &&
            (tp->events & (RUBY_EVENT_CALL | RUBY_EVENT_RETURN))) {
            if (def->body.bmethod.hooks == NULL) {
                def->body.bmethod.hooks = ZALLOC(rb_hook_list_t);
            }
            rb_hook_list_connect_tracepoint(target, def->body.bmethod.hooks, tpval, 0);
            rb_hash_aset(tp->local_target_set, target, Qfalse);
            target_bmethod = true;

            n++;
        }
    }

    /* iseq */
    n += rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval, line, target_bmethod);
    rb_hash_aset(tp->local_target_set, (VALUE)iseq, Qtrue);

    if (n == 0) {
        rb_raise(rb_eArgError, "can not enable any hooks");
    }

    rb_yjit_tracing_invalidate_all();

    ruby_vm_event_local_num++;

    tp->tracing = 1;

    return Qnil;
}
static int
disable_local_event_iseq_i(VALUE target, VALUE iseq_p, VALUE tpval)
{
    if (iseq_p) {
        rb_iseq_remove_local_tracepoint_recursively((rb_iseq_t *)target, tpval);
    }
    else {
        /* bmethod */
        rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
        rb_hook_list_t *hooks = def->body.bmethod.hooks;
        VM_ASSERT(hooks != NULL);
        rb_hook_list_remove_tracepoint(hooks, tpval);

        if (hooks->events == 0) {
            rb_hook_list_free(def->body.bmethod.hooks);
            def->body.bmethod.hooks = NULL;
        }
    }
    return ST_CONTINUE;
}
VALUE
rb_tracepoint_disable(VALUE tpval)
{
    rb_tp_t *tp;

    tp = tpptr(tpval);

    if (tp->local_target_set) {
        rb_hash_foreach(tp->local_target_set, disable_local_event_iseq_i, tpval);
        tp->local_target_set = Qfalse;
        ruby_vm_event_local_num--;
    }
    else {
        if (tp->target_th) {
            rb_thread_remove_event_hook_with_data(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tpval);
        }
        else {
            rb_remove_event_hook_with_data((rb_event_hook_func_t)tp_call_trace, tpval);
        }
    }
    tp->tracing = 0;
    tp->target_th = NULL;
    return Qundef;
}
void
rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line)
{
    rb_tp_t *tp = tpptr(tpval);
    rb_event_hook_t *hook = alloc_event_hook((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
                                             RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
    hook->filter.target_line = target_line;
    hook_list_connect(target, list, hook, FALSE);
}
void
rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval)
{
    rb_event_hook_t *hook = list->hooks;
    rb_event_flag_t events = 0;

    while (hook) {
        if (hook->data == tpval) {
            hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
            list->need_clean = true;
        }
        else if ((hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) == 0) {
            events |= hook->events;
        }
        hook = hook->next;
    }

    list->events = events;
}
static VALUE
tracepoint_enable_m(rb_execution_context_t *ec, VALUE tpval, VALUE target, VALUE target_line, VALUE target_thread)
{
    rb_tp_t *tp = tpptr(tpval);
    int previous_tracing = tp->tracing;

    /* check target_thread */
    if (RTEST(target_thread)) {
        if (tp->target_th) {
            rb_raise(rb_eArgError, "can not override target_thread filter");
        }
        tp->target_th = rb_thread_ptr(target_thread);
    }
    else {
        tp->target_th = NULL;
    }

    if (NIL_P(target)) {
        if (!NIL_P(target_line)) {
            rb_raise(rb_eArgError, "only target_line is specified");
        }
        rb_tracepoint_enable(tpval);
    }
    else {
        rb_tracepoint_enable_for_target(tpval, target, target_line);
    }

    if (rb_block_given_p()) {
        return rb_ensure(rb_yield, Qundef,
                         previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
                         tpval);
    }
    else {
        return RBOOL(previous_tracing);
    }
}
static VALUE
tracepoint_disable_m(rb_execution_context_t *ec, VALUE tpval)
{
    rb_tp_t *tp = tpptr(tpval);
    int previous_tracing = tp->tracing;

    if (rb_block_given_p()) {
        if (tp->local_target_set != Qfalse) {
            rb_raise(rb_eArgError, "can't disable a targeting TracePoint in a block");
        }

        rb_tracepoint_disable(tpval);
        return rb_ensure(rb_yield, Qundef,
                         previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
                         tpval);
    }
    else {
        rb_tracepoint_disable(tpval);
        return RBOOL(previous_tracing);
    }
}
VALUE
rb_tracepoint_enabled_p(VALUE tpval)
{
    rb_tp_t *tp = tpptr(tpval);
    return RBOOL(tp->tracing);
}

static VALUE
tracepoint_enabled_p(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracepoint_enabled_p(tpval);
}
static VALUE
tracepoint_new(VALUE klass, rb_thread_t *target_th, rb_event_flag_t events, void (func)(VALUE, void*), void *data, VALUE proc)
{
    VALUE tpval = tp_alloc(klass);
    rb_tp_t *tp;
    TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);

    tp->proc = proc;
    tp->ractor = rb_ractor_shareable_p(proc) ? NULL : GET_RACTOR();
    tp->func = func;
    tp->data = data;
    tp->events = events;
    tp->self = tpval;

    return tpval;
}
VALUE
rb_tracepoint_new(VALUE target_thval, rb_event_flag_t events, void (*func)(VALUE, void *), void *data)
{
    rb_thread_t *target_th = NULL;

    if (RTEST(target_thval)) {
        target_th = rb_thread_ptr(target_thval);
        /* TODO: Test it!
         * Warning: This function is not tested.
         */
    }
    return tracepoint_new(rb_cTracePoint, target_th, events, func, data, Qundef);
}
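
/*
 * Usage sketch for the C-level TracePoint API above (the callback name and
 * the stderr logging are illustrative assumptions; rb_tracearg_path() may
 * return nil, which this sketch ignores):
 *
 *   static void
 *   my_tp_callback(VALUE tpval, void *data)
 *   {
 *       rb_trace_arg_t *trace_arg = rb_tracearg_from_tracepoint(tpval);
 *       fprintf(stderr, "line %d\n", FIX2INT(rb_tracearg_lineno(trace_arg)));
 *   }
 *
 *   VALUE tp = rb_tracepoint_new(Qnil, RUBY_EVENT_LINE, my_tp_callback, NULL);
 *   rb_tracepoint_enable(tp);
 *   // ... traced work ...
 *   rb_tracepoint_disable(tp);
 */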
static VALUE
tracepoint_new_s(rb_execution_context_t *ec, VALUE self, VALUE args)
{
    rb_event_flag_t events = 0;
    long i;
    long argc = RARRAY_LEN(args);

    if (argc > 0) {
        for (i=0; i<argc; i++) {
            events |= symbol2event_flag(RARRAY_AREF(args, i));
        }
    }
    else {
        events = RUBY_EVENT_TRACEPOINT_ALL;
    }

    if (!rb_block_given_p()) {
        rb_raise(rb_eArgError, "must be called with a block");
    }

    return tracepoint_new(self, 0, events, 0, 0, rb_block_proc());
}
static VALUE
tracepoint_trace_s(rb_execution_context_t *ec, VALUE self, VALUE args)
{
    VALUE trace = tracepoint_new_s(ec, self, args);
    rb_tracepoint_enable(trace);
    return trace;
}
static VALUE
tracepoint_inspect(rb_execution_context_t *ec, VALUE self)
{
    rb_tp_t *tp = tpptr(self);
    rb_trace_arg_t *trace_arg = GET_EC()->trace_arg;

    if (tp->tracing && trace_arg) {
        switch (trace_arg->event) {
          case RUBY_EVENT_LINE:
            {
                VALUE sym = rb_tracearg_method_id(trace_arg);
                if (NIL_P(sym))
                    break;
                return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE":%d in `%"PRIsVALUE"'>",
                                  rb_tracearg_event(trace_arg),
                                  rb_tracearg_path(trace_arg),
                                  FIX2INT(rb_tracearg_lineno(trace_arg)),
                                  sym);
            }
          case RUBY_EVENT_CALL:
          case RUBY_EVENT_C_CALL:
          case RUBY_EVENT_RETURN:
          case RUBY_EVENT_C_RETURN:
            return rb_sprintf("#<TracePoint:%"PRIsVALUE" `%"PRIsVALUE"' %"PRIsVALUE":%d>",
                              rb_tracearg_event(trace_arg),
                              rb_tracearg_method_id(trace_arg),
                              rb_tracearg_path(trace_arg),
                              FIX2INT(rb_tracearg_lineno(trace_arg)));
          case RUBY_EVENT_THREAD_BEGIN:
          case RUBY_EVENT_THREAD_END:
            return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE">",
                              rb_tracearg_event(trace_arg),
                              rb_tracearg_self(trace_arg));
          default:
            break;
        }
        return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE":%d>",
                          rb_tracearg_event(trace_arg),
                          rb_tracearg_path(trace_arg),
                          FIX2INT(rb_tracearg_lineno(trace_arg)));
    }
    else {
        return rb_sprintf("#<TracePoint:%s>", tp->tracing ? "enabled" : "disabled");
    }
}
static void
tracepoint_stat_event_hooks(VALUE hash, VALUE key, rb_event_hook_t *hook)
{
    int active = 0, deleted = 0;

    while (hook) {
        if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
            deleted++;
        }
        else {
            active++;
        }
        hook = hook->next;
    }

    rb_hash_aset(hash, key, rb_ary_new3(2, INT2FIX(active), INT2FIX(deleted)));
}
static VALUE
tracepoint_stat_s(rb_execution_context_t *ec, VALUE self)
{
    rb_vm_t *vm = GET_VM();
    VALUE stat = rb_hash_new();

    tracepoint_stat_event_hooks(stat, vm->self, rb_ec_ractor_hooks(ec)->hooks);
    /* TODO: thread local hooks */

    return stat;
}
static VALUE
disallow_reentry(VALUE val)
{
    rb_trace_arg_t *arg = (rb_trace_arg_t *)val;
    rb_execution_context_t *ec = GET_EC();
    if (ec->trace_arg != NULL) rb_bug("should be NULL, but %p", (void *)ec->trace_arg);
    ec->trace_arg = arg;
    return Qnil;
}
static VALUE
tracepoint_allow_reentry(rb_execution_context_t *ec, VALUE self)
{
    const rb_trace_arg_t *arg = ec->trace_arg;
    if (arg == NULL) rb_raise(rb_eRuntimeError, "No need to allow reentrance.");
    ec->trace_arg = NULL;
    return rb_ensure(rb_yield, Qnil, disallow_reentry, (VALUE)arg);
}
1553 #include "trace_point.rbinc"
1555 /* This function is called from inits.c */
1560 rb_define_global_function("set_trace_func", set_trace_func
, 1);
1561 rb_define_method(rb_cThread
, "set_trace_func", thread_set_trace_func_m
, 1);
1562 rb_define_method(rb_cThread
, "add_trace_func", thread_add_trace_func_m
, 1);
1564 rb_cTracePoint
= rb_define_class("TracePoint", rb_cObject
);
1565 rb_undef_alloc_func(rb_cTracePoint
);
typedef struct rb_postponed_job_struct {
    rb_postponed_job_func_t func;
    void *data;
} rb_postponed_job_t;

#define MAX_POSTPONED_JOB                  1000
#define MAX_POSTPONED_JOB_SPECIAL_ADDITION   24

struct rb_workqueue_job {
    struct list_node jnode; /* <=> vm->workqueue */
    rb_postponed_job_t job;
};
void
Init_vm_postponed_job(void)
{
    rb_vm_t *vm = GET_VM();
    vm->postponed_job_buffer = ALLOC_N(rb_postponed_job_t, MAX_POSTPONED_JOB);
    vm->postponed_job_index = 0;
    /* workqueue is initialized when VM locks are initialized */
}
enum postponed_job_register_result {
    PJRR_SUCCESS     = 0,
    PJRR_FULL        = 1,
    PJRR_INTERRUPTED = 2
};
/* Async-signal-safe */
static enum postponed_job_register_result
postponed_job_register(rb_execution_context_t *ec, rb_vm_t *vm,
                       unsigned int flags, rb_postponed_job_func_t func, void *data, rb_atomic_t max, rb_atomic_t expected_index)
{
    rb_postponed_job_t *pjob;

    if (expected_index >= max) return PJRR_FULL; /* failed */

    if (ATOMIC_CAS(vm->postponed_job_index, expected_index, expected_index+1) == expected_index) {
        pjob = &vm->postponed_job_buffer[expected_index];
    }
    else {
        return PJRR_INTERRUPTED;
    }

    /* unused: pjob->flags = flags; */
    pjob->func = func;
    pjob->data = data;

    RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec);

    return PJRR_SUCCESS;
}
static rb_execution_context_t *
get_valid_ec(rb_vm_t *vm)
{
    rb_execution_context_t *ec = rb_current_execution_context(false);
    if (ec == NULL) ec = rb_vm_main_ractor_ec(vm);
    return ec;
}
/*
 * return 0 if job buffer is full
 * Async-signal-safe
 */
int
rb_postponed_job_register(unsigned int flags, rb_postponed_job_func_t func, void *data)
{
    rb_vm_t *vm = GET_VM();
    rb_execution_context_t *ec = get_valid_ec(vm);

  begin:
    switch (postponed_job_register(ec, vm, flags, func, data, MAX_POSTPONED_JOB, vm->postponed_job_index)) {
      case PJRR_SUCCESS    : return 1;
      case PJRR_FULL       : return 0;
      case PJRR_INTERRUPTED: goto begin;
      default: rb_bug("unreachable\n");
    }
}
/*
 * return 0 if job buffer is full
 * Async-signal-safe
 */
int
rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
{
    rb_vm_t *vm = GET_VM();
    rb_execution_context_t *ec = get_valid_ec(vm);
    rb_postponed_job_t *pjob;
    rb_atomic_t i, index;

  begin:
    index = vm->postponed_job_index;
    for (i=0; i<index; i++) {
        pjob = &vm->postponed_job_buffer[i];
        if (pjob->func == func) {
            RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec);
            return 2;
        }
    }
    switch (postponed_job_register(ec, vm, flags, func, data, MAX_POSTPONED_JOB + MAX_POSTPONED_JOB_SPECIAL_ADDITION, index)) {
      case PJRR_SUCCESS    : return 1;
      case PJRR_FULL       : return 0;
      case PJRR_INTERRUPTED: goto begin;
      default: rb_bug("unreachable\n");
    }
}
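
/*
 * Usage sketch (the pattern used by sampling profilers): a signal handler
 * may not allocate or touch most VM state, so it defers that work to a
 * postponed job, which the VM runs at a safe point on a Ruby thread.
 * Handler and job names are illustrative assumptions:
 *
 *   static void
 *   sample_job(void *data)
 *   {
 *       // safe point: allocating and calling Ruby APIs is allowed here
 *   }
 *
 *   static void
 *   sigprof_handler(int sig)
 *   {
 *       rb_postponed_job_register_one(0, sample_job, NULL); // async-signal-safe
 *   }
 */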
/*
 * thread-safe and called from non-Ruby thread
 * returns FALSE on failure (ENOMEM), TRUE otherwise
 */
int
rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data)
{
    struct rb_workqueue_job *wq_job = malloc(sizeof(*wq_job));
    rb_vm_t *vm = GET_VM();

    if (!wq_job) return FALSE;
    wq_job->job.func = func;
    wq_job->job.data = data;

    rb_nativethread_lock_lock(&vm->workqueue_lock);
    list_add_tail(&vm->workqueue, &wq_job->jnode);
    rb_nativethread_lock_unlock(&vm->workqueue_lock);

    // TODO: current implementation affects only main ractor
    RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(rb_vm_main_ractor_ec(vm));

    return TRUE;
}
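
/*
 * Usage sketch: unlike the postponed-job API, rb_workqueue_register() is
 * meant for native threads that have no Ruby execution context (it is
 * internal API; the caller below is an illustrative assumption):
 *
 *   static void
 *   io_done_job(void *data)
 *   {
 *       // runs later on the main ractor at an interrupt checkpoint
 *   }
 *
 *   // from a background native thread:
 *   //   rb_workqueue_register(0, io_done_job, request);  // "request" is hypothetical
 */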
void
rb_postponed_job_flush(rb_vm_t *vm)
{
    rb_execution_context_t *ec = GET_EC();
    const rb_atomic_t block_mask = POSTPONED_JOB_INTERRUPT_MASK|TRAP_INTERRUPT_MASK;
    volatile rb_atomic_t saved_mask = ec->interrupt_mask & block_mask;
    VALUE volatile saved_errno = ec->errinfo;
    struct list_head tmp;

    list_head_init(&tmp);

    rb_nativethread_lock_lock(&vm->workqueue_lock);
    list_append_list(&tmp, &vm->workqueue);
    rb_nativethread_lock_unlock(&vm->workqueue_lock);

    ec->errinfo = Qnil;
    /* mask POSTPONED_JOB dispatch */
    ec->interrupt_mask |= block_mask;
    {
        EC_PUSH_TAG(ec);
        if (EC_EXEC_TAG() == TAG_NONE) {
            rb_atomic_t index;
            struct rb_workqueue_job *wq_job;

            while ((index = vm->postponed_job_index) > 0) {
                if (ATOMIC_CAS(vm->postponed_job_index, index, index-1) == index) {
                    rb_postponed_job_t *pjob = &vm->postponed_job_buffer[index-1];
                    (*pjob->func)(pjob->data);
                }
            }
            while ((wq_job = list_pop(&tmp, struct rb_workqueue_job, jnode))) {
                rb_postponed_job_t pjob = wq_job->job;

                free(wq_job);
                (pjob.func)(pjob.data);
            }
        }
        EC_POP_TAG();
    }
    /* restore POSTPONED_JOB mask */
    ec->interrupt_mask &= ~(saved_mask ^ block_mask);
    ec->errinfo = saved_errno;

    /* don't leak memory if a job threw an exception */
    if (!list_empty(&tmp)) {
        rb_nativethread_lock_lock(&vm->workqueue_lock);
        list_prepend_list(&vm->workqueue, &tmp);
        rb_nativethread_lock_unlock(&vm->workqueue_lock);

        RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
    }
}