Remove prism compiler warning
[ruby.git] / vm_trace.c
blob0f99e34e7bb8cbd42ca70a60bbff62cf8fdbc9ca
1 /**********************************************************************
3 vm_trace.c -
5 $Author: ko1 $
6 created at: Tue Aug 14 19:37:09 2012
8 Copyright (C) 1993-2012 Yukihiro Matsumoto
10 **********************************************************************/
13 * This file include two parts:
15 * (1) set_trace_func internal mechanisms
16 * and C level API
18 * (2) Ruby level API
19 * (2-1) set_trace_func API
20 * (2-2) TracePoint API (not yet)
24 #include "eval_intern.h"
25 #include "internal.h"
26 #include "internal/bits.h"
27 #include "internal/class.h"
28 #include "internal/gc.h"
29 #include "internal/hash.h"
30 #include "internal/symbol.h"
31 #include "internal/thread.h"
32 #include "iseq.h"
33 #include "rjit.h"
34 #include "ruby/atomic.h"
35 #include "ruby/debug.h"
36 #include "vm_core.h"
37 #include "ruby/ractor.h"
38 #include "yjit.h"
40 #include "builtin.h"
42 static VALUE sym_default;
44 /* (1) trace mechanisms */
46 typedef struct rb_event_hook_struct {
47 rb_event_hook_flag_t hook_flags;
48 rb_event_flag_t events;
49 rb_event_hook_func_t func;
50 VALUE data;
51 struct rb_event_hook_struct *next;
53 struct {
54 rb_thread_t *th;
55 unsigned int target_line;
56 } filter;
57 } rb_event_hook_t;
59 typedef void (*rb_event_hook_raw_arg_func_t)(VALUE data, const rb_trace_arg_t *arg);
61 #define MAX_EVENT_NUM 32
63 void
64 rb_hook_list_mark(rb_hook_list_t *hooks)
66 rb_event_hook_t *hook = hooks->hooks;
68 while (hook) {
69 rb_gc_mark(hook->data);
70 hook = hook->next;
74 void
75 rb_hook_list_mark_and_update(rb_hook_list_t *hooks)
77 rb_event_hook_t *hook = hooks->hooks;
79 while (hook) {
80 rb_gc_mark_and_move(&hook->data);
81 hook = hook->next;
85 static void clean_hooks(rb_hook_list_t *list);
87 void
88 rb_hook_list_free(rb_hook_list_t *hooks)
90 hooks->need_clean = true;
92 if (hooks->running == 0) {
93 clean_hooks(hooks);
97 /* ruby_vm_event_flags management */
99 void rb_clear_attr_ccs(void);
100 void rb_clear_bf_ccs(void);
102 static void
103 update_global_event_hook(rb_event_flag_t prev_events, rb_event_flag_t new_events)
105 rb_event_flag_t new_iseq_events = new_events & ISEQ_TRACE_EVENTS;
106 rb_event_flag_t enabled_iseq_events = ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS;
107 bool first_time_iseq_events_p = new_iseq_events & ~enabled_iseq_events;
108 bool enable_c_call = (prev_events & RUBY_EVENT_C_CALL) == 0 && (new_events & RUBY_EVENT_C_CALL);
109 bool enable_c_return = (prev_events & RUBY_EVENT_C_RETURN) == 0 && (new_events & RUBY_EVENT_C_RETURN);
110 bool enable_call = (prev_events & RUBY_EVENT_CALL) == 0 && (new_events & RUBY_EVENT_CALL);
111 bool enable_return = (prev_events & RUBY_EVENT_RETURN) == 0 && (new_events & RUBY_EVENT_RETURN);
113 // Modify ISEQs or CCs to enable tracing
114 if (first_time_iseq_events_p) {
115 // write all ISeqs only when new events are added for the first time
116 rb_iseq_trace_set_all(new_iseq_events | enabled_iseq_events);
118 // if c_call or c_return is activated
119 else if (enable_c_call || enable_c_return) {
120 rb_clear_attr_ccs();
122 else if (enable_call || enable_return) {
123 rb_clear_bf_ccs();
126 ruby_vm_event_flags = new_events;
127 ruby_vm_event_enabled_global_flags |= new_events;
128 rb_objspace_set_event_hook(new_events);
130 // Invalidate JIT code as needed
131 if (first_time_iseq_events_p || enable_c_call || enable_c_return) {
132 // Invalidate all code when ISEQs are modified to use trace_* insns above.
133 // Also invalidate when enabling c_call or c_return because generated code
134 // never fires these events.
135 // Internal events fire inside C routines so don't need special handling.
136 // Do this after event flags updates so other ractors see updated vm events
137 // when they wake up.
138 rb_yjit_tracing_invalidate_all();
139 rb_rjit_tracing_invalidate_all(new_iseq_events);
143 /* add/remove hooks */
145 static rb_event_hook_t *
146 alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
148 rb_event_hook_t *hook;
150 if ((events & RUBY_INTERNAL_EVENT_MASK) && (events & ~RUBY_INTERNAL_EVENT_MASK)) {
151 rb_raise(rb_eTypeError, "Can not specify normal event and internal event simultaneously.");
154 hook = ALLOC(rb_event_hook_t);
155 hook->hook_flags = hook_flags;
156 hook->events = events;
157 hook->func = func;
158 hook->data = data;
160 /* no filters */
161 hook->filter.th = NULL;
162 hook->filter.target_line = 0;
164 return hook;
167 static void
168 hook_list_connect(VALUE list_owner, rb_hook_list_t *list, rb_event_hook_t *hook, int global_p)
170 rb_event_flag_t prev_events = list->events;
171 hook->next = list->hooks;
172 list->hooks = hook;
173 list->events |= hook->events;
175 if (global_p) {
176 /* global hooks are root objects at GC mark. */
177 update_global_event_hook(prev_events, list->events);
179 else {
180 RB_OBJ_WRITTEN(list_owner, Qundef, hook->data);
184 static void
185 connect_event_hook(const rb_execution_context_t *ec, rb_event_hook_t *hook)
187 rb_hook_list_t *list = rb_ec_ractor_hooks(ec);
188 hook_list_connect(Qundef, list, hook, TRUE);
191 static void
192 rb_threadptr_add_event_hook(const rb_execution_context_t *ec, rb_thread_t *th,
193 rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
195 rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
196 hook->filter.th = th;
197 connect_event_hook(ec, hook);
200 void
201 rb_thread_add_event_hook(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
203 rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
206 void
207 rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
209 rb_add_event_hook2(func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
212 void
213 rb_thread_add_event_hook2(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
215 rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, hook_flags);
218 void
219 rb_add_event_hook2(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
221 rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
222 connect_event_hook(GET_EC(), hook);
225 static void
226 clean_hooks(rb_hook_list_t *list)
228 rb_event_hook_t *hook, **nextp = &list->hooks;
229 rb_event_flag_t prev_events = list->events;
231 VM_ASSERT(list->running == 0);
232 VM_ASSERT(list->need_clean == true);
234 list->events = 0;
235 list->need_clean = false;
237 while ((hook = *nextp) != 0) {
238 if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
239 *nextp = hook->next;
240 xfree(hook);
242 else {
243 list->events |= hook->events; /* update active events */
244 nextp = &hook->next;
248 if (list->is_local) {
249 if (list->events == 0) {
250 /* local events */
251 ruby_xfree(list);
254 else {
255 update_global_event_hook(prev_events, list->events);
259 static void
260 clean_hooks_check(rb_hook_list_t *list)
262 if (UNLIKELY(list->need_clean)) {
263 if (list->running == 0) {
264 clean_hooks(list);
269 #define MATCH_ANY_FILTER_TH ((rb_thread_t *)1)
271 /* if func is 0, then clear all funcs */
272 static int
273 remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
275 rb_hook_list_t *list = rb_ec_ractor_hooks(ec);
276 int ret = 0;
277 rb_event_hook_t *hook = list->hooks;
279 while (hook) {
280 if (func == 0 || hook->func == func) {
281 if (hook->filter.th == filter_th || filter_th == MATCH_ANY_FILTER_TH) {
282 if (UNDEF_P(data) || hook->data == data) {
283 hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
284 ret+=1;
285 list->need_clean = true;
289 hook = hook->next;
292 clean_hooks_check(list);
293 return ret;
296 static int
297 rb_threadptr_remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
299 return remove_event_hook(ec, filter_th, func, data);
303 rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
305 return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, Qundef);
309 rb_thread_remove_event_hook_with_data(VALUE thval, rb_event_hook_func_t func, VALUE data)
311 return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, data);
315 rb_remove_event_hook(rb_event_hook_func_t func)
317 return remove_event_hook(GET_EC(), NULL, func, Qundef);
321 rb_remove_event_hook_with_data(rb_event_hook_func_t func, VALUE data)
323 return remove_event_hook(GET_EC(), NULL, func, data);
326 void
327 rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec)
329 rb_threadptr_remove_event_hook(ec, rb_ec_thread_ptr(ec), 0, Qundef);
332 void
333 rb_ec_clear_all_trace_func(const rb_execution_context_t *ec)
335 rb_threadptr_remove_event_hook(ec, MATCH_ANY_FILTER_TH, 0, Qundef);
338 /* invoke hooks */
340 static void
341 exec_hooks_body(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
343 rb_event_hook_t *hook;
345 for (hook = list->hooks; hook; hook = hook->next) {
346 if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) &&
347 (trace_arg->event & hook->events) &&
348 (LIKELY(hook->filter.th == 0) || hook->filter.th == rb_ec_thread_ptr(ec)) &&
349 (LIKELY(hook->filter.target_line == 0) || (hook->filter.target_line == (unsigned int)rb_vm_get_sourceline(ec->cfp)))) {
350 if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_RAW_ARG)) {
351 (*hook->func)(trace_arg->event, hook->data, trace_arg->self, trace_arg->id, trace_arg->klass);
353 else {
354 (*((rb_event_hook_raw_arg_func_t)hook->func))(hook->data, trace_arg);
360 static int
361 exec_hooks_precheck(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
363 if (list->events & trace_arg->event) {
364 list->running++;
365 return TRUE;
367 else {
368 return FALSE;
372 static void
373 exec_hooks_postcheck(const rb_execution_context_t *ec, rb_hook_list_t *list)
375 list->running--;
376 clean_hooks_check(list);
379 static void
380 exec_hooks_unprotected(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
382 if (exec_hooks_precheck(ec, list, trace_arg) == 0) return;
383 exec_hooks_body(ec, list, trace_arg);
384 exec_hooks_postcheck(ec, list);
387 static int
388 exec_hooks_protected(rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
390 enum ruby_tag_type state;
391 volatile int raised;
393 if (exec_hooks_precheck(ec, list, trace_arg) == 0) return 0;
395 raised = rb_ec_reset_raised(ec);
397 /* TODO: Support !RUBY_EVENT_HOOK_FLAG_SAFE hooks */
399 EC_PUSH_TAG(ec);
400 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
401 exec_hooks_body(ec, list, trace_arg);
403 EC_POP_TAG();
405 exec_hooks_postcheck(ec, list);
407 if (raised) {
408 rb_ec_set_raised(ec);
411 return state;
414 // pop_p: Whether to pop the frame for the TracePoint when it throws.
415 void
416 rb_exec_event_hooks(rb_trace_arg_t *trace_arg, rb_hook_list_t *hooks, int pop_p)
418 rb_execution_context_t *ec = trace_arg->ec;
420 if (UNLIKELY(trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
421 if (ec->trace_arg && (ec->trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
422 /* skip hooks because this thread doing INTERNAL_EVENT */
424 else {
425 rb_trace_arg_t *prev_trace_arg = ec->trace_arg;
427 ec->trace_arg = trace_arg;
428 /* only global hooks */
429 exec_hooks_unprotected(ec, rb_ec_ractor_hooks(ec), trace_arg);
430 ec->trace_arg = prev_trace_arg;
433 else {
434 if (ec->trace_arg == NULL && /* check reentrant */
435 trace_arg->self != rb_mRubyVMFrozenCore /* skip special methods. TODO: remove it. */) {
436 const VALUE errinfo = ec->errinfo;
437 const VALUE old_recursive = ec->local_storage_recursive_hash;
438 enum ruby_tag_type state = 0;
440 /* setup */
441 ec->local_storage_recursive_hash = ec->local_storage_recursive_hash_for_trace;
442 ec->errinfo = Qnil;
443 ec->trace_arg = trace_arg;
445 /* kick hooks */
446 if ((state = exec_hooks_protected(ec, hooks, trace_arg)) == TAG_NONE) {
447 ec->errinfo = errinfo;
450 /* cleanup */
451 ec->trace_arg = NULL;
452 ec->local_storage_recursive_hash_for_trace = ec->local_storage_recursive_hash;
453 ec->local_storage_recursive_hash = old_recursive;
455 if (state) {
456 if (pop_p) {
457 if (VM_FRAME_FINISHED_P(ec->cfp)) {
458 ec->tag = ec->tag->prev;
460 rb_vm_pop_frame(ec);
462 EC_JUMP_TAG(ec, state);
468 VALUE
469 rb_suppress_tracing(VALUE (*func)(VALUE), VALUE arg)
471 volatile int raised;
472 volatile VALUE result = Qnil;
473 rb_execution_context_t *const ec = GET_EC();
474 rb_vm_t *const vm = rb_ec_vm_ptr(ec);
475 enum ruby_tag_type state;
476 rb_trace_arg_t dummy_trace_arg;
477 dummy_trace_arg.event = 0;
479 if (!ec->trace_arg) {
480 ec->trace_arg = &dummy_trace_arg;
483 raised = rb_ec_reset_raised(ec);
485 EC_PUSH_TAG(ec);
486 if (LIKELY((state = EC_EXEC_TAG()) == TAG_NONE)) {
487 result = (*func)(arg);
489 else {
490 (void)*&vm; /* suppress "clobbered" warning */
492 EC_POP_TAG();
494 if (raised) {
495 rb_ec_reset_raised(ec);
498 if (ec->trace_arg == &dummy_trace_arg) {
499 ec->trace_arg = NULL;
502 if (state) {
503 #if defined RUBY_USE_SETJMPEX && RUBY_USE_SETJMPEX
504 RB_GC_GUARD(result);
505 #endif
506 EC_JUMP_TAG(ec, state);
509 return result;
512 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
514 /* (2-1) set_trace_func (old API) */
517 * call-seq:
518 * set_trace_func(proc) -> proc
519 * set_trace_func(nil) -> nil
521 * Establishes _proc_ as the handler for tracing, or disables
522 * tracing if the parameter is +nil+.
524 * *Note:* this method is obsolete, please use TracePoint instead.
526 * _proc_ takes up to six parameters:
528 * * an event name string
529 * * a filename string
530 * * a line number
531 * * a method name symbol, or nil
532 * * a binding, or nil
533 * * the class, module, or nil
535 * _proc_ is invoked whenever an event occurs.
537 * Events are:
539 * <code>"c-call"</code>:: call a C-language routine
540 * <code>"c-return"</code>:: return from a C-language routine
541 * <code>"call"</code>:: call a Ruby method
542 * <code>"class"</code>:: start a class or module definition
543 * <code>"end"</code>:: finish a class or module definition
544 * <code>"line"</code>:: execute code on a new line
545 * <code>"raise"</code>:: raise an exception
546 * <code>"return"</code>:: return from a Ruby method
548 * Tracing is disabled within the context of _proc_.
550 * class Test
551 * def test
552 * a = 1
553 * b = 2
554 * end
555 * end
557 * set_trace_func proc { |event, file, line, id, binding, class_or_module|
558 * printf "%8s %s:%-2d %16p %14p\n", event, file, line, id, class_or_module
560 * t = Test.new
561 * t.test
563 * Produces:
565 * c-return prog.rb:8 :set_trace_func Kernel
566 * line prog.rb:11 nil nil
567 * c-call prog.rb:11 :new Class
568 * c-call prog.rb:11 :initialize BasicObject
569 * c-return prog.rb:11 :initialize BasicObject
570 * c-return prog.rb:11 :new Class
571 * line prog.rb:12 nil nil
572 * call prog.rb:2 :test Test
573 * line prog.rb:3 :test Test
574 * line prog.rb:4 :test Test
575 * return prog.rb:5 :test Test
578 static VALUE
579 set_trace_func(VALUE obj, VALUE trace)
581 rb_remove_event_hook(call_trace_func);
583 if (NIL_P(trace)) {
584 return Qnil;
587 if (!rb_obj_is_proc(trace)) {
588 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
591 rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
592 return trace;
595 static void
596 thread_add_trace_func(rb_execution_context_t *ec, rb_thread_t *filter_th, VALUE trace)
598 if (!rb_obj_is_proc(trace)) {
599 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
602 rb_threadptr_add_event_hook(ec, filter_th, call_trace_func, RUBY_EVENT_ALL, trace, RUBY_EVENT_HOOK_FLAG_SAFE);
606 * call-seq:
607 * thr.add_trace_func(proc) -> proc
609 * Adds _proc_ as a handler for tracing.
611 * See Thread#set_trace_func and Kernel#set_trace_func.
614 static VALUE
615 thread_add_trace_func_m(VALUE obj, VALUE trace)
617 thread_add_trace_func(GET_EC(), rb_thread_ptr(obj), trace);
618 return trace;
622 * call-seq:
623 * thr.set_trace_func(proc) -> proc
624 * thr.set_trace_func(nil) -> nil
626 * Establishes _proc_ on _thr_ as the handler for tracing, or
627 * disables tracing if the parameter is +nil+.
629 * See Kernel#set_trace_func.
632 static VALUE
633 thread_set_trace_func_m(VALUE target_thread, VALUE trace)
635 rb_execution_context_t *ec = GET_EC();
636 rb_thread_t *target_th = rb_thread_ptr(target_thread);
638 rb_threadptr_remove_event_hook(ec, target_th, call_trace_func, Qundef);
640 if (NIL_P(trace)) {
641 return Qnil;
643 else {
644 thread_add_trace_func(ec, target_th, trace);
645 return trace;
649 static const char *
650 get_event_name(rb_event_flag_t event)
652 switch (event) {
653 case RUBY_EVENT_LINE: return "line";
654 case RUBY_EVENT_CLASS: return "class";
655 case RUBY_EVENT_END: return "end";
656 case RUBY_EVENT_CALL: return "call";
657 case RUBY_EVENT_RETURN: return "return";
658 case RUBY_EVENT_C_CALL: return "c-call";
659 case RUBY_EVENT_C_RETURN: return "c-return";
660 case RUBY_EVENT_RAISE: return "raise";
661 default:
662 return "unknown";
666 static ID
667 get_event_id(rb_event_flag_t event)
669 ID id;
671 switch (event) {
672 #define C(name, NAME) case RUBY_EVENT_##NAME: CONST_ID(id, #name); return id;
673 C(line, LINE);
674 C(class, CLASS);
675 C(end, END);
676 C(call, CALL);
677 C(return, RETURN);
678 C(c_call, C_CALL);
679 C(c_return, C_RETURN);
680 C(raise, RAISE);
681 C(b_call, B_CALL);
682 C(b_return, B_RETURN);
683 C(thread_begin, THREAD_BEGIN);
684 C(thread_end, THREAD_END);
685 C(fiber_switch, FIBER_SWITCH);
686 C(script_compiled, SCRIPT_COMPILED);
687 C(rescue, RESCUE);
688 #undef C
689 default:
690 return 0;
694 static void
695 get_path_and_lineno(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, rb_event_flag_t event, VALUE *pathp, int *linep)
697 cfp = rb_vm_get_ruby_level_next_cfp(ec, cfp);
699 if (cfp) {
700 const rb_iseq_t *iseq = cfp->iseq;
701 *pathp = rb_iseq_path(iseq);
703 if (event & (RUBY_EVENT_CLASS |
704 RUBY_EVENT_CALL |
705 RUBY_EVENT_B_CALL)) {
706 *linep = FIX2INT(rb_iseq_first_lineno(iseq));
708 else {
709 *linep = rb_vm_get_sourceline(cfp);
712 else {
713 *pathp = Qnil;
714 *linep = 0;
718 static void
719 call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
721 int line;
722 VALUE filename;
723 VALUE eventname = rb_str_new2(get_event_name(event));
724 VALUE argv[6];
725 const rb_execution_context_t *ec = GET_EC();
727 get_path_and_lineno(ec, ec->cfp, event, &filename, &line);
729 if (!klass) {
730 rb_ec_frame_method_id_and_class(ec, &id, 0, &klass);
733 if (klass) {
734 if (RB_TYPE_P(klass, T_ICLASS)) {
735 klass = RBASIC(klass)->klass;
737 else if (RCLASS_SINGLETON_P(klass)) {
738 klass = RCLASS_ATTACHED_OBJECT(klass);
742 argv[0] = eventname;
743 argv[1] = filename;
744 argv[2] = INT2FIX(line);
745 argv[3] = id ? ID2SYM(id) : Qnil;
746 argv[4] = Qnil;
747 if (self && (filename != Qnil) &&
748 event != RUBY_EVENT_C_CALL &&
749 event != RUBY_EVENT_C_RETURN &&
750 (VM_FRAME_RUBYFRAME_P(ec->cfp) && imemo_type_p((VALUE)ec->cfp->iseq, imemo_iseq))) {
751 argv[4] = rb_binding_new();
753 argv[5] = klass ? klass : Qnil;
755 rb_proc_call_with_block(proc, 6, argv, Qnil);
758 /* (2-2) TracePoint API */
760 static VALUE rb_cTracePoint;
762 typedef struct rb_tp_struct {
763 rb_event_flag_t events;
764 int tracing; /* bool */
765 rb_thread_t *target_th;
766 VALUE local_target_set; /* Hash: target ->
767 * Qtrue (if target is iseq) or
768 * Qfalse (if target is bmethod)
770 void (*func)(VALUE tpval, void *data);
771 void *data;
772 VALUE proc;
773 rb_ractor_t *ractor;
774 VALUE self;
775 } rb_tp_t;
777 static void
778 tp_mark(void *ptr)
780 rb_tp_t *tp = ptr;
781 rb_gc_mark(tp->proc);
782 rb_gc_mark(tp->local_target_set);
783 if (tp->target_th) rb_gc_mark(tp->target_th->self);
786 static const rb_data_type_t tp_data_type = {
787 "tracepoint",
789 tp_mark,
790 RUBY_TYPED_DEFAULT_FREE,
791 NULL, // Nothing allocated externally, so don't need a memsize function
793 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
796 static VALUE
797 tp_alloc(VALUE klass)
799 rb_tp_t *tp;
800 return TypedData_Make_Struct(klass, rb_tp_t, &tp_data_type, tp);
803 static rb_event_flag_t
804 symbol2event_flag(VALUE v)
806 ID id;
807 VALUE sym = rb_to_symbol_type(v);
808 const rb_event_flag_t RUBY_EVENT_A_CALL =
809 RUBY_EVENT_CALL | RUBY_EVENT_B_CALL | RUBY_EVENT_C_CALL;
810 const rb_event_flag_t RUBY_EVENT_A_RETURN =
811 RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN | RUBY_EVENT_C_RETURN;
813 #define C(name, NAME) CONST_ID(id, #name); if (sym == ID2SYM(id)) return RUBY_EVENT_##NAME
814 C(line, LINE);
815 C(class, CLASS);
816 C(end, END);
817 C(call, CALL);
818 C(return, RETURN);
819 C(c_call, C_CALL);
820 C(c_return, C_RETURN);
821 C(raise, RAISE);
822 C(b_call, B_CALL);
823 C(b_return, B_RETURN);
824 C(thread_begin, THREAD_BEGIN);
825 C(thread_end, THREAD_END);
826 C(fiber_switch, FIBER_SWITCH);
827 C(script_compiled, SCRIPT_COMPILED);
828 C(rescue, RESCUE);
830 /* joke */
831 C(a_call, A_CALL);
832 C(a_return, A_RETURN);
833 #undef C
834 rb_raise(rb_eArgError, "unknown event: %"PRIsVALUE, rb_sym2str(sym));
837 static rb_tp_t *
838 tpptr(VALUE tpval)
840 rb_tp_t *tp;
841 TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);
842 return tp;
845 static rb_trace_arg_t *
846 get_trace_arg(void)
848 rb_trace_arg_t *trace_arg = GET_EC()->trace_arg;
849 if (trace_arg == 0) {
850 rb_raise(rb_eRuntimeError, "access from outside");
852 return trace_arg;
855 struct rb_trace_arg_struct *
856 rb_tracearg_from_tracepoint(VALUE tpval)
858 return get_trace_arg();
861 rb_event_flag_t
862 rb_tracearg_event_flag(rb_trace_arg_t *trace_arg)
864 return trace_arg->event;
867 VALUE
868 rb_tracearg_event(rb_trace_arg_t *trace_arg)
870 return ID2SYM(get_event_id(trace_arg->event));
873 static void
874 fill_path_and_lineno(rb_trace_arg_t *trace_arg)
876 if (UNDEF_P(trace_arg->path)) {
877 get_path_and_lineno(trace_arg->ec, trace_arg->cfp, trace_arg->event, &trace_arg->path, &trace_arg->lineno);
881 VALUE
882 rb_tracearg_lineno(rb_trace_arg_t *trace_arg)
884 fill_path_and_lineno(trace_arg);
885 return INT2FIX(trace_arg->lineno);
887 VALUE
888 rb_tracearg_path(rb_trace_arg_t *trace_arg)
890 fill_path_and_lineno(trace_arg);
891 return trace_arg->path;
894 static void
895 fill_id_and_klass(rb_trace_arg_t *trace_arg)
897 if (!trace_arg->klass_solved) {
898 if (!trace_arg->klass) {
899 rb_vm_control_frame_id_and_class(trace_arg->cfp, &trace_arg->id, &trace_arg->called_id, &trace_arg->klass);
902 if (trace_arg->klass) {
903 if (RB_TYPE_P(trace_arg->klass, T_ICLASS)) {
904 trace_arg->klass = RBASIC(trace_arg->klass)->klass;
907 else {
908 trace_arg->klass = Qnil;
911 trace_arg->klass_solved = 1;
915 VALUE
916 rb_tracearg_parameters(rb_trace_arg_t *trace_arg)
918 switch (trace_arg->event) {
919 case RUBY_EVENT_CALL:
920 case RUBY_EVENT_RETURN:
921 case RUBY_EVENT_B_CALL:
922 case RUBY_EVENT_B_RETURN: {
923 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(trace_arg->ec, trace_arg->cfp);
924 if (cfp) {
925 int is_proc = 0;
926 if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_BLOCK && !VM_FRAME_LAMBDA_P(cfp)) {
927 is_proc = 1;
929 return rb_iseq_parameters(cfp->iseq, is_proc);
931 break;
933 case RUBY_EVENT_C_CALL:
934 case RUBY_EVENT_C_RETURN: {
935 fill_id_and_klass(trace_arg);
936 if (trace_arg->klass && trace_arg->id) {
937 const rb_method_entry_t *me;
938 VALUE iclass = Qnil;
939 me = rb_method_entry_without_refinements(trace_arg->klass, trace_arg->called_id, &iclass);
940 return rb_unnamed_parameters(rb_method_entry_arity(me));
942 break;
944 case RUBY_EVENT_RAISE:
945 case RUBY_EVENT_LINE:
946 case RUBY_EVENT_CLASS:
947 case RUBY_EVENT_END:
948 case RUBY_EVENT_SCRIPT_COMPILED:
949 case RUBY_EVENT_RESCUE:
950 rb_raise(rb_eRuntimeError, "not supported by this event");
951 break;
953 return Qnil;
956 VALUE
957 rb_tracearg_method_id(rb_trace_arg_t *trace_arg)
959 fill_id_and_klass(trace_arg);
960 return trace_arg->id ? ID2SYM(trace_arg->id) : Qnil;
963 VALUE
964 rb_tracearg_callee_id(rb_trace_arg_t *trace_arg)
966 fill_id_and_klass(trace_arg);
967 return trace_arg->called_id ? ID2SYM(trace_arg->called_id) : Qnil;
970 VALUE
971 rb_tracearg_defined_class(rb_trace_arg_t *trace_arg)
973 fill_id_and_klass(trace_arg);
974 return trace_arg->klass;
977 VALUE
978 rb_tracearg_binding(rb_trace_arg_t *trace_arg)
980 rb_control_frame_t *cfp;
981 switch (trace_arg->event) {
982 case RUBY_EVENT_C_CALL:
983 case RUBY_EVENT_C_RETURN:
984 return Qnil;
986 cfp = rb_vm_get_binding_creatable_next_cfp(trace_arg->ec, trace_arg->cfp);
988 if (cfp && imemo_type_p((VALUE)cfp->iseq, imemo_iseq)) {
989 return rb_vm_make_binding(trace_arg->ec, cfp);
991 else {
992 return Qnil;
996 VALUE
997 rb_tracearg_self(rb_trace_arg_t *trace_arg)
999 return trace_arg->self;
1002 VALUE
1003 rb_tracearg_return_value(rb_trace_arg_t *trace_arg)
1005 if (trace_arg->event & (RUBY_EVENT_RETURN | RUBY_EVENT_C_RETURN | RUBY_EVENT_B_RETURN)) {
1006 /* ok */
1008 else {
1009 rb_raise(rb_eRuntimeError, "not supported by this event");
1011 if (UNDEF_P(trace_arg->data)) {
1012 rb_bug("rb_tracearg_return_value: unreachable");
1014 return trace_arg->data;
1017 VALUE
1018 rb_tracearg_raised_exception(rb_trace_arg_t *trace_arg)
1020 if (trace_arg->event & (RUBY_EVENT_RAISE | RUBY_EVENT_RESCUE)) {
1021 /* ok */
1023 else {
1024 rb_raise(rb_eRuntimeError, "not supported by this event");
1026 if (UNDEF_P(trace_arg->data)) {
1027 rb_bug("rb_tracearg_raised_exception: unreachable");
1029 return trace_arg->data;
1032 VALUE
1033 rb_tracearg_eval_script(rb_trace_arg_t *trace_arg)
1035 VALUE data = trace_arg->data;
1037 if (trace_arg->event & (RUBY_EVENT_SCRIPT_COMPILED)) {
1038 /* ok */
1040 else {
1041 rb_raise(rb_eRuntimeError, "not supported by this event");
1043 if (UNDEF_P(data)) {
1044 rb_bug("rb_tracearg_raised_exception: unreachable");
1046 if (rb_obj_is_iseq(data)) {
1047 return Qnil;
1049 else {
1050 VM_ASSERT(RB_TYPE_P(data, T_ARRAY));
1051 /* [src, iseq] */
1052 return RARRAY_AREF(data, 0);
1056 VALUE
1057 rb_tracearg_instruction_sequence(rb_trace_arg_t *trace_arg)
1059 VALUE data = trace_arg->data;
1061 if (trace_arg->event & (RUBY_EVENT_SCRIPT_COMPILED)) {
1062 /* ok */
1064 else {
1065 rb_raise(rb_eRuntimeError, "not supported by this event");
1067 if (UNDEF_P(data)) {
1068 rb_bug("rb_tracearg_raised_exception: unreachable");
1071 if (rb_obj_is_iseq(data)) {
1072 return rb_iseqw_new((const rb_iseq_t *)data);
1074 else {
1075 VM_ASSERT(RB_TYPE_P(data, T_ARRAY));
1076 VM_ASSERT(rb_obj_is_iseq(RARRAY_AREF(data, 1)));
1078 /* [src, iseq] */
1079 return rb_iseqw_new((const rb_iseq_t *)RARRAY_AREF(data, 1));
1083 VALUE
1084 rb_tracearg_object(rb_trace_arg_t *trace_arg)
1086 if (trace_arg->event & (RUBY_INTERNAL_EVENT_NEWOBJ | RUBY_INTERNAL_EVENT_FREEOBJ)) {
1087 /* ok */
1089 else {
1090 rb_raise(rb_eRuntimeError, "not supported by this event");
1092 if (UNDEF_P(trace_arg->data)) {
1093 rb_bug("rb_tracearg_object: unreachable");
1095 return trace_arg->data;
1098 static VALUE
1099 tracepoint_attr_event(rb_execution_context_t *ec, VALUE tpval)
1101 return rb_tracearg_event(get_trace_arg());
1104 static VALUE
1105 tracepoint_attr_lineno(rb_execution_context_t *ec, VALUE tpval)
1107 return rb_tracearg_lineno(get_trace_arg());
1109 static VALUE
1110 tracepoint_attr_path(rb_execution_context_t *ec, VALUE tpval)
1112 return rb_tracearg_path(get_trace_arg());
1115 static VALUE
1116 tracepoint_attr_parameters(rb_execution_context_t *ec, VALUE tpval)
1118 return rb_tracearg_parameters(get_trace_arg());
1121 static VALUE
1122 tracepoint_attr_method_id(rb_execution_context_t *ec, VALUE tpval)
1124 return rb_tracearg_method_id(get_trace_arg());
1127 static VALUE
1128 tracepoint_attr_callee_id(rb_execution_context_t *ec, VALUE tpval)
1130 return rb_tracearg_callee_id(get_trace_arg());
1133 static VALUE
1134 tracepoint_attr_defined_class(rb_execution_context_t *ec, VALUE tpval)
1136 return rb_tracearg_defined_class(get_trace_arg());
1139 static VALUE
1140 tracepoint_attr_binding(rb_execution_context_t *ec, VALUE tpval)
1142 return rb_tracearg_binding(get_trace_arg());
1145 static VALUE
1146 tracepoint_attr_self(rb_execution_context_t *ec, VALUE tpval)
1148 return rb_tracearg_self(get_trace_arg());
1151 static VALUE
1152 tracepoint_attr_return_value(rb_execution_context_t *ec, VALUE tpval)
1154 return rb_tracearg_return_value(get_trace_arg());
1157 static VALUE
1158 tracepoint_attr_raised_exception(rb_execution_context_t *ec, VALUE tpval)
1160 return rb_tracearg_raised_exception(get_trace_arg());
1163 static VALUE
1164 tracepoint_attr_eval_script(rb_execution_context_t *ec, VALUE tpval)
1166 return rb_tracearg_eval_script(get_trace_arg());
1169 static VALUE
1170 tracepoint_attr_instruction_sequence(rb_execution_context_t *ec, VALUE tpval)
1172 return rb_tracearg_instruction_sequence(get_trace_arg());
1175 static void
1176 tp_call_trace(VALUE tpval, rb_trace_arg_t *trace_arg)
1178 rb_tp_t *tp = tpptr(tpval);
1180 if (tp->func) {
1181 (*tp->func)(tpval, tp->data);
1183 else {
1184 if (tp->ractor == NULL || tp->ractor == GET_RACTOR()) {
1185 rb_proc_call_with_block((VALUE)tp->proc, 1, &tpval, Qnil);
1190 VALUE
1191 rb_tracepoint_enable(VALUE tpval)
1193 rb_tp_t *tp;
1194 tp = tpptr(tpval);
1196 if (tp->local_target_set != Qfalse) {
1197 rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
1200 if (tp->tracing) {
1201 return Qundef;
1204 if (tp->target_th) {
1205 rb_thread_add_event_hook2(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1206 RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
1208 else {
1209 rb_add_event_hook2((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1210 RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
1212 tp->tracing = 1;
1213 return Qundef;
1216 static const rb_iseq_t *
1217 iseq_of(VALUE target)
1219 VALUE iseqv = rb_funcall(rb_cISeq, rb_intern("of"), 1, target);
1220 if (NIL_P(iseqv)) {
1221 rb_raise(rb_eArgError, "specified target is not supported");
1223 else {
1224 return rb_iseqw_to_iseq(iseqv);
1228 const rb_method_definition_t *rb_method_def(VALUE method); /* proc.c */
1230 static VALUE
1231 rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
1233 rb_tp_t *tp = tpptr(tpval);
1234 const rb_iseq_t *iseq = iseq_of(target);
1235 int n = 0;
1236 unsigned int line = 0;
1237 bool target_bmethod = false;
1239 if (tp->tracing > 0) {
1240 rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
1243 if (!NIL_P(target_line)) {
1244 if ((tp->events & RUBY_EVENT_LINE) == 0) {
1245 rb_raise(rb_eArgError, "target_line is specified, but line event is not specified");
1247 else {
1248 line = NUM2UINT(target_line);
1252 VM_ASSERT(tp->local_target_set == Qfalse);
1253 RB_OBJ_WRITE(tpval, &tp->local_target_set, rb_obj_hide(rb_ident_hash_new()));
1255 /* bmethod */
1256 if (rb_obj_is_method(target)) {
1257 rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
1258 if (def->type == VM_METHOD_TYPE_BMETHOD &&
1259 (tp->events & (RUBY_EVENT_CALL | RUBY_EVENT_RETURN))) {
1260 if (def->body.bmethod.hooks == NULL) {
1261 def->body.bmethod.hooks = ZALLOC(rb_hook_list_t);
1262 def->body.bmethod.hooks->is_local = true;
1264 rb_hook_list_connect_tracepoint(target, def->body.bmethod.hooks, tpval, 0);
1265 rb_hash_aset(tp->local_target_set, target, Qfalse);
1266 target_bmethod = true;
1268 n++;
1272 /* iseq */
1273 n += rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval, line, target_bmethod);
1274 rb_hash_aset(tp->local_target_set, (VALUE)iseq, Qtrue);
1276 if ((tp->events & (RUBY_EVENT_CALL | RUBY_EVENT_RETURN)) &&
1277 iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) {
1278 rb_clear_bf_ccs();
1281 if (n == 0) {
1282 rb_raise(rb_eArgError, "can not enable any hooks");
1285 rb_yjit_tracing_invalidate_all();
1286 rb_rjit_tracing_invalidate_all(tp->events);
1288 ruby_vm_event_local_num++;
1290 tp->tracing = 1;
1292 return Qnil;
1295 static int
1296 disable_local_event_iseq_i(VALUE target, VALUE iseq_p, VALUE tpval)
1298 if (iseq_p) {
1299 rb_iseq_remove_local_tracepoint_recursively((rb_iseq_t *)target, tpval);
1301 else {
1302 /* bmethod */
1303 rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
1304 rb_hook_list_t *hooks = def->body.bmethod.hooks;
1305 VM_ASSERT(hooks != NULL);
1306 rb_hook_list_remove_tracepoint(hooks, tpval);
1308 if (hooks->events == 0) {
1309 rb_hook_list_free(def->body.bmethod.hooks);
1310 def->body.bmethod.hooks = NULL;
1313 return ST_CONTINUE;
1316 VALUE
1317 rb_tracepoint_disable(VALUE tpval)
1319 rb_tp_t *tp;
1321 tp = tpptr(tpval);
1323 if (tp->local_target_set) {
1324 rb_hash_foreach(tp->local_target_set, disable_local_event_iseq_i, tpval);
1325 RB_OBJ_WRITE(tpval, &tp->local_target_set, Qfalse);
1326 ruby_vm_event_local_num--;
1328 else {
1329 if (tp->target_th) {
1330 rb_thread_remove_event_hook_with_data(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tpval);
1332 else {
1333 rb_remove_event_hook_with_data((rb_event_hook_func_t)tp_call_trace, tpval);
1336 tp->tracing = 0;
1337 tp->target_th = NULL;
1338 return Qundef;
1341 void
1342 rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line)
1344 rb_tp_t *tp = tpptr(tpval);
1345 rb_event_hook_t *hook = alloc_event_hook((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1346 RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
1347 hook->filter.target_line = target_line;
1348 hook_list_connect(target, list, hook, FALSE);
1351 void
1352 rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval)
1354 rb_event_hook_t *hook = list->hooks;
1355 rb_event_flag_t events = 0;
1357 while (hook) {
1358 if (hook->data == tpval) {
1359 hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
1360 list->need_clean = true;
1362 else if ((hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) == 0) {
1363 events |= hook->events;
1365 hook = hook->next;
1368 list->events = events;
1371 static VALUE
1372 tracepoint_enable_m(rb_execution_context_t *ec, VALUE tpval, VALUE target, VALUE target_line, VALUE target_thread)
1374 rb_tp_t *tp = tpptr(tpval);
1375 int previous_tracing = tp->tracing;
1377 if (target_thread == sym_default) {
1378 if (rb_block_given_p() && NIL_P(target) && NIL_P(target_line)) {
1379 target_thread = rb_thread_current();
1381 else {
1382 target_thread = Qnil;
1386 /* check target_thread */
1387 if (RTEST(target_thread)) {
1388 if (tp->target_th) {
1389 rb_raise(rb_eArgError, "can not override target_thread filter");
1391 tp->target_th = rb_thread_ptr(target_thread);
1393 RUBY_ASSERT(tp->target_th->self == target_thread);
1394 RB_OBJ_WRITTEN(tpval, Qundef, target_thread);
1396 else {
1397 tp->target_th = NULL;
1400 if (NIL_P(target)) {
1401 if (!NIL_P(target_line)) {
1402 rb_raise(rb_eArgError, "only target_line is specified");
1404 rb_tracepoint_enable(tpval);
1406 else {
1407 rb_tracepoint_enable_for_target(tpval, target, target_line);
1410 if (rb_block_given_p()) {
1411 return rb_ensure(rb_yield, Qundef,
1412 previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
1413 tpval);
1415 else {
1416 return RBOOL(previous_tracing);
1420 static VALUE
1421 tracepoint_disable_m(rb_execution_context_t *ec, VALUE tpval)
1423 rb_tp_t *tp = tpptr(tpval);
1424 int previous_tracing = tp->tracing;
1426 if (rb_block_given_p()) {
1427 if (tp->local_target_set != Qfalse) {
1428 rb_raise(rb_eArgError, "can't disable a targeting TracePoint in a block");
1431 rb_tracepoint_disable(tpval);
1432 return rb_ensure(rb_yield, Qundef,
1433 previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
1434 tpval);
1436 else {
1437 rb_tracepoint_disable(tpval);
1438 return RBOOL(previous_tracing);
1442 VALUE
1443 rb_tracepoint_enabled_p(VALUE tpval)
1445 rb_tp_t *tp = tpptr(tpval);
1446 return RBOOL(tp->tracing);
1449 static VALUE
1450 tracepoint_enabled_p(rb_execution_context_t *ec, VALUE tpval)
1452 return rb_tracepoint_enabled_p(tpval);
1455 static VALUE
1456 tracepoint_new(VALUE klass, rb_thread_t *target_th, rb_event_flag_t events, void (func)(VALUE, void*), void *data, VALUE proc)
1458 VALUE tpval = tp_alloc(klass);
1459 rb_tp_t *tp;
1460 TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);
1462 RB_OBJ_WRITE(tpval, &tp->proc, proc);
1463 tp->ractor = rb_ractor_shareable_p(proc) ? NULL : GET_RACTOR();
1464 tp->func = func;
1465 tp->data = data;
1466 tp->events = events;
1467 tp->self = tpval;
1469 return tpval;
1472 VALUE
1473 rb_tracepoint_new(VALUE target_thval, rb_event_flag_t events, void (*func)(VALUE, void *), void *data)
1475 rb_thread_t *target_th = NULL;
1477 if (RTEST(target_thval)) {
1478 target_th = rb_thread_ptr(target_thval);
1479 /* TODO: Test it!
1480 * Warning: This function is not tested.
1483 return tracepoint_new(rb_cTracePoint, target_th, events, func, data, Qundef);
1486 static VALUE
1487 tracepoint_new_s(rb_execution_context_t *ec, VALUE self, VALUE args)
1489 rb_event_flag_t events = 0;
1490 long i;
1491 long argc = RARRAY_LEN(args);
1493 if (argc > 0) {
1494 for (i=0; i<argc; i++) {
1495 events |= symbol2event_flag(RARRAY_AREF(args, i));
1498 else {
1499 events = RUBY_EVENT_TRACEPOINT_ALL;
1502 if (!rb_block_given_p()) {
1503 rb_raise(rb_eArgError, "must be called with a block");
1506 return tracepoint_new(self, 0, events, 0, 0, rb_block_proc());
1509 static VALUE
1510 tracepoint_trace_s(rb_execution_context_t *ec, VALUE self, VALUE args)
1512 VALUE trace = tracepoint_new_s(ec, self, args);
1513 rb_tracepoint_enable(trace);
1514 return trace;
1517 static VALUE
1518 tracepoint_inspect(rb_execution_context_t *ec, VALUE self)
1520 rb_tp_t *tp = tpptr(self);
1521 rb_trace_arg_t *trace_arg = GET_EC()->trace_arg;
1523 if (trace_arg) {
1524 switch (trace_arg->event) {
1525 case RUBY_EVENT_LINE:
1527 VALUE sym = rb_tracearg_method_id(trace_arg);
1528 if (NIL_P(sym))
1529 break;
1530 return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE":%d in '%"PRIsVALUE"'>",
1531 rb_tracearg_event(trace_arg),
1532 rb_tracearg_path(trace_arg),
1533 FIX2INT(rb_tracearg_lineno(trace_arg)),
1534 sym);
1536 case RUBY_EVENT_CALL:
1537 case RUBY_EVENT_C_CALL:
1538 case RUBY_EVENT_RETURN:
1539 case RUBY_EVENT_C_RETURN:
1540 return rb_sprintf("#<TracePoint:%"PRIsVALUE" '%"PRIsVALUE"' %"PRIsVALUE":%d>",
1541 rb_tracearg_event(trace_arg),
1542 rb_tracearg_method_id(trace_arg),
1543 rb_tracearg_path(trace_arg),
1544 FIX2INT(rb_tracearg_lineno(trace_arg)));
1545 case RUBY_EVENT_THREAD_BEGIN:
1546 case RUBY_EVENT_THREAD_END:
1547 return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE">",
1548 rb_tracearg_event(trace_arg),
1549 rb_tracearg_self(trace_arg));
1550 default:
1551 break;
1553 return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE":%d>",
1554 rb_tracearg_event(trace_arg),
1555 rb_tracearg_path(trace_arg),
1556 FIX2INT(rb_tracearg_lineno(trace_arg)));
1558 else {
1559 return rb_sprintf("#<TracePoint:%s>", tp->tracing ? "enabled" : "disabled");
1563 static void
1564 tracepoint_stat_event_hooks(VALUE hash, VALUE key, rb_event_hook_t *hook)
1566 int active = 0, deleted = 0;
1568 while (hook) {
1569 if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
1570 deleted++;
1572 else {
1573 active++;
1575 hook = hook->next;
1578 rb_hash_aset(hash, key, rb_ary_new3(2, INT2FIX(active), INT2FIX(deleted)));
1581 static VALUE
1582 tracepoint_stat_s(rb_execution_context_t *ec, VALUE self)
1584 rb_vm_t *vm = GET_VM();
1585 VALUE stat = rb_hash_new();
1587 tracepoint_stat_event_hooks(stat, vm->self, rb_ec_ractor_hooks(ec)->hooks);
1588 /* TODO: thread local hooks */
1590 return stat;
1593 static VALUE
1594 disallow_reentry(VALUE val)
1596 rb_trace_arg_t *arg = (rb_trace_arg_t *)val;
1597 rb_execution_context_t *ec = GET_EC();
1598 if (ec->trace_arg != NULL) rb_bug("should be NULL, but %p", (void *)ec->trace_arg);
1599 ec->trace_arg = arg;
1600 return Qnil;
1603 static VALUE
1604 tracepoint_allow_reentry(rb_execution_context_t *ec, VALUE self)
1606 const rb_trace_arg_t *arg = ec->trace_arg;
1607 if (arg == NULL) rb_raise(rb_eRuntimeError, "No need to allow reentrance.");
1608 ec->trace_arg = NULL;
1609 return rb_ensure(rb_yield, Qnil, disallow_reentry, (VALUE)arg);
1612 #include "trace_point.rbinc"
1614 /* This function is called from inits.c */
1615 void
1616 Init_vm_trace(void)
1618 sym_default = ID2SYM(rb_intern_const("default"));
1620 /* trace_func */
1621 rb_define_global_function("set_trace_func", set_trace_func, 1);
1622 rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
1623 rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);
1625 rb_cTracePoint = rb_define_class("TracePoint", rb_cObject);
1626 rb_undef_alloc_func(rb_cTracePoint);
/*
 * Ruby actually has two separate mechanisms for enqueueing work from contexts
 * where it is not safe to run Ruby code, to run later on when it is safe. One
 * is async-signal-safe but more limited, and accessed through the
 * `rb_postponed_job_preregister` and `rb_postponed_job_trigger` functions. The
 * other is more flexible but cannot be used in signal handlers, and is accessed
 * through the `rb_workqueue_register` function.
 *
 * The postponed job functions form part of Ruby's extension API, but the
 * workqueue functions are for internal use only.
 */
1641 struct rb_workqueue_job {
1642 struct ccan_list_node jnode; /* <=> vm->workqueue */
1643 rb_postponed_job_func_t func;
1644 void *data;
1647 // Used for VM memsize reporting. Returns the size of a list of rb_workqueue_job
1648 // structs. Defined here because the struct definition lives here as well.
1649 size_t
1650 rb_vm_memsize_workqueue(struct ccan_list_head *workqueue)
1652 struct rb_workqueue_job *work = 0;
1653 size_t size = 0;
1655 ccan_list_for_each(workqueue, work, jnode) {
1656 size += sizeof(struct rb_workqueue_job);
1659 return size;
1663 * thread-safe and called from non-Ruby thread
1664 * returns FALSE on failure (ENOMEM), TRUE otherwise
1667 rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data)
1669 struct rb_workqueue_job *wq_job = malloc(sizeof(*wq_job));
1670 rb_vm_t *vm = GET_VM();
1672 if (!wq_job) return FALSE;
1673 wq_job->func = func;
1674 wq_job->data = data;
1676 rb_nativethread_lock_lock(&vm->workqueue_lock);
1677 ccan_list_add_tail(&vm->workqueue, &wq_job->jnode);
1678 rb_nativethread_lock_unlock(&vm->workqueue_lock);
1680 // TODO: current implementation affects only main ractor
1681 RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(rb_vm_main_ractor_ec(vm));
1683 return TRUE;
1686 #define PJOB_TABLE_SIZE (sizeof(rb_atomic_t) * CHAR_BIT)
1687 /* pre-registered jobs table, for async-safe jobs */
1688 typedef struct rb_postponed_job_queue {
1689 struct {
1690 rb_postponed_job_func_t func;
1691 void *data;
1692 } table[PJOB_TABLE_SIZE];
1693 /* Bits in this are set when the corresponding entry in prereg_table has non-zero
1694 * triggered_count; i.e. somebody called rb_postponed_job_trigger */
1695 rb_atomic_t triggered_bitset;
1696 } rb_postponed_job_queues_t;
1698 void
1699 rb_vm_postponed_job_queue_init(rb_vm_t *vm)
1701 /* use mimmalloc; postponed job registration is a dependency of objspace, so this gets
1702 * called _VERY_ early inside Init_BareVM */
1703 rb_postponed_job_queues_t *pjq = ruby_mimmalloc(sizeof(rb_postponed_job_queues_t));
1704 pjq->triggered_bitset = 0;
1705 memset(pjq->table, 0, sizeof(pjq->table));
1706 vm->postponed_job_queue = pjq;
1709 static rb_execution_context_t *
1710 get_valid_ec(rb_vm_t *vm)
1712 rb_execution_context_t *ec = rb_current_execution_context(false);
1713 if (ec == NULL) ec = rb_vm_main_ractor_ec(vm);
1714 return ec;
1717 void
1718 rb_vm_postponed_job_atfork(void)
1720 rb_vm_t *vm = GET_VM();
1721 rb_postponed_job_queues_t *pjq = vm->postponed_job_queue;
1722 /* make sure we set the interrupt flag on _this_ thread if we carried any pjobs over
1723 * from the other side of the fork */
1724 if (pjq->triggered_bitset) {
1725 RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(get_valid_ec(vm));
1730 /* Frees the memory managed by the postponed job infrastructure at shutdown */
1731 void
1732 rb_vm_postponed_job_free(void)
1734 rb_vm_t *vm = GET_VM();
1735 ruby_xfree(vm->postponed_job_queue);
1736 vm->postponed_job_queue = NULL;
1739 // Used for VM memsize reporting. Returns the total size of the postponed job
1740 // queue infrastructure.
1741 size_t
1742 rb_vm_memsize_postponed_job_queue(void)
1744 return sizeof(rb_postponed_job_queues_t);
1748 rb_postponed_job_handle_t
1749 rb_postponed_job_preregister(unsigned int flags, rb_postponed_job_func_t func, void *data)
1751 /* The doc comments say that this function should be called under the GVL, because
1752 * that is actually required to get the guarantee that "if a given (func, data) pair
1753 * was already pre-registered, this method will return the same handle instance".
1755 * However, the actual implementation here is called without the GVL, from inside
1756 * rb_postponed_job_register, to support that legacy interface. In the presence
1757 * of concurrent calls to both _preregister and _register functions on the same
1758 * func, however, the data may get mixed up between them. */
1760 rb_postponed_job_queues_t *pjq = GET_VM()->postponed_job_queue;
1761 for (unsigned int i = 0; i < PJOB_TABLE_SIZE; i++) {
1762 /* Try and set this slot to equal `func` */
1763 rb_postponed_job_func_t existing_func = (rb_postponed_job_func_t)RUBY_ATOMIC_PTR_CAS(pjq->table[i], NULL, (void *)func);
1764 if (existing_func == NULL || existing_func == func) {
1765 /* Either this slot was NULL, and we set it to func, or, this slot was already equal to func.
1766 * In either case, clobber the data with our data. Note that concurrent calls to
1767 * rb_postponed_job_register with the same func & different data will result in either of the
1768 * datas being written */
1769 RUBY_ATOMIC_PTR_EXCHANGE(pjq->table[i].data, data);
1770 return (rb_postponed_job_handle_t)i;
1772 else {
1773 /* Try the next slot if this one already has a func in it */
1774 continue;
1778 /* full */
1779 return POSTPONED_JOB_HANDLE_INVALID;
1782 void
1783 rb_postponed_job_trigger(rb_postponed_job_handle_t h)
1785 rb_vm_t *vm = GET_VM();
1786 rb_postponed_job_queues_t *pjq = vm->postponed_job_queue;
1788 RUBY_ATOMIC_OR(pjq->triggered_bitset, (((rb_atomic_t)1UL) << h));
1789 RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(get_valid_ec(vm));
1793 static int
1794 pjob_register_legacy_impl(unsigned int flags, rb_postponed_job_func_t func, void *data)
1796 /* We _know_ calling preregister from a signal handler like this is racy; what is
1797 * and is not promised is very exhaustively documented in debug.h */
1798 rb_postponed_job_handle_t h = rb_postponed_job_preregister(0, func, data);
1799 if (h == POSTPONED_JOB_HANDLE_INVALID) {
1800 return 0;
1802 rb_postponed_job_trigger(h);
1803 return 1;
1807 rb_postponed_job_register(unsigned int flags, rb_postponed_job_func_t func, void *data)
1809 return pjob_register_legacy_impl(flags, func, data);
1813 rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
1815 return pjob_register_legacy_impl(flags, func, data);
1819 void
1820 rb_postponed_job_flush(rb_vm_t *vm)
1822 rb_postponed_job_queues_t *pjq = GET_VM()->postponed_job_queue;
1823 rb_execution_context_t *ec = GET_EC();
1824 const rb_atomic_t block_mask = POSTPONED_JOB_INTERRUPT_MASK | TRAP_INTERRUPT_MASK;
1825 volatile rb_atomic_t saved_mask = ec->interrupt_mask & block_mask;
1826 VALUE volatile saved_errno = ec->errinfo;
1827 struct ccan_list_head tmp;
1829 ccan_list_head_init(&tmp);
1831 rb_nativethread_lock_lock(&vm->workqueue_lock);
1832 ccan_list_append_list(&tmp, &vm->workqueue);
1833 rb_nativethread_lock_unlock(&vm->workqueue_lock);
1835 rb_atomic_t triggered_bits = RUBY_ATOMIC_EXCHANGE(pjq->triggered_bitset, 0);
1837 ec->errinfo = Qnil;
1838 /* mask POSTPONED_JOB dispatch */
1839 ec->interrupt_mask |= block_mask;
1841 EC_PUSH_TAG(ec);
1842 if (EC_EXEC_TAG() == TAG_NONE) {
1843 /* execute postponed jobs */
1844 while (triggered_bits) {
1845 unsigned int i = bit_length(triggered_bits) - 1;
1846 triggered_bits ^= ((1UL) << i); /* toggle ith bit off */
1847 rb_postponed_job_func_t func = pjq->table[i].func;
1848 void *data = pjq->table[i].data;
1849 (func)(data);
1852 /* execute workqueue jobs */
1853 struct rb_workqueue_job *wq_job;
1854 while ((wq_job = ccan_list_pop(&tmp, struct rb_workqueue_job, jnode))) {
1855 rb_postponed_job_func_t func = wq_job->func;
1856 void *data = wq_job->data;
1858 free(wq_job);
1859 (func)(data);
1862 EC_POP_TAG();
1864 /* restore POSTPONED_JOB mask */
1865 ec->interrupt_mask &= ~(saved_mask ^ block_mask);
1866 ec->errinfo = saved_errno;
1868 /* If we threw an exception, there might be leftover workqueue items; carry them over
1869 * to a subsequent execution of flush */
1870 if (!ccan_list_empty(&tmp)) {
1871 rb_nativethread_lock_lock(&vm->workqueue_lock);
1872 ccan_list_prepend_list(&vm->workqueue, &tmp);
1873 rb_nativethread_lock_unlock(&vm->workqueue_lock);
1875 RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
1877 /* likewise with any remaining-to-be-executed bits of the preregistered postponed
1878 * job table */
1879 if (triggered_bits) {
1880 RUBY_ATOMIC_OR(pjq->triggered_bitset, triggered_bits);
1881 RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());