/**********************************************************************

  vm_insnhelper.c - instruction helper functions.

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/
#include "ruby/internal/config.h"

#include "debug_counter.h"
#include "internal/class.h"
#include "internal/compar.h"
#include "internal/hash.h"
#include "internal/numeric.h"
#include "internal/proc.h"
#include "internal/random.h"
#include "internal/variable.h"
#include "internal/struct.h"

/* finish iseq array */
#include "insns_info.inc"
extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
                                         int argc, const VALUE *argv, int priv);
static const struct rb_callcache vm_empty_cc;
static const struct rb_callcache vm_empty_cc_for_super;
/* control stack frame */

static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
static VALUE
ruby_vm_special_exception_copy(VALUE exc)
{
    VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
    rb_obj_copy_ivar(e, exc);
    return e;
}
NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
static void
ec_stack_overflow(rb_execution_context_t *ec, int setup)
{
    VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
    ec->raised_flag = RAISED_STACKOVERFLOW;
    if (setup) {
        VALUE at = rb_ec_backtrace_object(ec);
        mesg = ruby_vm_special_exception_copy(mesg);
        rb_ivar_set(mesg, idBt, at);
        rb_ivar_set(mesg, idBt_locations, at);
    }
    ec->errinfo = mesg;
    EC_JUMP_TAG(ec, TAG_RAISE);
}
NORETURN(static void vm_stackoverflow(void));

static void
vm_stackoverflow(void)
{
    ec_stack_overflow(GET_EC(), TRUE);
}
NORETURN(void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit));
void
rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
{
    if (rb_during_gc()) {
        rb_bug("system stack overflow during GC. Faulty native extension?");
    }
    if (crit) {
        ec->raised_flag = RAISED_STACKOVERFLOW;
        ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
        EC_JUMP_TAG(ec, TAG_RAISE);
    }
#ifdef USE_SIGALTSTACK
    ec_stack_overflow(ec, TRUE);
#else
    ec_stack_overflow(ec, FALSE);
#endif
}
static inline void stack_check(rb_execution_context_t *ec);
#if VM_CHECK_MODE > 0
static int
callable_class_p(VALUE klass)
{
#if VM_CHECK_MODE >= 2
    if (!klass) return FALSE;
    switch (RB_BUILTIN_TYPE(klass)) {
      default:
        break;
      case T_ICLASS:
        if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
      case T_MODULE:
        return TRUE;
    }
    while (klass) {
        if (klass == rb_cBasicObject) {
            return TRUE;
        }
        klass = RCLASS_SUPER(klass);
    }
    return FALSE;
#else
    return klass != 0;
#endif
}
static int
callable_method_entry_p(const rb_callable_method_entry_t *cme)
{
    if (cme == NULL) {
        return TRUE;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment));

        if (callable_class_p(cme->defined_class)) {
            return TRUE;
        }
        else {
            return FALSE;
        }
    }
}
static void
vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
{
    unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
    enum imemo_type cref_or_me_type = imemo_env; /* impossible value */

    if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
        cref_or_me_type = imemo_type(cref_or_me);
    }
    if (type & VM_FRAME_FLAG_BMETHOD) {
        req_me = TRUE;
    }

    if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
        rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
    }
    if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
        rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
    }

    if (req_me) {
        if (cref_or_me_type != imemo_ment) {
            rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
        }
    }
    else {
        if (req_cref && cref_or_me_type != imemo_cref) {
            rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
        }
        else { /* cref or Qfalse */
            if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
                if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC) && (cref_or_me_type == imemo_ment)) {
                    /* ignore */
                }
                else {
                    rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
                }
            }
        }
    }

    if (cref_or_me_type == imemo_ment) {
        const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;

        if (!callable_method_entry_p(me)) {
            rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
        }
    }

    if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
        VM_ASSERT(iseq == NULL ||
                  RBASIC_CLASS((VALUE)iseq) == 0 || // dummy frame for loading
                  RUBY_VM_NORMAL_ISEQ_P(iseq)       // argument error
                  );
    }
    else {
        VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
    }
}
static void
vm_check_frame(VALUE type,
               VALUE specval,
               VALUE cref_or_me,
               const rb_iseq_t *iseq)
{
    VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
    VM_ASSERT(FIXNUM_P(type));

#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
    case magic: \
      vm_check_frame_detail(type, req_block, req_me, req_cref, \
                            specval, cref_or_me, is_cframe, iseq); \
      break
    switch (given_magic) {
        /*                           BLK    ME     CREF   CFRAME */
        CHECK(VM_FRAME_MAGIC_METHOD, TRUE,  TRUE,  FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_CLASS,  TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_TOP,    TRUE,  FALSE, TRUE,  FALSE);
        CHECK(VM_FRAME_MAGIC_CFUNC,  TRUE,  TRUE,  FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_BLOCK,  FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_IFUNC,  FALSE, FALSE, FALSE, TRUE);
        CHECK(VM_FRAME_MAGIC_EVAL,   FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
        CHECK(VM_FRAME_MAGIC_DUMMY,  TRUE,  FALSE, FALSE, FALSE);
      default:
        rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
    }
#undef CHECK
}
static VALUE vm_stack_canary; /* Initialized later */
static bool vm_stack_canary_was_born = false;
// Return the index of the instruction right before the given PC.
// This is needed because insn_entry advances PC before the insn body.
static unsigned int
previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
{
    unsigned int pos = 0;
    while (pos < ISEQ_BODY(iseq)->iseq_size) {
        int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
        unsigned int next_pos = pos + insn_len(opcode);
        if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
            return pos;
        }
        pos = next_pos;
    }
    rb_bug("failed to find the previous insn");
}
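
/* Illustrative sketch (not compiled): if the encoded iseq were
 *   [putself (len 1), opt_send_without_block (len 2), leave (len 1)]
 * and a handler observes pc == iseq_encoded + 3, the scan above stops at
 * pos == 1, i.e. the opt_send_without_block whose entry just advanced PC. */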
void
rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
{
    const struct rb_control_frame_struct *reg_cfp = ec->cfp;
    const struct rb_iseq_struct *iseq;

    if (! LIKELY(vm_stack_canary_was_born)) {
        return; /* :FIXME: isn't it rather fatal to enter this branch? */
    }
    else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
        /* This is at the very beginning of a thread. cfp does not exist. */
        return;
    }
    else if (! (iseq = GET_ISEQ())) {
        return;
    }
    else if (LIKELY(sp[0] != vm_stack_canary)) {
        return;
    }
    else {
        /* we are going to call methods below; squash the canary to
         * prevent infinite loop. */
        sp[0] = Qundef;
    }

    const VALUE *orig = rb_iseq_original_iseq(iseq);
    const VALUE iseqw = rb_iseqw_new(iseq);
    const VALUE inspection = rb_inspect(iseqw);
    const char *stri = rb_str_to_cstr(inspection);
    const VALUE disasm = rb_iseq_disasm(iseq);
    const char *strd = rb_str_to_cstr(disasm);
    const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
    const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
    const char *name = insn_name(insn);

    /* rb_bug() is not capable of outputting this large contents.  It
       is designed to run from a SIGSEGV handler, which tends to be
       very restricted. */
    ruby_debug_printf(
        "We are killing the stack canary set by %s, "
        "at %s@pc=%"PRIdPTR"\n"
        "watch out the C stack trace.\n"
        "%s",
        name, stri, pos, strd);
    rb_bug("see above.");
}
#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)

#else
#define vm_check_canary(ec, sp)
#define vm_check_frame(a, b, c, d)
#endif /* VM_CHECK_MODE > 0 */
#if USE_DEBUG_COUNTER
static void
vm_push_frame_debug_counter_inc(
    const struct rb_execution_context_struct *ec,
    const struct rb_control_frame_struct *reg_cfp,
    VALUE type)
{
    const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);

    RB_DEBUG_COUNTER_INC(frame_push);

    if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
        const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
        const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
        if (prev) {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_R2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_R2C);
            }
        }
        else {
            if (curr) {
                RB_DEBUG_COUNTER_INC(frame_C2R);
            }
            else {
                RB_DEBUG_COUNTER_INC(frame_C2C);
            }
        }
    }

    switch (type & VM_FRAME_MAGIC_MASK) {
      case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
      case VM_FRAME_MAGIC_BLOCK:  RB_DEBUG_COUNTER_INC(frame_push_block);  return;
      case VM_FRAME_MAGIC_CLASS:  RB_DEBUG_COUNTER_INC(frame_push_class);  return;
      case VM_FRAME_MAGIC_TOP:    RB_DEBUG_COUNTER_INC(frame_push_top);    return;
      case VM_FRAME_MAGIC_CFUNC:  RB_DEBUG_COUNTER_INC(frame_push_cfunc);  return;
      case VM_FRAME_MAGIC_IFUNC:  RB_DEBUG_COUNTER_INC(frame_push_ifunc);  return;
      case VM_FRAME_MAGIC_EVAL:   RB_DEBUG_COUNTER_INC(frame_push_eval);   return;
      case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
      case VM_FRAME_MAGIC_DUMMY:  RB_DEBUG_COUNTER_INC(frame_push_dummy);  return;
    }

    rb_bug("unreachable");
}
#else
#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
#endif
// Return a poison value to be set above the stack top to verify leafness.
VALUE
rb_vm_stack_canary(void)
{
#if VM_CHECK_MODE > 0
    return vm_stack_canary;
#else
    return 0;
#endif
}
STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS,   VM_ENV_DATA_INDEX_FLAGS   == -0);
static void
vm_push_frame(rb_execution_context_t *ec,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE specval,
              VALUE cref_or_me,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              int stack_max)
{
    rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);

    vm_check_frame(type, specval, cref_or_me, iseq);
    VM_ASSERT(local_size >= 0);

    /* check stack overflow */
    CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
    vm_check_canary(ec, sp);

    /* setup vm value stack */

    /* initialize local variables */
    for (int i=0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* setup ep with managing data */
    *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
    *sp++ = specval     /* ep[-1] / block handler or prev env ptr */;
    *sp++ = type;       /* ep[-0] / ENV_FLAGS */

    /* setup new frame */
    *cfp = (const struct rb_control_frame_struct) {
        .pc         = pc,
        .sp         = sp,
        .iseq       = iseq,
        .self       = self,
        .ep         = sp - 1,
        .block_code = NULL,
#if VM_DEBUG_BP_CHECK
        .bp_check   = sp,
#endif
        .jit_return = NULL
    };

    ec->cfp = cfp;

    if (VMDEBUG == 2) {
        SDR();
    }
    vm_push_frame_debug_counter_inc(ec, cfp, type);
}
void
rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
{
    rb_control_frame_t *cfp = ec->cfp;

    if (VM_CHECK_MODE >= 4) rb_gc_verify_internal_consistency();
    if (VMDEBUG == 2)       SDR();

    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}
/* return TRUE if the frame is finished */
static inline int
vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];

    if (VM_CHECK_MODE >= 4) rb_gc_verify_internal_consistency();
    if (VMDEBUG == 2)       SDR();

    RUBY_VM_CHECK_INTS(ec);
    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    return flags & VM_FRAME_FLAG_FINISH;
}
void
rb_vm_pop_frame(rb_execution_context_t *ec)
{
    vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
}
// Pushes a pseudo frame whose file name is fname.
VALUE
rb_vm_push_frame_fname(rb_execution_context_t *ec, VALUE fname)
{
    VALUE tmpbuf = rb_imemo_tmpbuf_auto_free_pointer();
    void *ptr = ruby_xcalloc(sizeof(struct rb_iseq_constant_body) + sizeof(struct rb_iseq_struct), 1);
    rb_imemo_tmpbuf_set_ptr(tmpbuf, ptr);

    struct rb_iseq_struct *dmy_iseq = (struct rb_iseq_struct *)ptr;
    struct rb_iseq_constant_body *dmy_body = (struct rb_iseq_constant_body *)&dmy_iseq[1];
    dmy_iseq->body = dmy_body;
    dmy_body->type = ISEQ_TYPE_TOP;
    dmy_body->location.pathobj = fname;

    vm_push_frame(ec,
                  dmy_iseq, //const rb_iseq_t *iseq,
                  VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, // VALUE type,
                  ec->cfp->self, // VALUE self,
                  VM_BLOCK_HANDLER_NONE, // VALUE specval,
                  Qfalse, // VALUE cref_or_me,
                  NULL, // const VALUE *pc,
                  ec->cfp->sp, // VALUE *sp,
                  0, // int local_size,
                  0); // int stack_max

    return tmpbuf;
}
/* method dispatch */
static VALUE
rb_arity_error_new(int argc, int min, int max)
{
    VALUE err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d", argc, min);
    if (min == max) {
        /* max is not needed */
    }
    else if (max == UNLIMITED_ARGUMENTS) {
        rb_str_cat_cstr(err_mess, "+");
    }
    else {
        rb_str_catf(err_mess, "..%d", max);
    }
    rb_str_cat_cstr(err_mess, ")");
    return rb_exc_new3(rb_eArgError, err_mess);
}
void
rb_error_arity(int argc, int min, int max)
{
    rb_exc_raise(rb_arity_error_new(argc, min, max));
}
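
/* Example messages produced by rb_arity_error_new() (illustrative only):
 *   rb_arity_error_new(1, 2, 2)  => "wrong number of arguments (given 1, expected 2)"
 *   rb_arity_error_new(1, 2, UNLIMITED_ARGUMENTS)
 *                                => "wrong number of arguments (given 1, expected 2+)"
 *   rb_arity_error_new(5, 2, 3)  => "wrong number of arguments (given 5, expected 2..3)" */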
NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));

static void
vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
{
    /* forcibly remember the env value */
    rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
    VM_FORCE_WRITE(&ep[index], v);
    VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
    RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
}
// YJIT assumes this function never runs GC
static inline void
vm_env_write(const VALUE *ep, int index, VALUE v)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
        VM_STACK_ENV_WRITE(ep, index, v);
    }
    else {
        vm_env_write_slowpath(ep, index, v);
    }
}
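
/* Fast/slow path split (sketch): an env that has escaped to the heap has
 * VM_ENV_FLAG_WB_REQUIRED set, so a store into it must first notify the
 * generational GC via rb_gc_writebarrier_remember(). Envs still living on
 * the VM stack take the plain VM_STACK_ENV_WRITE() store above. */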
void
rb_vm_env_write(const VALUE *ep, int index, VALUE v)
{
    vm_env_write(ep, index, v);
}
VALUE
rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
{
    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        return Qnil;
    }
    else {
        switch (vm_block_handler_type(block_handler)) {
          case block_handler_type_iseq:
          case block_handler_type_ifunc:
            return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
          case block_handler_type_symbol:
            return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
          case block_handler_type_proc:
            return VM_BH_TO_PROC(block_handler);
          default:
            VM_UNREACHABLE(rb_vm_bh_to_procval);
        }
    }
}
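
/* The four block handler representations dispatched above (sketch, see
 * vm_block_handler_type): an iseq or ifunc block captured on the stack, a
 * Symbol (e.g. a block passed as &:to_s), or an already materialized Proc.
 * Only the first two need rb_vm_make_proc() to allocate a fresh Proc. */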
#if VM_CHECK_MODE > 0
static int
vm_svar_valid_p(VALUE svar)
{
    if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
        switch (imemo_type(svar)) {
          case imemo_svar:
          case imemo_cref:
          case imemo_ment:
            return TRUE;
          default:
            break;
        }
    }
    rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
    return FALSE;
}
#endif
static inline struct vm_svar *
lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
{
    VALUE svar;

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
    }
    else {
        svar = ec->root_svar;
    }

    VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));

    return (struct vm_svar *)svar;
}
static inline void
lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
{
    VM_ASSERT(vm_svar_valid_p((VALUE)svar));

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
    }
    else {
        RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
    }
}
static VALUE
lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
{
    const struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;

    switch (key) {
      case VM_SVAR_LASTLINE:
        return svar->lastline;
      case VM_SVAR_BACKREF:
        return svar->backref;
      default: {
        const VALUE ary = svar->others;

        if (NIL_P(ary)) {
            return Qnil;
        }
        else {
            return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
        }
      }
    }
}
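
/* Keys used with lep_svar_get/set (sketch): VM_SVAR_LASTLINE backs $_,
 * VM_SVAR_BACKREF backs $~, and keys >= VM_SVAR_EXTRA_START land in the
 * `others` array. The svar is resolved at the local ep (lep), so block
 * frames share these special variables with their enclosing method frame. */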
static struct vm_svar *
svar_new(VALUE obj)
{
    struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
    *((VALUE *)&svar->lastline) = Qnil;
    *((VALUE *)&svar->backref) = Qnil;
    *((VALUE *)&svar->others) = Qnil;

    return svar;
}
static void
lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
{
    struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
        lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
    }

    switch (key) {
      case VM_SVAR_LASTLINE:
        RB_OBJ_WRITE(svar, &svar->lastline, val);
        return;
      case VM_SVAR_BACKREF:
        RB_OBJ_WRITE(svar, &svar->backref, val);
        return;
      default: {
        VALUE ary = svar->others;

        if (NIL_P(ary)) {
            RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
        }
        rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
      }
    }
}
static inline VALUE
vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        val = lep_svar_get(ec, lep, key);
    }
    else {
        VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);

        if (type & 0x01) {
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}
static inline VALUE
vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
{
    VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
    int nth = 0;

    if (type & 0x01) {
        switch (type >> 1) {
          case '&':
          case '`':
          case '\'':
          case '+':
            return rb_reg_last_defined(backref);
          default:
            rb_bug("unexpected back-ref");
        }
    }
    else {
        nth = (int)(type >> 1);
    }
    return rb_reg_nth_defined(nth, backref);
}
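
/* Encoding of `type` for vm_getspecial/vm_backref_defined (sketch): the
 * low bit selects named back-refs, the remaining bits carry the data:
 *   type == ('&' << 1) | 1  -> $&  (rb_reg_last_match)
 *   type == ('`' << 1) | 1  -> $`  (rb_reg_match_pre)
 *   type == (3 << 1)        -> $3  (rb_reg_nth_match(3, backref)) */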
PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
static rb_callable_method_entry_t *
check_method_entry(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return (rb_callable_method_entry_t *)obj;
      case imemo_cref:
        return NULL;
      case imemo_svar:
        if (can_be_svar) {
            return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_method_entry: svar should not be there:");
#endif
        return NULL;
    }
}
const rb_callable_method_entry_t *
rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
{
    const VALUE *ep = cfp->ep;
    rb_callable_method_entry_t *me;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}
static const rb_iseq_t *
method_entry_iseqptr(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.iseqptr;
      default:
        return NULL;
    }
}
static rb_cref_t *
method_entry_cref(const rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.cref;
      default:
        return NULL;
    }
}
#if VM_CHECK_MODE == 0
PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
#endif
static rb_cref_t *
check_cref(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return method_entry_cref((rb_callable_method_entry_t *)obj);
      case imemo_cref:
        return (rb_cref_t *)obj;
      case imemo_svar:
        if (can_be_svar) {
            return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_cref: svar should not be there:");
#endif
        return NULL;
    }
}
static inline rb_cref_t *
vm_env_cref(const VALUE *ep)
{
    rb_cref_t *cref;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}
static int
is_cref(const VALUE v, int can_be_svar)
{
    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            return TRUE;
          case imemo_svar:
            if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
          default:
            return FALSE;
        }
    }
    return FALSE;
}
static int
vm_env_cref_by_cref(const VALUE *ep)
{
    while (!VM_ENV_LOCAL_P(ep)) {
        if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
        ep = VM_ENV_PREV_EP(ep);
    }
    return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}
static rb_cref_t *
cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
{
    const VALUE v = *vptr;
    rb_cref_t *cref, *new_cref;

    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            cref = (rb_cref_t *)v;
            new_cref = vm_cref_dup(cref);
            if (parent) {
                RB_OBJ_WRITE(parent, vptr, new_cref);
            }
            else {
                VM_FORCE_WRITE(vptr, (VALUE)new_cref);
            }
            return (rb_cref_t *)new_cref;
          case imemo_svar:
            if (can_be_svar) {
                return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
            }
            /* fall through */
          case imemo_ment:
            rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
          default:
            break;
        }
    }
    return NULL;
}
static rb_cref_t *
vm_cref_replace_with_duplicated_cref(const VALUE *ep)
{
    if (vm_env_cref_by_cref(ep)) {
        rb_cref_t *cref;
        VALUE envval;

        while (!VM_ENV_LOCAL_P(ep)) {
            envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
            if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
                return cref;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
        return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
    }
    else {
        rb_bug("vm_cref_dup: unreachable");
    }
}
static rb_cref_t *
vm_get_cref(const VALUE *ep)
{
    rb_cref_t *cref = vm_env_cref(ep);

    if (cref) {
        return cref;
    }
    else {
        rb_bug("vm_get_cref: unreachable");
    }
}

rb_cref_t *
rb_vm_get_cref(const VALUE *ep)
{
    return vm_get_cref(ep);
}
static rb_cref_t *
vm_ec_cref(const rb_execution_context_t *ec)
{
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);

    if (cfp == NULL) {
        return NULL;
    }
    return vm_get_cref(cfp->ep);
}
static const rb_cref_t *
vm_get_const_key_cref(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);
    const rb_cref_t *key_cref = cref;

    while (cref) {
        if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
                RCLASS_EXT(CREF_CLASS(cref))->cloned) {
            return key_cref;
        }
        cref = CREF_NEXT(cref);
    }

    /* does not include singleton class */
    return NULL;
}
void
rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
{
    rb_cref_t *new_cref;

    while (cref) {
        if (CREF_CLASS(cref) == old_klass) {
            new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
            *new_cref_ptr = new_cref;
            return;
        }
        new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
        cref = CREF_NEXT(cref);
        *new_cref_ptr = new_cref;
        new_cref_ptr = &new_cref->next;
    }
    *new_cref_ptr = NULL;
}
static rb_cref_t *
vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
{
    rb_cref_t *prev_cref = NULL;

    if (ep) {
        prev_cref = vm_env_cref(ep);
    }
    else {
        rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);

        if (cfp) {
            prev_cref = vm_env_cref(cfp->ep);
        }
    }

    return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
}
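
/* Sketch of the cref chain built by vm_cref_push(): for
 *   module A; class B; def m; end; end; end
 * the cref seen inside m is roughly B -> A -> Object, which is the chain
 * the constant and class variable lookups below walk via CREF_NEXT(). */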
static inline VALUE
vm_get_cbase(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    return CREF_CLASS_FOR_DEFINITION(cref);
}

static inline VALUE
vm_get_const_base(const VALUE *ep)
{
    const rb_cref_t *cref = vm_get_cref(ep);

    while (cref) {
        if (!CREF_PUSHED_BY_EVAL(cref)) {
            return CREF_CLASS_FOR_DEFINITION(cref);
        }
        cref = CREF_NEXT(cref);
    }

    return Qundef;
}
static inline void
vm_check_if_namespace(VALUE klass)
{
    if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
        rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
    }
}

static inline void
vm_ensure_not_refinement_module(VALUE self)
{
    if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
        rb_warn("not defined at the refinement, but at the outer class/module");
    }
}
static inline VALUE
vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
{
    return klass;
}
static inline VALUE
vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
{
    void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
    VALUE val;

    if (NIL_P(orig_klass) && allow_nil) {
        /* in current lexical scope */
        const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
        const rb_cref_t *cref;
        VALUE klass = Qnil;

        while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
            root_cref = CREF_NEXT(root_cref);
        }
        cref = root_cref;
        while (cref && CREF_NEXT(cref)) {
            if (CREF_PUSHED_BY_EVAL(cref)) {
                klass = Qnil;
            }
            else {
                klass = CREF_CLASS(cref);
            }
            cref = CREF_NEXT(cref);

            if (!NIL_P(klass)) {
                VALUE av, am = 0;
                rb_const_entry_t *ce;
              search_continue:
                if ((ce = rb_const_lookup(klass, id))) {
                    rb_const_warn_if_deprecated(ce, klass, id);
                    val = ce->value;
                    if (UNDEF_P(val)) {
                        if (am == klass) break;
                        am = klass;
                        if (is_defined) return 1;
                        if (rb_autoloading_value(klass, id, &av, NULL)) return av;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            if (UNLIKELY(!rb_ractor_main_p())) {
                                if (!rb_ractor_shareable_p(val)) {
                                    rb_raise(rb_eRactorIsolationError,
                                             "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
                                }
                            }
                            return val;
                        }
                    }
                }
            }
        }

        /* search self */
        if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
            klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
        }
        else {
            klass = CLASS_OF(ec->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_public_const_defined_from(orig_klass, id);
        }
        else {
            return rb_public_const_get_from(orig_klass, id);
        }
    }
}
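
/* Lookup order implemented above for a relative constant reference
 * (orig_klass == nil with allow_nil, sketch): first each lexical scope
 * from the cref chain, skipping eval-pushed entries and honoring
 * autoload; then the ancestors of the innermost scope via rb_const_get().
 * Qualified references (Foo::BAR) skip the lexical walk and go straight
 * through rb_public_const_get_from(). */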
VALUE
rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil)
{
    return vm_get_ev_const(ec, orig_klass, id, allow_nil == Qtrue, 0);
}
static VALUE
vm_get_ev_const_chain(rb_execution_context_t *ec, const ID *segments)
{
    VALUE val = Qnil;
    int idx = 0;
    int allow_nil = TRUE;
    if (segments[0] == idNULL) {
        val = rb_cObject;
        idx++;
        allow_nil = FALSE;
    }
    while (segments[idx]) {
        ID id = segments[idx++];
        val = vm_get_ev_const(ec, val, id, allow_nil, 0);
        allow_nil = FALSE;
    }
    return val;
}
static inline VALUE
vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
{
    VALUE klass;

    if (!cref) {
        rb_bug("vm_get_cvar_base: no cref");
    }

    while (CREF_NEXT(cref) &&
           (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
            CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
        cref = CREF_NEXT(cref);
    }
    if (top_level_raise && !CREF_NEXT(cref)) {
        rb_raise(rb_eRuntimeError, "class variable access from toplevel");
    }

    klass = vm_get_iclass(cfp, CREF_CLASS(cref));

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}
ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
static inline void
fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
{
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, shape_id);
    }
}

#define ractor_incidental_shareable_p(cond, val) \
    (!(cond) || rb_ractor_shareable_p(val))
#define ractor_object_incidental_shareable_p(obj, val) \
    ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)

#define ATTR_INDEX_NOT_SET (attr_index_t)-1
ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
static inline VALUE
vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
{
#if OPT_IC_FOR_IVAR
    VALUE val = Qundef;
    shape_id_t shape_id;
    VALUE *ivar_list;

    if (SPECIAL_CONST_P(obj)) {
        return default_value;
    }

#if SHAPE_IN_BASIC_FLAGS
    shape_id = RBASIC_SHAPE_ID(obj);
#endif

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        ivar_list = ROBJECT_IVPTR(obj);
        VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);

#if !SHAPE_IN_BASIC_FLAGS
        shape_id = ROBJECT_SHAPE_ID(obj);
#endif
        break;
      case T_CLASS:
      case T_MODULE:
        {
            if (UNLIKELY(!rb_ractor_main_p())) {
                // For two reasons we can only use the fast path on the main
                // ractor.
                // First, only the main ractor is allowed to set ivars on classes
                // and modules. So we can skip locking.
                // Second, other ractors need to check the shareability of the
                // values returned from the class ivars.
                goto general_path;
            }

            ivar_list = RCLASS_IVPTR(obj);

#if !SHAPE_IN_BASIC_FLAGS
            shape_id = RCLASS_SHAPE_ID(obj);
#endif
            break;
        }
      default:
        if (FL_TEST_RAW(obj, FL_EXIVAR)) {
            struct gen_ivtbl *ivtbl;
            rb_gen_ivtbl_get(obj, id, &ivtbl);
#if !SHAPE_IN_BASIC_FLAGS
            shape_id = ivtbl->shape_id;
#endif
            ivar_list = ivtbl->as.shape.ivptr;
        }
        else {
            return default_value;
        }
    }

    shape_id_t cached_id;
    attr_index_t index;

    if (is_attr) {
        vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
    }
    else {
        vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
    }

    if (LIKELY(cached_id == shape_id)) {
        RUBY_ASSERT(cached_id != OBJ_TOO_COMPLEX_SHAPE_ID);

        if (index == ATTR_INDEX_NOT_SET) {
            return default_value;
        }

        val = ivar_list[index];
#if USE_DEBUG_COUNTER
        RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
        }
#endif
        RUBY_ASSERT(!UNDEF_P(val));
    }
    else { // cache miss case
#if USE_DEBUG_COUNTER
        if (is_attr) {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
            }
        }
        else {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
            }
            else {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
            }
        }
        RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

        if (RB_TYPE_P(obj, T_OBJECT)) {
            RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
        }
#endif

        if (shape_id == OBJ_TOO_COMPLEX_SHAPE_ID) {
            st_table *table = NULL;
            switch (BUILTIN_TYPE(obj)) {
              case T_CLASS:
              case T_MODULE:
                table = (st_table *)RCLASS_IVPTR(obj);
                break;
              case T_OBJECT:
                table = ROBJECT_IV_HASH(obj);
                break;
              default: {
                struct gen_ivtbl *ivtbl;
                if (rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
                    table = ivtbl->as.complex.table;
                }
                break;
              }
            }

            if (!table || !st_lookup(table, id, &val)) {
                val = default_value;
            }
        }
        else {
            shape_id_t previous_cached_id = cached_id;
            if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
                // This fills in the cache with the shared cache object.
                if (cached_id != previous_cached_id) {
                    fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
                }

                if (index == ATTR_INDEX_NOT_SET) {
                    val = default_value;
                }
                else {
                    // We fetched the ivar list above
                    val = ivar_list[index];
                    RUBY_ASSERT(!UNDEF_P(val));
                }
            }
            else {
                if (is_attr) {
                    vm_cc_attr_index_initialize(cc, shape_id);
                }
                else {
                    vm_ic_attr_index_initialize(ic, shape_id);
                }

                val = default_value;
            }
        }
    }

    if (!UNDEF_P(default_value)) {
        RUBY_ASSERT(!UNDEF_P(val));
    }

    return val;

  general_path:
#endif /* OPT_IC_FOR_IVAR */
    RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

    if (is_attr) {
        return rb_attr_get(obj, id);
    }
    else {
        return rb_ivar_get(obj, id);
    }
}
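
/* Inline cache layout (sketch): an IVC or callcache stores a (shape_id,
 * attr_index) pair. On a hit (cached_id == shape_id above) the read is a
 * single indexed load from ivar_list. On a miss the shape tree is
 * consulted via rb_shape_get_iv_index_with_hint() and the cache refilled;
 * OBJ_TOO_COMPLEX_SHAPE_ID objects fall back to an st_table lookup. */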
static inline void
populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
{
    RUBY_ASSERT(next_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID);

    // Cache population code
    if (is_attr) {
        vm_cc_attr_index_set(cc, index, next_shape_id);
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
    }
}
ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
    RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);

    if (BUILTIN_TYPE(obj) == T_OBJECT) {
        rb_check_frozen_internal(obj);

        attr_index_t index = rb_obj_ivar_set(obj, id, val);

        shape_id_t next_shape_id = ROBJECT_SHAPE_ID(obj);

        if (next_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID) {
            populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
        }

        RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
        return val;
    }
#endif
    return rb_ivar_set(obj, id, val);
}
static VALUE
vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
{
    return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
}

static VALUE
vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
{
    return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
}
NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
static VALUE
vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
#if SHAPE_IN_BASIC_FLAGS
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
#else
    shape_id_t shape_id = rb_generic_shape_id(obj);
#endif

    struct gen_ivtbl *ivtbl = 0;

    // Cache hit case
    if (shape_id == dest_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
        rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
        rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);

        if (shape_id == dest_shape->parent_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
            RUBY_ASSERT(index < dest_shape->capacity);
        }
        else {
            return Qundef;
        }
    }
    else {
        return Qundef;
    }

    rb_gen_ivtbl_get(obj, 0, &ivtbl);

    if (shape_id != dest_shape_id) {
#if SHAPE_IN_BASIC_FLAGS
        RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
#else
        ivtbl->shape_id = dest_shape_id;
#endif
    }

    RB_OBJ_WRITE(obj, &ivtbl->as.shape.ivptr[index], val);

    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

    return val;
}
static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
{
#if OPT_IC_FOR_IVAR
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        {
            VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));

            shape_id_t shape_id = ROBJECT_SHAPE_ID(obj);
            RUBY_ASSERT(dest_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID);

            if (LIKELY(shape_id == dest_shape_id)) {
                RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
                VM_ASSERT(!rb_ractor_shareable_p(obj));
            }
            else if (dest_shape_id != INVALID_SHAPE_ID) {
                rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
                rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
                shape_id_t source_shape_id = dest_shape->parent_id;

                if (shape_id == source_shape_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
                    RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);

                    ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);

                    RUBY_ASSERT(rb_shape_get_next_iv_shape(rb_shape_get_shape_by_id(source_shape_id), id) == dest_shape);
                    RUBY_ASSERT(index < dest_shape->capacity);
                }
                else {
                    break;
                }
            }
            else {
                break;
            }

            VALUE *ptr = ROBJECT_IVPTR(obj);

            RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
            RB_OBJ_WRITE(obj, &ptr[index], val);

            RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
            RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
            return val;
        }
      case T_CLASS:
      case T_MODULE:
        RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
      default:
        break;
    }

    return Qundef;
#endif /* OPT_IC_FOR_IVAR */
}
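
/* Cache hit cases for vm_setivar (sketch): either the object already has
 * dest_shape_id (plain overwrite of an existing ivar), or it sits at the
 * parent of dest_shape_id with a matching edge_name/capacity, in which
 * case the store also performs the shape transition via
 * ROBJECT_SET_SHAPE_ID. Everything else exits to the slowpath via Qundef. */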
static VALUE
update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t * cref, ICVARC ic)
{
    VALUE defined_class = 0;
    VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);

    if (RB_TYPE_P(defined_class, T_ICLASS)) {
        defined_class = RBASIC(defined_class)->klass;
    }

    struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
    if (!rb_cvc_tbl) {
        rb_bug("the cvc table should be set");
    }

    VALUE ent_data;
    if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
        rb_bug("should have cvar cache entry");
    }

    struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;

    ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
    ent->cref = cref;
    ic->entry = ent;

    RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
    RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
    RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
    RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);

    return cvar_value;
}
static inline VALUE
vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);

        VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
        RUBY_ASSERT(!UNDEF_P(v));

        return v;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    return update_classvariable_cache(iseq, klass, id, cref, ic);
}

VALUE
rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
{
    return vm_getclassvariable(iseq, cfp, id, ic);
}
static inline void
vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
{
    const rb_cref_t *cref;
    cref = vm_get_cref(GET_EP());

    if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
        RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);

        rb_class_ivar_set(ic->entry->class_value, id, val);
        return;
    }

    VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);

    rb_cvar_set(klass, id, val);

    update_classvariable_cache(iseq, klass, id, cref, ic);
}

void
rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
{
    vm_setclassvariable(iseq, cfp, id, val, ic);
}
static inline VALUE
vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
{
    return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
}
static inline void
vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    if (RB_SPECIAL_CONST_P(obj)) {
        rb_error_frozen_object(obj);
        return;
    }

    shape_id_t dest_shape_id;
    attr_index_t index;
    vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);

    if (UNLIKELY(UNDEF_P(vm_setivar(obj, id, val, dest_shape_id, index)))) {
        switch (BUILTIN_TYPE(obj)) {
          case T_OBJECT:
          case T_CLASS:
          case T_MODULE:
            break;
          default:
            if (!UNDEF_P(vm_setivar_default(obj, id, val, dest_shape_id, index))) {
                return;
            }
        }
        vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
    }
}

void
rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    vm_setinstancevariable(iseq, obj, id, val, ic);
}
static VALUE
vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
{
    /* continue throw */

    if (FIXNUM_P(err)) {
        ec->tag->state = RUBY_TAG_FATAL;
    }
    else if (SYMBOL_P(err)) {
        ec->tag->state = TAG_THROW;
    }
    else if (THROW_DATA_P(err)) {
        ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
    }
    else {
        ec->tag->state = TAG_RAISE;
    }
    return err;
}
static VALUE
vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
               const int flag, const VALUE throwobj)
{
    const rb_control_frame_t *escape_cfp = NULL;
    const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */

    if (flag != 0) {
        /* do nothing */
    }
    else if (state == TAG_BREAK) {
        int is_orphan = 1;
        const VALUE *ep = GET_EP();
        const rb_iseq_t *base_iseq = GET_ISEQ();
        escape_cfp = reg_cfp;

        while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
            if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
                ep = escape_cfp->ep;
                base_iseq = escape_cfp->iseq;
            }
            else {
                ep = VM_ENV_PREV_EP(ep);
                base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
                escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
                VM_ASSERT(escape_cfp->iseq == base_iseq);
            }
        }

        if (VM_FRAME_LAMBDA_P(escape_cfp)) {
            /* lambda{... break ...} */
            is_orphan = 0;
            state = TAG_RETURN;
        }
        else {
            ep = VM_ENV_PREV_EP(ep);

            while (escape_cfp < eocfp) {
                if (escape_cfp->ep == ep) {
                    const rb_iseq_t *const iseq = escape_cfp->iseq;
                    const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
                    const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
                    unsigned int i;

                    if (!ct) break;

                    for (i=0; i < ct->size; i++) {
                        const struct iseq_catch_table_entry *const entry =
                            UNALIGNED_MEMBER_PTR(ct, entries[i]);

                        if (entry->type == CATCH_TYPE_BREAK &&
                            entry->iseq == base_iseq &&
                            entry->start < epc && entry->end >= epc) {
                            if (entry->cont == epc) { /* found! */
                                is_orphan = 0;
                            }
                            break;
                        }
                    }
                    break;
                }

                escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
            }
        }

        if (is_orphan) {
            rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
        }
    }
    else if (state == TAG_RETRY) {
        const VALUE *ep = VM_ENV_PREV_EP(GET_EP());

        escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
    }
    else if (state == TAG_RETURN) {
        const VALUE *current_ep = GET_EP();
        const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
        int in_class_frame = 0;
        int toplevel = 1;
        escape_cfp = reg_cfp;

        // find target_lep, target_ep
        while (!VM_ENV_LOCAL_P(ep)) {
            if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
                target_ep = ep;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        target_lep = ep;

        while (escape_cfp < eocfp) {
            const VALUE *lep = VM_CF_LEP(escape_cfp);

            if (!target_lep) {
                target_lep = lep;
            }

            if (lep == target_lep &&
                VM_FRAME_RUBYFRAME_P(escape_cfp) &&
                ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
                in_class_frame = 1;
                target_lep = 0;
            }

            if (lep == target_lep) {
                if (VM_FRAME_LAMBDA_P(escape_cfp)) {
                    toplevel = 0;
                    if (in_class_frame) {
                        /* lambda {class A; ... return ...; end} */
                        goto valid_return;
                    }
                    else {
                        const VALUE *tep = current_ep;

                        while (target_lep != tep) {
                            if (escape_cfp->ep == tep) {
                                /* in lambda */
                                if (tep == target_ep) {
                                    goto valid_return;
                                }
                                else {
                                    goto unexpected_return;
                                }
                            }
                            tep = VM_ENV_PREV_EP(tep);
                        }
                    }
                }
                else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
                    switch (ISEQ_BODY(escape_cfp->iseq)->type) {
                      case ISEQ_TYPE_TOP:
                      case ISEQ_TYPE_MAIN:
                        if (toplevel) {
                            if (in_class_frame) goto unexpected_return;
                            if (target_ep == NULL) {
                                goto valid_return;
                            }
                            else {
                                goto unexpected_return;
                            }
                        }
                        break;
                      case ISEQ_TYPE_EVAL: {
                        const rb_iseq_t *is = escape_cfp->iseq;
                        enum rb_iseq_type t = ISEQ_BODY(is)->type;
                        while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
                            if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
                            t = ISEQ_BODY(is)->type;
                        }
                        toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
                        break;
                      }
                      case ISEQ_TYPE_CLASS:
                        toplevel = 0;
                        break;
                      default:
                        break;
                    }
                }
            }

            if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
                if (target_ep == NULL) {
                    goto valid_return;
                }
                else {
                    goto unexpected_return;
                }
            }

            escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
        }
      unexpected_return:;
        rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);

      valid_return:;
        /* do nothing */
    }
    else {
        rb_bug("insns(throw): unsupported throw type");
    }

    ec->tag->state = state;
    return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
}
static VALUE
vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
         rb_num_t throw_state, VALUE throwobj)
{
    const int state = (int)(throw_state & VM_THROW_STATE_MASK);
    const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);

    if (state != 0) {
        return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
    }
    else {
        return vm_throw_continue(ec, throwobj);
    }
}

VALUE
rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
{
    return vm_throw(ec, reg_cfp, throw_state, throwobj);
}
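
/* throw_state packing (sketch): VM_THROW_STATE_MASK extracts the tag
 * state (TAG_BREAK, TAG_RETRY, TAG_RETURN, ...) and
 * VM_THROW_NO_ESCAPE_FLAG suppresses the escape-frame search in
 * vm_throw_start(). state == 0 means "continue an in-flight throw", which
 * only re-derives ec->tag->state from the thrown object. */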
static inline void
vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    const VALUE *ptr;
    rb_num_t len;
    const VALUE obj = ary;

    if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
        ary = obj;
        ptr = &ary;
        len = 1;
    }
    else {
        ptr = RARRAY_CONST_PTR(ary);
        len = (rb_num_t)RARRAY_LEN(ary);
    }

    if (num + is_splat == 0) {
        /* no space left on stack */
    }
    else if (flag & 0x02) {
        /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
        rb_num_t i = 0, j;

        if (len < num) {
            for (i = 0; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }
        }

        for (j = 0; i < num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *cfp->sp++ = v;
        }

        if (is_splat) {
            *cfp->sp++ = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
        if (is_splat) {
            if (num > len) {
                *cfp->sp++ = rb_ary_new();
            }
            else {
                *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
            }
        }

        if (num > len) {
            rb_num_t i = 0;
            for (; i < num - len; i++) {
                *cfp->sp++ = Qnil;
            }

            for (rb_num_t j = 0; i < num; i++, j++) {
                *cfp->sp++ = ptr[len - j - 1];
            }
        }
        else {
            for (rb_num_t j = 0; j < num; j++) {
                *cfp->sp++ = ptr[num - j - 1];
            }
        }
    }

    RB_GC_GUARD(ary);
}
vm_call_general(rb_execution_context_t
*ec
, rb_control_frame_t
*reg_cfp
, struct rb_calling_info
*calling
);
1983 static VALUE
vm_mtbl_dump(VALUE klass
, ID target_mid
);
static struct rb_class_cc_entries *
vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
{
    struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
#if VM_CHECK_MODE > 0
    ccs->debug_sig = ~(VALUE)ccs;
#endif
    ccs->capa = 0;
    ccs->len = 0;
    ccs->cme = cme;
    METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
    ccs->entries = NULL;

    rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
    RB_OBJ_WRITTEN(klass, Qundef, cme);
    return ccs;
}
static void
vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    if (! vm_cc_markable(cc)) {
        return;
    }

    if (UNLIKELY(ccs->len == ccs->capa)) {
        if (ccs->capa == 0) {
            ccs->capa = 1;
            ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
        }
        else {
            ccs->capa *= 2;
            REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
        }
    }
    VM_ASSERT(ccs->len < ccs->capa);

    const int pos = ccs->len++;
    ccs->entries[pos].argc = vm_ci_argc(ci);
    ccs->entries[pos].flag = vm_ci_flag(ci);
    RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);

    if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
        // for tuning
        // vm_mtbl_dump(klass, 0);
    }
}
#if VM_CHECK_MODE > 0
void
rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
{
    ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
    for (int i=0; i<ccs->len; i++) {
        ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
                          ccs->entries[i].flag,
                          ccs->entries[i].argc);
        rp(ccs->entries[i].cc);
    }
}
static int
vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
{
    VM_ASSERT(vm_ccs_p(ccs));
    VM_ASSERT(ccs->len <= ccs->capa);

    for (int i=0; i<ccs->len; i++) {
        const struct rb_callcache *cc = ccs->entries[i].cc;

        VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
        VM_ASSERT(vm_cc_class_check(cc, klass));
        VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
        VM_ASSERT(!vm_cc_super_p(cc));
        VM_ASSERT(!vm_cc_refinement_p(cc));
    }
    return TRUE;
}
#endif

const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
static const struct rb_callcache *
vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
{
    const ID mid = vm_ci_mid(ci);
    struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
    struct rb_class_cc_entries *ccs = NULL;
    VALUE ccs_data;

    if (cc_tbl) {
        // CCS data is keyed on method id, so we don't need the method id
        // for doing comparisons in the `for` loop below.
        if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
            ccs = (struct rb_class_cc_entries *)ccs_data;
            const int ccs_len = ccs->len;

            if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
                rb_vm_ccs_free(ccs);
                rb_id_table_delete(cc_tbl, mid);
                ccs = NULL;
            }
            else {
                VM_ASSERT(vm_ccs_verify(ccs, mid, klass));

                // We already know the method id is correct because we had
                // to look up the ccs_data by method id.  All we need to
                // compare is argc and flag
                unsigned int argc = vm_ci_argc(ci);
                unsigned int flag = vm_ci_flag(ci);

                for (int i=0; i<ccs_len; i++) {
                    unsigned int ccs_ci_argc = ccs->entries[i].argc;
                    unsigned int ccs_ci_flag = ccs->entries[i].flag;
                    const struct rb_callcache *ccs_cc = ccs->entries[i].cc;

                    VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));

                    if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
                        RB_DEBUG_COUNTER_INC(cc_found_in_ccs);

                        VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
                        VM_ASSERT(ccs_cc->klass == klass);
                        VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));

                        return ccs_cc;
                    }
                }
            }
        }
    }
    else {
        cc_tbl = RCLASS_CC_TBL(klass) = rb_id_table_create(2);
    }

    RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);

    const rb_callable_method_entry_t *cme;

    if (ccs) {
        cme = ccs->cme;
        cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;

        VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
    }
    else {
        cme = rb_callable_method_entry(klass, mid);
    }

    VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));

    if (cme == NULL) {
        // undef or not found: can't cache the information
        VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
        return &vm_empty_cc;
    }

    VM_ASSERT(cme == rb_callable_method_entry(klass, mid));

    METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);

    if (ccs == NULL) {
        VM_ASSERT(cc_tbl != NULL);

        if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) {
            // rb_callable_method_entry() prepares ccs.
            ccs = (struct rb_class_cc_entries *)ccs_data;
        }
        else {
            ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
        }
    }

    cme = rb_check_overloaded_cme(cme, ci);

    const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
    vm_ccs_push(klass, ccs, ci, cc);

    VM_ASSERT(vm_cc_cme(cc) != NULL);
    VM_ASSERT(cme->called_id == mid);
    VM_ASSERT(vm_cc_cme(cc)->called_id == mid);

    return cc;
}
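
/* Two-level method cache (sketch): each class owns a cc_tbl mapping
 * mid => rb_class_cc_entries (ccs), and each ccs entry is further keyed
 * by the call site's (argc, flag) pair from the callinfo. A hit here
 * avoids any method-table walk, while invalidation only needs to mark
 * the cme (METHOD_ENTRY_INVALIDATED) to kill every dependent cc at once. */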
const struct rb_callcache *
rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
{
    const struct rb_callcache *cc;

    VM_ASSERT(RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS));

    RB_VM_LOCK_ENTER();
    {
        cc = vm_search_cc(klass, ci);

        VM_ASSERT(cc);
        VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
        VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
        VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
        VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
        VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
    }
    RB_VM_LOCK_LEAVE();

    return cc;
}
static const struct rb_callcache *
vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
{
#if USE_DEBUG_COUNTER
    const struct rb_callcache *old_cc = cd->cc;
#endif

    const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);

#if OPT_INLINE_METHOD_CACHE
    cd->cc = cc;

    const struct rb_callcache *empty_cc = &vm_empty_cc;
    if (cd_owner && cc != empty_cc) {
        RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
    }

#if USE_DEBUG_COUNTER
    if (old_cc == empty_cc) {
        // empty
        RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
    }
    else if (old_cc == cc) {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
    }
    else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
    }
    else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
             vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
    }
    else {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
    }
#endif
#endif // OPT_INLINE_METHOD_CACHE

    VM_ASSERT(vm_cc_cme(cc) == NULL ||
              vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));

    return cc;
}
ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
static const struct rb_callcache *
vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
{
    const struct rb_callcache *cc = cd->cc;

#if OPT_INLINE_METHOD_CACHE
    if (LIKELY(vm_cc_class_check(cc, klass))) {
        if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
            VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
            RB_DEBUG_COUNTER_INC(mc_inline_hit);
            VM_ASSERT(vm_cc_cme(cc) == NULL ||                        // not found
                      (vm_ci_flag(cd->ci) & VM_CALL_SUPER) ||         // search_super w/ define_method
                      vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid

            return cc;
        }
        RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
    }
    else {
        RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
    }
#endif

    return vm_search_method_slowpath0(cd_owner, cd, klass);
}
static const struct rb_callcache *
vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
{
    VALUE klass = CLASS_OF(recv);
    VM_ASSERT(klass != Qfalse);
    VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));

    return vm_search_method_fastpath(cd_owner, cd, klass);
}
#if __has_attribute(transparent_union)
typedef union {
    VALUE (*anyargs)(ANYARGS);
    VALUE (*f00)(VALUE);
    VALUE (*f01)(VALUE, VALUE);
    VALUE (*f02)(VALUE, VALUE, VALUE);
    VALUE (*f03)(VALUE, VALUE, VALUE, VALUE);
    VALUE (*f04)(VALUE, VALUE, VALUE, VALUE, VALUE);
    VALUE (*f05)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
    VALUE (*f06)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
    VALUE (*f07)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
    VALUE (*f08)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
    VALUE (*f09)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
    VALUE (*f10)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
    VALUE (*f11)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
    VALUE (*f12)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
    VALUE (*f13)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
    VALUE (*f14)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
    VALUE (*f15)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
    VALUE (*fm1)(int, union { VALUE *x; const VALUE *y; } __attribute__((__transparent_union__)), VALUE);
} __attribute__((__transparent_union__)) cfunc_type;
#else
typedef VALUE (*cfunc_type)(ANYARGS);
#endif
static inline bool
check_cfunc(const rb_callable_method_entry_t *me, cfunc_type func)
{
    if (! me) {
        return false;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
        VM_ASSERT(callable_method_entry_p(me));

        if (me->def->type != VM_METHOD_TYPE_CFUNC) {
            return false;
        }
        else {
#if __has_attribute(transparent_union)
            return me->def->body.cfunc.func == func.anyargs;
#else
            return me->def->body.cfunc.func == func;
#endif
        }
    }
}

static inline bool
vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
{
    VM_ASSERT(iseq != NULL);
    const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
    return check_cfunc(vm_cc_cme(cc), func);
}
#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)

static inline bool
FIXNUM_2_P(VALUE a, VALUE b)
{
    /* FIXNUM_P(a) && FIXNUM_P(b)
     * == ((a & 1) && (b & 1))
     * == a & b & 1 */
    SIGNED_VALUE x = a;
    SIGNED_VALUE y = b;
    SIGNED_VALUE z = x & y & 1;
    return z == 1;
}

static inline bool
FLONUM_2_P(VALUE a, VALUE b)
{
#if USE_FLONUM
    /* FLONUM_P(a) && FLONUM_P(b)
     * == ((a & 3) == 2) && ((b & 3) == 2)
     * == !(((a ^ 2) | (b ^ 2)) & 3)
     */
    SIGNED_VALUE x = a;
    SIGNED_VALUE y = b;
    SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
    return !z;
#else
    return false;
#endif
}
static VALUE
opt_equality_specialized(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
        goto compare_by_identity;
    }
    else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
        goto compare_by_identity;
    }
    else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
        goto compare_by_identity;
    }
    else if (SPECIAL_CONST_P(recv)) {
        //
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
        double a = RFLOAT_VALUE(recv);
        double b = RFLOAT_VALUE(obj);

#if MSC_VERSION_BEFORE(1300)
        if (isnan(a)) {
            return Qfalse;
        }
        else if (isnan(b)) {
            return Qfalse;
        }
        else
#endif
        return RBOOL(a == b);
    }
    else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
        if (recv == obj) {
            return Qtrue;
        }
        else if (RB_TYPE_P(obj, T_STRING)) {
            return rb_str_eql_internal(obj, recv);
        }
    }
    return Qundef;

  compare_by_identity:
    return RBOOL(recv == obj);
}
static VALUE
opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
{
    VM_ASSERT(cd_owner != NULL);

    VALUE val = opt_equality_specialized(recv, obj);
    if (!UNDEF_P(val)) return val;

    if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
        return Qundef;
    }
    else {
        return RBOOL(recv == obj);
    }
}
2424 static inline const struct rb_callcache
*gccct_method_search(rb_execution_context_t
*ec
, VALUE recv
, ID mid
, const struct rb_callinfo
*ci
); // vm_eval.c
2425 NOINLINE(static VALUE
opt_equality_by_mid_slowpath(VALUE recv
, VALUE obj
, ID mid
));
2428 opt_equality_by_mid_slowpath(VALUE recv
, VALUE obj
, ID mid
)
2430 const struct rb_callcache
*cc
= gccct_method_search(GET_EC(), recv
, mid
, &VM_CI_ON_STACK(mid
, 0, 1, NULL
));
2432 if (cc
&& check_cfunc(vm_cc_cme(cc
), rb_obj_equal
)) {
2433 return RBOOL(recv
== obj
);
2441 opt_equality_by_mid(VALUE recv
, VALUE obj
, ID mid
)
2443 VALUE val
= opt_equality_specialized(recv
, obj
);
2444 if (!UNDEF_P(val
)) {
2448 return opt_equality_by_mid_slowpath(recv
, obj
, mid
);
2453 rb_equal_opt(VALUE obj1
, VALUE obj2
)
2455 return opt_equality_by_mid(obj1
, obj2
, idEq
);
2459 rb_eql_opt(VALUE obj1
, VALUE obj2
)
2461 return opt_equality_by_mid(obj1
, obj2
, idEqlP
);
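/* Note (editorial): the two entry points differ only in the method id used on
 * the slow path: idEq is `==` and idEqlP is `eql?`. For example 1 == 1.0 is
 * true while 1.eql?(1.0) is false, yet both take the same
 * opt_equality_specialized shortcut when both operands are Fixnums. */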
extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE *, const rb_callable_method_entry_t *, int kw_splat);
extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);

static VALUE
check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
{
    switch (type) {
      case VM_CHECKMATCH_TYPE_WHEN:
        return pattern;
      case VM_CHECKMATCH_TYPE_RESCUE:
        if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
            rb_raise(rb_eTypeError, "class or module required for rescue clause");
        }
        /* fall through */
      case VM_CHECKMATCH_TYPE_CASE: {
        return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
      }
      default:
        rb_bug("check_match: unreachable");
    }
}
#if MSC_VERSION_BEFORE(1300)
#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
#else
#define CHECK_CMP_NAN(a, b) /* do nothing */
#endif

static inline VALUE
double_cmp_lt(double a, double b)
{
    CHECK_CMP_NAN(a, b);
    return RBOOL(a < b);
}

static inline VALUE
double_cmp_le(double a, double b)
{
    CHECK_CMP_NAN(a, b);
    return RBOOL(a <= b);
}

static inline VALUE
double_cmp_gt(double a, double b)
{
    CHECK_CMP_NAN(a, b);
    return RBOOL(a > b);
}

static inline VALUE
double_cmp_ge(double a, double b)
{
    CHECK_CMP_NAN(a, b);
    return RBOOL(a >= b);
}
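/* Note (editorial): on compilers with IEEE-conformant comparisons, any
 * ordered comparison involving NaN is already false, so CHECK_CMP_NAN expands
 * to nothing and e.g. double_cmp_lt(NAN, 1.0) returns Qfalse without the
 * explicit guard; the guard only matters for the old MSVC runtime handled
 * above. */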
// Copied by vm_dump.c
static inline VALUE *
vm_base_ptr(const rb_control_frame_t *cfp)
{
    const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
        VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;

        if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
            bp += 1;
        }

#if VM_DEBUG_BP_CHECK
        if (bp != cfp->bp_check) {
            ruby_debug_printf("bp_check: %ld, bp: %ld\n",
                              (long)(cfp->bp_check - GET_EC()->vm_stack),
                              (long)(bp - GET_EC()->vm_stack));
            rb_bug("vm_base_ptr: unreachable");
        }
#endif
        return bp;
    }
    else {
        return NULL;
    }
}

VALUE *
rb_vm_base_ptr(const rb_control_frame_t *cfp)
{
    return vm_base_ptr(cfp);
}
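/* Rough frame sketch (editorial, assuming a plain Ruby-level method frame):
 *
 *     prev_cfp->sp -> +--------------------+
 *                     | locals ...         |  local_table_size slots
 *                     +--------------------+
 *                     | env data           |  VM_ENV_DATA_SIZE slots
 *     vm_base_ptr  -> +--------------------+
 *
 * so the base pointer sits just past the environment that vm_push_frame laid
 * out for this control frame. */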
/* method call processes with call_info */

#include "vm_args.c"

static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);

static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
static VALUE
vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);

    return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
}

static VALUE
vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);

    const struct rb_callcache *cc = calling->cc;
    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    int param = ISEQ_BODY(iseq)->param.size;
    int local = ISEQ_BODY(iseq)->local_table_size;
    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
bool
rb_simple_iseq_p(const rb_iseq_t *iseq)
{
    return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
           ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
           ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
           ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
           ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
           ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
           ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
}

static bool
rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
{
    return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
           ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
           ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
           ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
           ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
           ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
           ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
}

static bool
rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
{
    return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
           ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
           ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
           ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
           ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
           ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
}
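/* Ruby-level illustration (editorial): the three predicates above roughly
 * correspond to these parameter lists:
 *
 *     def m(a, b);     end   # rb_simple_iseq_p
 *     def m(a, b = 1); end   # rb_iseq_only_optparam_p
 *     def m(a, k: 1);  end   # rb_iseq_only_kwparam_p
 *
 * Anything with rest/post/block/kwrest parameters fails all three and takes
 * the generic setup_parameters_complex path instead. */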
#define ALLOW_HEAP_ARGV (-2)
#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
static bool
vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
{
    vm_check_canary(GET_EC(), cfp->sp);

    if (!NIL_P(ary)) {
        const VALUE *ptr = RARRAY_CONST_PTR(ary);
        long len = RARRAY_LEN(ary);
        int argc = calling->argc;

        if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
            /* Avoid SystemStackError when splatting large arrays by storing arguments in
             * a temporary array, instead of trying to keep arguments on the VM stack.
             */
            VALUE *argv = cfp->sp - argc;
            VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
            rb_ary_cat(argv_ary, argv, argc);
            rb_ary_cat(argv_ary, ptr, len);
            cfp->sp -= argc - 1;
            cfp->sp[-1] = argv_ary;
            calling->argc = 1;
            calling->heap_argv = argv_ary;
            RB_GC_GUARD(ary);
            return true;
        }
        else {
            long i;

            if (max_args >= 0 && len + argc > max_args) {
                /* If only a given max_args is allowed, copy up to max args.
                 * Used by vm_callee_setup_block_arg for non-lambda blocks,
                 * where additional arguments are ignored.
                 *
                 * Also, copy up to one more argument than the maximum,
                 * in case it is an empty keyword hash that will be removed.
                 */
                calling->argc += len - (max_args - argc + 1);
                len = max_args - argc + 1;
            }

            /* Unset heap_argv if set originally. Can happen when
             * forwarding modified arguments, where heap_argv was used
             * originally, but heap_argv is not supported by the forwarded
             * method in all cases.
             */
            calling->heap_argv = 0;

            CHECK_VM_STACK_OVERFLOW(cfp, len);

            for (i = 0; i < len; i++) {
                *cfp->sp++ = ptr[i];
            }
            calling->argc += i;
        }
    }

    return false;
}
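/* Example (editorial): for a call like f(*ary) with a huge ary, len + argc
 * exceeds VM_ARGC_STACK_MAX and the arguments are collected into a hidden
 * array (calling->heap_argv) instead of being pushed one by one, which is
 * what used to raise SystemStackError for very large splats. */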
static inline void
vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
{
    const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
    const int kw_len = vm_ci_kwarg(ci)->keyword_len;
    const VALUE h = rb_hash_new_with_size(kw_len);
    VALUE *sp = cfp->sp;
    int i;

    for (i=0; i<kw_len; i++) {
        rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
    }
    (sp - kw_len)[0] = h;

    cfp->sp -= kw_len - 1;
    calling->argc -= kw_len - 1;
    calling->kw_splat = 1;
}

static inline VALUE
vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
{
    if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
        if (keyword_hash != Qnil) {
            /* Convert a non-hash keyword splat to a new hash */
            keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
        }
    }
    else if (!IS_ARGS_KW_SPLAT_MUT(ci)) {
        /* Convert a hash keyword splat to a new hash unless
         * a mutable keyword splat was passed.
         */
        keyword_hash = rb_hash_dup(keyword_hash);
    }
    return keyword_hash;
}
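/* Example (editorial): for m(**h) the callee may mutate its keyword hash, so
 * h is dup'ed here unless the call site marked the splat as mutable
 * (IS_ARGS_KW_SPLAT_MUT); that keeps callee-side mutation from leaking back
 * into the caller's hash. */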
static void
CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
                 struct rb_calling_info *restrict calling,
                 const struct rb_callinfo *restrict ci, int max_args)
{
    if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
        if (IS_ARGS_KW_SPLAT(ci)) {
            // f(*a, **kw)
            VM_ASSERT(calling->kw_splat == 1);

            cfp->sp -= 2;
            calling->argc -= 2;
            VALUE ary = cfp->sp[0];
            VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);

            // splat a
            if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;

            // put kw
            if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
                if (UNLIKELY(calling->heap_argv)) {
                    rb_ary_push(calling->heap_argv, kwh);
                    ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
                    if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
                        calling->kw_splat = 0;
                    }
                }
                else {
                    cfp->sp[0] = kwh;
                    cfp->sp++;
                    calling->argc++;

                    VM_ASSERT(calling->kw_splat == 1);
                }
            }
            else {
                calling->kw_splat = 0;
            }
        }
        else {
            // f(*a)
            VM_ASSERT(calling->kw_splat == 0);

            cfp->sp -= 1;
            calling->argc -= 1;
            VALUE ary = cfp->sp[0];

            if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
                goto check_keyword;
            }

            // check the last argument
            VALUE last_hash, argv_ary;
            if (UNLIKELY(argv_ary = calling->heap_argv)) {
                if (!IS_ARGS_KEYWORD(ci) &&
                    RARRAY_LEN(argv_ary) > 0 &&
                    RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
                    (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {

                    rb_ary_pop(argv_ary);
                    if (!RHASH_EMPTY_P(last_hash)) {
                        rb_ary_push(argv_ary, rb_hash_dup(last_hash));
                        calling->kw_splat = 1;
                    }
                }
            }
            else {
check_keyword:
                if (!IS_ARGS_KEYWORD(ci) &&
                    calling->argc > 0 &&
                    RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
                    (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {

                    if (RHASH_EMPTY_P(last_hash)) {
                        calling->argc--;
                        cfp->sp -= 1;
                    }
                    else {
                        cfp->sp[-1] = rb_hash_dup(last_hash);
                        calling->kw_splat = 1;
                    }
                }
            }
        }
    }
    else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
        // f(**kw)
        VM_ASSERT(calling->kw_splat == 1);
        VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);

        if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
            cfp->sp--;
            calling->argc--;
            calling->kw_splat = 0;
        }
        else {
            cfp->sp[-1] = kwh;
        }
    }
    else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
        // f(k1:1, k2:2)
        VM_ASSERT(calling->kw_splat == 0);

        /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
         * by creating a keyword hash.
         * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
         */
        vm_caller_setup_arg_kw(cfp, calling, ci);
    }
}
#define USE_OPT_HIST 0

#if USE_OPT_HIST
#define OPT_HIST_MAX 64
static int opt_hist[OPT_HIST_MAX+1];

__attribute__((destructor))
static void
opt_hist_show_results_at_exit(void)
{
    for (int i=0; i<OPT_HIST_MAX; i++) {
        ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
    }
}
#endif
static VALUE
vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                    struct rb_calling_info *calling)
{
    const struct rb_callcache *cc = calling->cc;
    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
    const int opt = calling->argc - lead_num;
    const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
    const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
    const int param = ISEQ_BODY(iseq)->param.size;
    const int local = ISEQ_BODY(iseq)->local_table_size;
    const int delta = opt_num - opt;

    RB_DEBUG_COUNTER_INC(ccf_iseq_opt);

#if USE_OPT_HIST
    if (opt_pc < OPT_HIST_MAX) {
        opt_hist[opt]++;
    }
    else {
        opt_hist[OPT_HIST_MAX]++;
    }
#endif

    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
}
static VALUE
vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                      struct rb_calling_info *calling)
{
    const struct rb_callcache *cc = calling->cc;
    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
    const int opt = calling->argc - lead_num;
    const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];

    RB_DEBUG_COUNTER_INC(ccf_iseq_opt);

#if USE_OPT_HIST
    if (opt_pc < OPT_HIST_MAX) {
        opt_hist[opt]++;
    }
    else {
        opt_hist[OPT_HIST_MAX]++;
    }
#endif

    return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
}
static void
args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq,
                         VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
                         VALUE *const locals);
static VALUE
vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;

    VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
    RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);

    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
    const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
    const int ci_kw_len = kw_arg->keyword_len;
    const VALUE * const ci_keywords = kw_arg->keywords;
    VALUE *argv = cfp->sp - calling->argc;
    VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
    const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
    VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
    MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
    args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);

    int param = ISEQ_BODY(iseq)->param.size;
    int local = ISEQ_BODY(iseq)->local_table_size;
    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
static VALUE
vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                  struct rb_calling_info *calling)
{
    const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;

    VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
    RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);

    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
    VALUE * const argv = cfp->sp - calling->argc;
    VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
    int i;

    for (i=0; i<kw_param->num; i++) {
        klocals[i] = kw_param->default_values[i];
    }
    klocals[i] = INT2FIX(0); // kw specify flag
    // NOTE:
    //   nobody checks this value, but it should be cleared because it can
    //   point to an invalid VALUE (T_NONE objects, raw pointers and so on).

    int param = ISEQ_BODY(iseq)->param.size;
    int local = ISEQ_BODY(iseq)->local_table_size;
    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);

static VALUE
vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
                                  struct rb_calling_info *calling)
{
    const struct rb_builtin_function *bf = calling->cc->aux_.bf;
    cfp->sp -= (calling->argc + 1);
    return builtin_invoker0(ec, calling->recv, NULL, (rb_insn_func_t)bf->func_ptr);
}

VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
static void
warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
{
    rb_vm_t *vm = GET_VM();
    st_table *dup_check_table = vm->unused_block_warning_table;
    st_data_t key = 0;

    union {
        VALUE v;
        unsigned char b[SIZEOF_VALUE];
    } k1 = {
        .v = (VALUE)pc,
    }, k2 = {
        .v = (VALUE)cme->def,
    };

    // relaxed check
    if (!vm->unused_block_warning_strict) {
        key = (st_data_t)cme->def->original_id;

        if (st_lookup(dup_check_table, key, NULL)) {
            return;
        }
    }

    // strict check
    // make unique key from pc and me->def pointer
    for (int i=0; i<SIZEOF_VALUE; i++) {
        // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
        key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
    }

    if (0) {
        fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
        fprintf(stderr, "pc:%p def:%p\n", pc, cme->def);
        fprintf(stderr, "key:%p\n", (void *)key);
    }

    // duplication check
    if (st_insert(dup_check_table, key, 1)) {
        // already warned for this key
    }
    else {
        VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
        VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);

        if (!NIL_P(m_loc)) {
            rb_warning("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
                       name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
        }
        else {
            rb_warning("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
        }
    }
}
static inline int
vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
                    const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;

    if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
                 calling->block_handler != VM_BLOCK_HANDLER_NONE &&
                 !(vm_ci_flag(calling->cd->ci) & VM_CALL_SUPER))) {
        warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
    }

    if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
        if (LIKELY(rb_simple_iseq_p(iseq))) {
            rb_control_frame_t *cfp = ec->cfp;
            int lead_num = ISEQ_BODY(iseq)->param.lead_num;
            CALLER_SETUP_ARG(cfp, calling, ci, lead_num);

            if (calling->argc != lead_num) {
                argument_arity_error(ec, iseq, calling->argc, lead_num, lead_num);
            }

            VM_ASSERT(ci == calling->cd->ci);
            VM_ASSERT(cc == calling->cc);

            if (vm_call_iseq_optimizable_p(ci, cc)) {
                if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
                    !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
                    VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
                    vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
                    CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
                }
                else {
                    CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
                }
            }
            return 0;
        }
        else if (rb_iseq_only_optparam_p(iseq)) {
            rb_control_frame_t *cfp = ec->cfp;

            const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
            const int opt_num = ISEQ_BODY(iseq)->param.opt_num;

            CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
            const int argc = calling->argc;
            const int opt = argc - lead_num;

            if (opt < 0 || opt > opt_num) {
                argument_arity_error(ec, iseq, argc, lead_num, lead_num + opt_num);
            }

            if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
                CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
                                !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
                                vm_call_cacheable(ci, cc));
            }
            else {
                CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
                                !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
                                vm_call_cacheable(ci, cc));
            }

            /* initialize opt vars for self-references */
            VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
            for (int i=argc; i<lead_num + opt_num; i++) {
                argv[i] = Qnil;
            }
            return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
        }
        else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
            const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
            const int argc = calling->argc;
            const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;

            if (vm_ci_flag(ci) & VM_CALL_KWARG) {
                const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);

                if (argc - kw_arg->keyword_len == lead_num) {
                    const int ci_kw_len = kw_arg->keyword_len;
                    const VALUE * const ci_keywords = kw_arg->keywords;
                    VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
                    MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);

                    VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
                    args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);

                    CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
                                    vm_call_cacheable(ci, cc));

                    return 0;
                }
            }
            else if (argc == lead_num) {
                /* no kwarg */
                VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
                args_setup_kw_parameters(ec, iseq, NULL, 0, NULL, klocals);

                if (klocals[kw_param->num] == INT2FIX(0)) {
                    /* copy from default_values */
                    CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
                                    vm_call_cacheable(ci, cc));
                }

                return 0;
            }
        }
    }

    return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
}
static VALUE
vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_iseq_setup);

    const struct rb_callcache *cc = calling->cc;
    const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
    const int param_size = ISEQ_BODY(iseq)->param.size;
    const int local_size = ISEQ_BODY(iseq)->local_table_size;
    const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
    return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
}

static inline VALUE
vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
                     int opt_pc, int param_size, int local_size)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;

    if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
        return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
    }
    else {
        return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
    }
}
static inline VALUE
vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
                          int opt_pc, int param_size, int local_size)
{
    const rb_iseq_t *iseq = def_iseq_ptr(me->def);
    VALUE *argv = cfp->sp - calling->argc;
    VALUE *sp = argv + param_size;
    cfp->sp = argv - 1 /* recv */;

    vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
                  calling->block_handler, (VALUE)me,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
                  local_size - param_size,
                  ISEQ_BODY(iseq)->stack_max);
    return Qundef;
}
static inline VALUE
vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
{
    const struct rb_callcache *cc = calling->cc;
    unsigned int i;
    VALUE *argv = cfp->sp - calling->argc;
    const rb_callable_method_entry_t *me = vm_cc_cme(cc);
    const rb_iseq_t *iseq = def_iseq_ptr(me->def);
    VALUE *src_argv = argv;
    VALUE *sp_orig, *sp;
    VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;

    if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
        struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
        const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
        dst_captured->code.val = src_captured->code.val;
        if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
            calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
        }
        else {
            calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
        }
    }

    vm_pop_frame(ec, cfp, cfp->ep);
    cfp = ec->cfp;

    sp_orig = sp = cfp->sp;

    /* push self */
    sp[0] = calling->recv;
    sp++;

    /* copy arguments */
    for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
        *sp++ = src_argv[i];
    }

    vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
                  calling->recv, calling->block_handler, (VALUE)me,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
                  ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
                  ISEQ_BODY(iseq)->stack_max);

    cfp->sp = sp_orig;

    return Qundef;
}
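/* Note (editorial): unlike vm_call_iseq_setup_normal, the tailcall variant
 * pops the caller's frame before pushing the callee's, so a self-recursive
 * call compiled with the `tailcall_optimization: true` compile option reuses
 * the same stack region instead of growing it. */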
static void
ractor_unsafe_check(void)
{
    if (!rb_ractor_main_p()) {
        rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from not main ractor");
    }
}
static VALUE
call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
    return (*f)(recv, rb_ary_new4(argc, argv));
}

static VALUE
call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
    return (*f)(argc, argv, recv);
}

static VALUE
call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
    return (*f)(recv);
}

static VALUE
call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
    return (*f)(recv, argv[0]);
}

static VALUE
call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1]);
}

static VALUE
call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2]);
}

static VALUE
call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
}

static VALUE
call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
}

static VALUE
call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}

static VALUE
call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}

static VALUE
call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}

static VALUE
call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}

static VALUE
call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}

static VALUE
call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}

static VALUE
call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}

static VALUE
call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}

static VALUE
call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}

static VALUE
call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    ractor_unsafe_check();
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}
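/* Note (editorial): one thunk exists per C-function arity. When a method is
 * defined with, say, rb_define_method(klass, "m", func, 2), the stored
 * rb_method_cfunc_t gets call_cfunc_2 as its invoker; arity -1 maps to
 * call_cfunc_m1 (argc/argv style) and arity -2 to call_cfunc_m2 (receiver
 * plus a Ruby Array of the arguments). */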
static VALUE
ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
    return (*f)(recv, rb_ary_new4(argc, argv));
}

static VALUE
ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(int, const VALUE *, VALUE) = (VALUE(*)(int, const VALUE *, VALUE))func;
    return (*f)(argc, argv, recv);
}

static VALUE
ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
    return (*f)(recv);
}

static VALUE
ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
    return (*f)(recv, argv[0]);
}

static VALUE
ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1]);
}

static VALUE
ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2]);
}

static VALUE
ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
}

static VALUE
ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
}

static VALUE
ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}

static VALUE
ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}

static VALUE
ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}

static VALUE
ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}

static VALUE
ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}

static VALUE
ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}

static VALUE
ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}

static VALUE
ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}

static VALUE
ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}

static VALUE
ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
{
    VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
    return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}
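/* Note (editorial): the ractor_safe_* twins above skip ractor_unsafe_check(),
 * so they never raise rb_eRactorUnsafeError. Which family a method uses is
 * decided at definition time; extensions that declare themselves ractor-safe
 * (via rb_ext_ractor_safe(true)) get the ractor_safe_* invokers, everything
 * else is confined to the main ractor. */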
static inline int
vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
{
    const int ov_flags = RAISED_STACKOVERFLOW;
    if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
    if (rb_ec_raised_p(ec, ov_flags)) {
        rb_ec_raised_reset(ec, ov_flags);
        return TRUE;
    }
    return FALSE;
}

#define CHECK_CFP_CONSISTENCY(func) \
    (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
     rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
static const rb_method_cfunc_t *
vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
{
#if VM_DEBUG_VERIFY_METHOD_CACHE
    switch (me->def->type) {
      case VM_METHOD_TYPE_CFUNC:
      case VM_METHOD_TYPE_NOTIMPLEMENTED:
        break;
# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
        METHOD_BUG(ISEQ);
        METHOD_BUG(ATTRSET);
        METHOD_BUG(IVAR);
        METHOD_BUG(BMETHOD);
        METHOD_BUG(ZSUPER);
        METHOD_BUG(UNDEF);
        METHOD_BUG(OPTIMIZED);
        METHOD_BUG(MISSING);
        METHOD_BUG(REFINED);
        METHOD_BUG(ALIAS);
# undef METHOD_BUG
      default:
        rb_bug("wrong method type: %d", me->def->type);
    }
#endif
    return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
}
static VALUE
vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
                          int argc, VALUE *argv, VALUE *stack_bottom)
{
    RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;
    VALUE val;
    const rb_callable_method_entry_t *me = vm_cc_cme(cc);
    const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);

    VALUE recv = calling->recv;
    VALUE block_handler = calling->block_handler;
    VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;

    if (UNLIKELY(calling->kw_splat)) {
        frame_type |= VM_FRAME_FLAG_CFRAME_KW;
    }

    VM_ASSERT(reg_cfp == ec->cfp);

    RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);

    vm_push_frame(ec, NULL, frame_type, recv,
                  block_handler, (VALUE)me,
                  0, ec->cfp->sp, 0, 0);

    int len = cfunc->argc;
    if (len >= 0) rb_check_arity(argc, len, len);

    reg_cfp->sp = stack_bottom;
    val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);

    CHECK_CFP_CONSISTENCY("vm_call_cfunc");

    rb_vm_pop_frame(ec);

    VM_ASSERT(ec->cfp->sp == stack_bottom);

    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
    RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);

    return val;
}
// Push a C method frame for a given cme. This is called when JIT code skipped
// pushing a frame but the C method reached a point where a frame is needed.
void
rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
{
    VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
    rb_execution_context_t *ec = GET_EC();
    VALUE *sp = ec->cfp->sp;
    VALUE recv = *(sp - recv_idx - 1);
    VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
    VALUE block_handler = VM_BLOCK_HANDLER_NONE;
#if VM_CHECK_MODE > 0
    // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
    *(GET_EC()->cfp->sp) = Qfalse;
#endif
    vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
}
// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
bool
rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
{
    return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
}

static VALUE
vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    int argc = calling->argc;
    VALUE *stack_bottom = reg_cfp->sp - argc - 1;
    VALUE *argv = &stack_bottom[1];

    return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
}
static VALUE
vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    RB_DEBUG_COUNTER_INC(ccf_cfunc_other);

    CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
    VALUE argv_ary;
    if (UNLIKELY(argv_ary = calling->heap_argv)) {
        VM_ASSERT(!IS_ARGS_KEYWORD(ci));
        int argc = RARRAY_LENINT(argv_ary);
        VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
        VALUE *stack_bottom = reg_cfp->sp - 2;

        VM_ASSERT(calling->argc == 1);
        VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
        VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary

        return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
    }
    else {
        CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat);

        return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
    }
}
static VALUE
vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
{
    VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
    int argc = RARRAY_LENINT(argv_ary) - argc_offset;

    if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
        return vm_call_cfunc_other(ec, reg_cfp, calling);
    }

    VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
    calling->kw_splat = 0;
    int i;
    VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
    VALUE *sp = stack_bottom;
    CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
    for(i = 0; i < argc; i++) {
        *++sp = argv[i];
    }
    reg_cfp->sp = sp+1;

    return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
}
static VALUE
vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
    VALUE argv_ary = reg_cfp->sp[-1];
    int argc = RARRAY_LENINT(argv_ary);
    VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
    VALUE last_hash;
    int argc_offset = 0;

    if (UNLIKELY(argc > 0 &&
                 RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
                 (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
        if (!RHASH_EMPTY_P(last_hash)) {
            return vm_call_cfunc_other(ec, reg_cfp, calling);
        }
        argc_offset++;
    }
    return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
}
static VALUE
vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
    VALUE keyword_hash = reg_cfp->sp[-1];

    if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
        return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
    }

    return vm_call_cfunc_other(ec, reg_cfp, calling);
}
static VALUE
vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    RB_DEBUG_COUNTER_INC(ccf_cfunc);

    if (IS_ARGS_SPLAT(ci)) {
        if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
            // f(*a)
            CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
            return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
        }
        if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
            // f(*a, **kw)
            CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
            return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
        }
    }

    CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
    return vm_call_cfunc_other(ec, reg_cfp, calling);
}
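/* Note (editorial): the two specializations above only fire for the exact
 * shapes f(*a) (one splat, no keywords) and f(*a, **kw); every other call
 * shape goes through vm_call_cfunc_other, which runs the full
 * CALLER_SETUP_ARG argument normalization. */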
static VALUE
vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    const struct rb_callcache *cc = calling->cc;
    RB_DEBUG_COUNTER_INC(ccf_ivar);
    cfp->sp -= 1;
    VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
    return ivar;
}
static VALUE
vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(ccf_attrset);
    VALUE val = *(cfp->sp - 1);
    cfp->sp -= 2;
    attr_index_t index = vm_cc_attr_index(cc);
    shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
    ID id = vm_cc_cme(cc)->def->body.attr.id;
    rb_check_frozen_internal(obj);
    VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
    if (UNDEF_P(res)) {
        switch (BUILTIN_TYPE(obj)) {
          case T_OBJECT:
          case T_CLASS:
          case T_MODULE:
            break;
          default:
            {
                res = vm_setivar_default(obj, id, val, dest_shape_id, index);
                if (!UNDEF_P(res)) {
                    return res;
                }
            }
        }
        res = vm_setivar_slowpath_attr(obj, id, val, cc);
    }
    return res;
}
static VALUE
vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
}
*ec
, struct rb_calling_info
*calling
, const VALUE
*argv
)
3802 const struct rb_callcache
*cc
= calling
->cc
;
3803 const rb_callable_method_entry_t
*cme
= vm_cc_cme(cc
);
3804 VALUE procv
= cme
->def
->body
.bmethod
.proc
;
3806 if (!RB_OBJ_SHAREABLE_P(procv
) &&
3807 cme
->def
->body
.bmethod
.defined_ractor
!= rb_ractor_self(rb_ec_ractor_ptr(ec
))) {
3808 rb_raise(rb_eRuntimeError
, "defined with an un-shareable Proc in a different Ractor");
3811 /* control block frame */
3812 GetProcPtr(procv
, proc
);
3813 val
= rb_vm_invoke_bmethod(ec
, proc
, calling
->recv
, CALLING_ARGC(calling
), argv
, calling
->kw_splat
, calling
->block_handler
, vm_cc_cme(cc
));
static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
static VALUE invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc);
static VALUE
vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);

    const struct rb_callcache *cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
    VALUE procv = cme->def->body.bmethod.proc;

    if (!RB_OBJ_SHAREABLE_P(procv) &&
        cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
        rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
    }

    rb_proc_t *proc;
    GetProcPtr(procv, proc);
    const struct rb_block *block = &proc->block;

    while (vm_block_type(block) == block_type_proc) {
        block = vm_proc_block(block->as.proc);
    }
    VM_ASSERT(vm_block_type(block) == block_type_iseq);

    const struct rb_captured_block *captured = &block->as.captured;
    const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
    VALUE * const argv = cfp->sp - calling->argc;
    const int arg_size = ISEQ_BODY(iseq)->param.size;

    int opt_pc;
    if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
        opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
    }
    else {
        opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
    }

    cfp->sp = argv - 1; // -1 for the receiver

    vm_push_frame(ec, iseq,
                  VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
                  calling->recv,
                  VM_GUARDED_PREV_EP(captured->ep),
                  (VALUE)cme,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
                  argv + arg_size,
                  ISEQ_BODY(iseq)->local_table_size - arg_size,
                  ISEQ_BODY(iseq)->stack_max);

    return Qundef;
}
static VALUE
vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);

    VALUE *argv;
    int argc;
    CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
    if (UNLIKELY(calling->heap_argv)) {
        argv = RARRAY_PTR(calling->heap_argv);
        cfp->sp -= 2;
    }
    else {
        argc = calling->argc;
        argv = ALLOCA_N(VALUE, argc);
        MEMCPY(argv, cfp->sp - argc, VALUE, argc);
        cfp->sp += - argc - 1;
    }

    return vm_call_bmethod_body(ec, calling, argv);
}
static VALUE
vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_bmethod);

    const struct rb_callcache *cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
    VALUE procv = cme->def->body.bmethod.proc;

    rb_proc_t *proc;
    GetProcPtr(procv, proc);
    const struct rb_block *block = &proc->block;

    while (vm_block_type(block) == block_type_proc) {
        block = vm_proc_block(block->as.proc);
    }
    if (vm_block_type(block) == block_type_iseq) {
        CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
        return vm_call_iseq_bmethod(ec, cfp, calling);
    }

    CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
    return vm_call_noniseq_bmethod(ec, cfp, calling);
}
VALUE
rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
{
    VALUE klass = current_class;

    /* for a prepended Module, start from the covered class */
    if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN) &&
        RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
        klass = RBASIC_CLASS(klass);
    }

    while (RTEST(klass)) {
        VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
        if (owner == target_owner) {
            return klass;
        }
        klass = RCLASS_SUPER(klass);
    }

    return current_class; /* maybe module function */
}
static const rb_callable_method_entry_t *
aliased_callable_method_entry(const rb_callable_method_entry_t *me)
{
    const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
    const rb_callable_method_entry_t *cme;

    if (orig_me->defined_class == 0) {
        VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
        VM_ASSERT(RB_TYPE_P(orig_me->owner, T_MODULE));
        cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);

        if (me->def->reference_count == 1) {
            RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
        }
        else {
            rb_method_definition_t *def =
                rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
            rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
        }
    }
    else {
        cme = (const rb_callable_method_entry_t *)orig_me;
    }

    VM_ASSERT(callable_method_entry_p(cme));
    return cme;
}

const rb_callable_method_entry_t *
rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
{
    return aliased_callable_method_entry(me);
}
static VALUE
vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    calling->cc = &VM_CC_ON_STACK(Qundef,
                                  vm_call_general,
                                  {{0}},
                                  aliased_callable_method_entry(vm_cc_cme(calling->cc)));

    return vm_call_method_each_type(ec, cfp, calling);
}
static enum method_missing_reason
ci_missing_reason(const struct rb_callinfo *ci)
{
    enum method_missing_reason stat = MISSING_NOENTRY;
    if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
    if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
    if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
    return stat;
}

static VALUE vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
static VALUE
vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
               struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
{
    ASSUME(calling->argc >= 0);

    enum method_missing_reason missing_reason = MISSING_NOENTRY;
    int argc = calling->argc;
    VALUE recv = calling->recv;
    VALUE klass = CLASS_OF(recv);
    ID mid = rb_check_id(&symbol);
    flags |= VM_CALL_OPT_SEND;

    if (UNLIKELY(! mid)) {
        mid = idMethodMissing;
        missing_reason = ci_missing_reason(ci);
        ec->method_missing_reason = missing_reason;

        VALUE argv_ary;
        if (UNLIKELY(argv_ary = calling->heap_argv)) {
            if (rb_method_basic_definition_p(klass, idMethodMissing)) {
                rb_ary_unshift(argv_ary, symbol);

                /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
                int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
                VALUE exc = rb_make_no_method_exception(
                    rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);

                rb_exc_raise(exc);
            }

            rb_ary_unshift(argv_ary, rb_str_intern(symbol));
        }
        else {
            /* E.g. when argc == 2
             *
             *   |      |        |      |  TOPN
             *   +------+        |      |
             *   |      |  +---> | arg1 | 0
             *   +------+  |     +------+
             *   | arg1 | -+ +-> | arg0 | 1
             *   +------+    |   +------+
             *   | arg0 | ---+   | sym  | 2
             *   +------+        +------+
             *   | recv |        | recv | 3
             * --+------+--------+------+------
             */
            int i = argc;
            CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
            INC_SP(1);
            MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
            argc = ++calling->argc;

            if (rb_method_basic_definition_p(klass, idMethodMissing)) {
                /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
                TOPN(i) = symbol;
                int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
                const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
                VALUE exc = rb_make_no_method_exception(
                    rb_eNoMethodError, 0, recv, argc, argv, priv);

                rb_exc_raise(exc);
            }
            else {
                TOPN(i) = rb_str_intern(symbol);
            }
        }
    }

    calling->cd = &(struct rb_call_data) {
        .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
        .cc = NULL,
    };
    calling->cc = &VM_CC_ON_STACK(klass,
                                  vm_call_general,
                                  { .method_missing_reason = missing_reason },
                                  rb_callable_method_entry_with_refinements(klass, mid, NULL));

    if (flags & VM_CALL_FCALL) {
        return vm_call_method(ec, reg_cfp, calling);
    }

    const struct rb_callcache *cc = calling->cc;
    VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));

    if (vm_cc_cme(cc) != NULL) {
        switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
          case METHOD_VISI_PUBLIC: /* likely */
            return vm_call_method_each_type(ec, reg_cfp, calling);
          case METHOD_VISI_PRIVATE:
            vm_cc_method_missing_reason_set(cc, MISSING_PRIVATE);
            break;
          case METHOD_VISI_PROTECTED:
            vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
            break;
          default:
            VM_UNREACHABLE(vm_call_method);
        }
        return vm_call_method_missing(ec, reg_cfp, calling);
    }

    return vm_call_method_nome(ec, reg_cfp, calling);
}
static VALUE
vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    int i;
    VALUE sym;

    i = calling->argc - 1;

    if (calling->argc == 0) {
        rb_raise(rb_eArgError, "no method name given");
    }

    sym = TOPN(i);
    /* E.g. when i == 2
     *
     *   |      |        |      |  TOPN
     *   +------+        +------+
     *   | arg1 | ---+   |      | 0
     *   +------+    |   +------+
     *   | arg0 | -+ +-> | arg1 | 1
     *   +------+  |     +------+
     *   | sym  |  +---> | arg0 | 2
     *   +------+        +------+
     *   | recv |        | recv | 3
     * --+------+--------+------+------
     */
    /* shift arguments */
    if (i > 0) {
        MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
    }
    calling->argc -= 1;
    DEC_SP(1);

    return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
}
static VALUE
vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
    const struct rb_callinfo *ci = calling->cd->ci;
    int flags = VM_CALL_FCALL;
    VALUE sym;

    VALUE argv_ary;
    CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
    if (UNLIKELY(argv_ary = calling->heap_argv)) {
        sym = rb_ary_shift(argv_ary);
        flags |= VM_CALL_ARGS_SPLAT;
        if (calling->kw_splat) {
            VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
            ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
            calling->kw_splat = 0;
        }
        return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
    }

    if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
    return vm_call_opt_send0(ec, reg_cfp, calling, flags);
}
4163 vm_call_opt_send_simple(rb_execution_context_t
*ec
, rb_control_frame_t
*reg_cfp
, struct rb_calling_info
*calling
)
4165 RB_DEBUG_COUNTER_INC(ccf_opt_send_simple
);
4166 return vm_call_opt_send0(ec
, reg_cfp
, calling
, vm_ci_flag(calling
->cd
->ci
) | VM_CALL_FCALL
);
4170 vm_call_opt_send(rb_execution_context_t
*ec
, rb_control_frame_t
*reg_cfp
, struct rb_calling_info
*calling
)
4172 RB_DEBUG_COUNTER_INC(ccf_opt_send
);
4174 const struct rb_callinfo
*ci
= calling
->cd
->ci
;
4175 int flags
= vm_ci_flag(ci
);
4177 if (UNLIKELY(!(flags
& VM_CALL_ARGS_SIMPLE
) &&
4178 ((calling
->argc
== 1 && (flags
& (VM_CALL_ARGS_SPLAT
| VM_CALL_KW_SPLAT
))) ||
4179 (calling
->argc
== 2 && (flags
& VM_CALL_ARGS_SPLAT
) && (flags
& VM_CALL_KW_SPLAT
)) ||
4180 ((flags
& VM_CALL_KWARG
) && (vm_ci_kwarg(ci
)->keyword_len
== calling
->argc
))))) {
4181 CC_SET_FASTPATH(calling
->cc
, vm_call_opt_send_complex
, TRUE
);
4182 return vm_call_opt_send_complex(ec
, reg_cfp
, calling
);
4185 CC_SET_FASTPATH(calling
->cc
, vm_call_opt_send_simple
, TRUE
);
4186 return vm_call_opt_send_simple(ec
, reg_cfp
, calling
);
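
/*
 * The result of this check is burned into the call cache: call sites whose
 * argument shape may require heap-collected argv (e.g. `obj.send(*args)` or
 * `obj.send(:m, **kw)`) keep the complex handler, all others the simple one.
 */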
static VALUE
vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
                            const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
{
    RB_DEBUG_COUNTER_INC(ccf_method_missing);

    VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
    unsigned int argc, flag;

    flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
    argc = ++calling->argc;

    /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
    CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
    vm_check_canary(ec, reg_cfp->sp);
    if (argc > 1) {
        MEMMOVE(argv+1, argv, VALUE, argc-1);
    }
    argv[0] = ID2SYM(vm_ci_mid(orig_ci));
    INC_SP(1);

    ec->method_missing_reason = reason;
    calling->cd = &(struct rb_call_data) {
        .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
        .cc = NULL,
    };
    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
                                  rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
    return vm_call_method(ec, reg_cfp, calling);
}

static VALUE
vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
}

static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
static VALUE
vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
{
    klass = RCLASS_SUPER(klass);

    const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
    if (cme == NULL) {
        return vm_call_method_nome(ec, cfp, calling);
    }
    if (cme->def->type == VM_METHOD_TYPE_REFINED &&
        cme->def->body.refined.orig_me) {
        cme = refined_method_callable_without_refinement(cme);
    }

    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);

    return vm_call_method_each_type(ec, cfp, calling);
}

static VALUE
find_refinement(VALUE refinements, VALUE klass)
{
    if (NIL_P(refinements)) {
        return Qnil;
    }
    return rb_hash_lookup(refinements, klass);
}

PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
static rb_control_frame_t *
current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
{
    rb_control_frame_t *top_cfp = cfp;

    if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
        const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;

        do {
            cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
            if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
                /* TODO: orphan block */
                return top_cfp;
            }
        } while (cfp->iseq != local_iseq);
    }
    return cfp;
}
static const rb_callable_method_entry_t *
refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
{
    const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
    const rb_callable_method_entry_t *cme;

    if (orig_me->defined_class == 0) {
        cme = NULL;
        rb_notimplement();
    }
    else {
        cme = (const rb_callable_method_entry_t *)orig_me;
    }

    VM_ASSERT(callable_method_entry_p(cme));

    if (UNDEFINED_METHOD_ENTRY_P(cme)) {
        cme = NULL;
    }

    return cme;
}

static const rb_callable_method_entry_t *
search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    ID mid = vm_ci_mid(calling->cd->ci);
    const rb_cref_t *cref = vm_get_cref(cfp->ep);
    const struct rb_callcache * const cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);

    for (; cref; cref = CREF_NEXT(cref)) {
        const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
        if (NIL_P(refinement)) continue;

        const rb_callable_method_entry_t *const ref_me =
            rb_callable_method_entry(refinement, mid);

        if (ref_me) {
            if (vm_cc_call(cc) == vm_call_super_method) {
                const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
                const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
                if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
                    continue;
                }
            }

            if (cme->def->type != VM_METHOD_TYPE_REFINED ||
                cme->def != ref_me->def) {
                cme = ref_me;
            }
            if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
                return cme;
            }
        }
        else {
            return NULL;
        }
    }

    if (vm_cc_cme(cc)->def->body.refined.orig_me) {
        return refined_method_callable_without_refinement(vm_cc_cme(cc));
    }
    else {
        VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
        const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
        return cme;
    }
}
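
/*
 * Refinement lookup walks the cref (lexical scope) chain outwards: each
 * scope activated with `using` is asked for a refinement of the method's
 * owner, and a hit wins over the refined class. Only when no usable entry
 * is found does the search fall back to the original method or the
 * superclass chain.
 */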
static VALUE
vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);

    if (ref_cme) {
        if (calling->cd->cc) {
            const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
            RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
            return vm_call_method(ec, cfp, calling);
        }
        else {
            struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
            calling->cc = ref_cc;
            return vm_call_method(ec, cfp, calling);
        }
    }
    else {
        return vm_call_method_nome(ec, cfp, calling);
    }
}

static inline VALUE
vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
NOINLINE(static VALUE
         vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                                  struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));

static VALUE
vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                         struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
{
    int argc = calling->argc;

    /* remove self */
    if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
    DEC_SP(1);

    return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
}

static VALUE
vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_call);

    const struct rb_callinfo *ci = calling->cd->ci;
    VALUE procval = calling->recv;
    return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
}

static VALUE
vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_block_call);

    VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
    const struct rb_callinfo *ci = calling->cd->ci;

    if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
        return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
    }
    else {
        calling->recv = rb_vm_bh_to_procval(ec, block_handler);
        calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
        return vm_call_general(ec, reg_cfp, calling);
    }
}
static VALUE
vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
{
    VALUE recv = calling->recv;

    VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
    VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
    VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);

    const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
    return internal_RSTRUCT_GET(recv, off);
}

static VALUE
vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);

    VALUE ret = vm_call_opt_struct_aref0(ec, calling);
    reg_cfp->sp -= 1;
    return ret;
}

static VALUE
vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
{
    VALUE recv = calling->recv;

    VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
    VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
    VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);

    rb_check_frozen(recv);

    const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
    internal_RSTRUCT_SET(recv, off, val);

    return val;
}

static VALUE
vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);

    VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
    reg_cfp->sp -= 2;
    return ret;
}
NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
                                        const struct rb_callinfo *ci, const struct rb_callcache *cc));

#define VM_CALL_METHOD_ATTR(var, func, nohook) \
    if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
        EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
                        vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
        var = func; \
        EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
                        vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
    } \
    else { \
        nohook; \
        var = func; \
    }
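
/*
 * VM_CALL_METHOD_ATTR(var, func, nohook) assigns the result of `func` to
 * `var`. While :c_call/:c_return TracePoints are armed, the evaluation is
 * wrapped in the corresponding event hooks; otherwise `nohook` runs first,
 * which in the uses below installs the attr reader/writer fastpath into
 * the call cache.
 */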
static VALUE
vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
                  const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    switch (vm_cc_cme(cc)->def->body.optimized.type) {
      case OPTIMIZED_METHOD_TYPE_SEND:
        CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
        return vm_call_opt_send(ec, cfp, calling);
      case OPTIMIZED_METHOD_TYPE_CALL:
        CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
        return vm_call_opt_call(ec, cfp, calling);
      case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
        CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
        return vm_call_opt_block_call(ec, cfp, calling);
      case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
        CALLER_SETUP_ARG(cfp, calling, ci, 0);
        rb_check_arity(calling->argc, 0, 0);

        VALUE v;
        VM_CALL_METHOD_ATTR(v,
                            vm_call_opt_struct_aref(ec, cfp, calling),
                            set_vm_cc_ivar(cc); \
                            CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
        return v;
      }
      case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
        CALLER_SETUP_ARG(cfp, calling, ci, 1);
        rb_check_arity(calling->argc, 1, 1);

        VALUE v;
        VM_CALL_METHOD_ATTR(v,
                            vm_call_opt_struct_aset(ec, cfp, calling),
                            set_vm_cc_ivar(cc); \
                            CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
        return v;
      }
      default:
        rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
    }
}
static VALUE
vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;
    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
    VALUE v;

    VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));

    switch (cme->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
        return vm_call_iseq_setup(ec, cfp, calling);

      case VM_METHOD_TYPE_NOTIMPLEMENTED:
      case VM_METHOD_TYPE_CFUNC:
        CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
        return vm_call_cfunc(ec, cfp, calling);

      case VM_METHOD_TYPE_ATTRSET:
        CALLER_SETUP_ARG(cfp, calling, ci, 1);

        rb_check_arity(calling->argc, 1, 1);

        const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG);

        if (vm_cc_markable(cc)) {
            vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
            VM_CALL_METHOD_ATTR(v,
                                vm_call_attrset_direct(ec, cfp, cc, calling->recv),
                                CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
        }
        else {
            cc = &((struct rb_callcache) {
                .flags = T_IMEMO |
                    (imemo_callcache << FL_USHIFT) |
                    VM_CALLCACHE_UNMARKABLE |
                    VM_CALLCACHE_ON_STACK,
                .klass = cc->klass,
                .cme_  = cc->cme_,
                .call_ = cc->call_,
                .aux_  = {
                    .attr = {
                        .value = INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT,
                    }
                },
            });

            VM_CALL_METHOD_ATTR(v,
                                vm_call_attrset_direct(ec, cfp, cc, calling->recv),
                                CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
        }
        return v;

      case VM_METHOD_TYPE_IVAR:
        CALLER_SETUP_ARG(cfp, calling, ci, 0);
        rb_check_arity(calling->argc, 0, 0);
        vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
        const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT);
        VM_CALL_METHOD_ATTR(v,
                            vm_call_ivar(ec, cfp, calling),
                            CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
        return v;

      case VM_METHOD_TYPE_MISSING:
        vm_cc_method_missing_reason_set(cc, 0);
        CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
        return vm_call_method_missing(ec, cfp, calling);

      case VM_METHOD_TYPE_BMETHOD:
        CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
        return vm_call_bmethod(ec, cfp, calling);

      case VM_METHOD_TYPE_ALIAS:
        CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
        return vm_call_alias(ec, cfp, calling);

      case VM_METHOD_TYPE_OPTIMIZED:
        return vm_call_optimized(ec, cfp, calling, ci, cc);

      case VM_METHOD_TYPE_UNDEF:
        break;

      case VM_METHOD_TYPE_ZSUPER:
        return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));

      case VM_METHOD_TYPE_REFINED:
        // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
        // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
        return vm_call_refined(ec, cfp, calling);
    }

    rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
}

NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv,
                                             VALUE obj, int call_status));
static VALUE
vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    /* method missing */
    const struct rb_callinfo *ci = calling->cd->ci;
    const int stat = ci_missing_reason(ci);

    if (vm_ci_mid(ci) == idMethodMissing) {
        if (UNLIKELY(calling->heap_argv)) {
            vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
        }
        else {
            rb_control_frame_t *reg_cfp = cfp;
            VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
            vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
        }
    }
    else {
        return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
    }
}

/* Protected method calls and super invocations need to check that the receiver
 * (self for super) inherits the module on which the method is defined.
 * In the case of refinements, it should consider the original class not the
 * refinement.
 */
static VALUE
vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
{
    VALUE defined_class = me->defined_class;
    VALUE refined_class = RCLASS_REFINED_CLASS(defined_class);
    return NIL_P(refined_class) ? defined_class : refined_class;
}
static inline VALUE
vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    const struct rb_callcache *cc = calling->cc;

    VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));

    if (vm_cc_cme(cc) != NULL) {
        switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
          case METHOD_VISI_PUBLIC: /* likely */
            return vm_call_method_each_type(ec, cfp, calling);

          case METHOD_VISI_PRIVATE:
            if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
                enum method_missing_reason stat = MISSING_PRIVATE;
                if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;

                vm_cc_method_missing_reason_set(cc, stat);
                CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
                return vm_call_method_missing(ec, cfp, calling);
            }
            return vm_call_method_each_type(ec, cfp, calling);

          case METHOD_VISI_PROTECTED:
            if (!(vm_ci_flag(ci) & (VM_CALL_OPT_SEND | VM_CALL_FCALL))) {
                VALUE defined_class = vm_defined_class_for_protected_call(vm_cc_cme(cc));
                if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
                    vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
                    return vm_call_method_missing(ec, cfp, calling);
                }
                else {
                    /* caching method info to dummy cc */
                    VM_ASSERT(vm_cc_cme(cc) != NULL);
                    struct rb_callcache cc_on_stack = *cc;
                    FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
                    calling->cc = &cc_on_stack;
                    return vm_call_method_each_type(ec, cfp, calling);
                }
            }
            return vm_call_method_each_type(ec, cfp, calling);

          default:
            rb_bug("unreachable");
        }
    }
    else {
        return vm_call_method_nome(ec, cfp, calling);
    }
}
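
/*
 * Example: `obj.private_m` compiles without VM_CALL_FCALL, so the PRIVATE
 * arm above reroutes it to method_missing dispatch (the familiar
 * NoMethodError for a private method), whereas a receiverless `private_m`
 * carries VM_CALL_FCALL and dispatches normally.
 */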
static VALUE
vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_general);
    return vm_call_method(ec, reg_cfp, calling);
}

void
rb_vm_cc_general(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());

    *(vm_call_handler *)&cc->call_ = vm_call_general;
}
static VALUE
vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
    RB_DEBUG_COUNTER_INC(ccf_super_method);

    // This line is introduced to make this function different from `vm_call_general`, because some
    // compilers (VC, as we found) can merge identical functions so that the two share one address.
    // The address of `vm_call_super_method` is used in `search_refined_method`, so it should be different.
    if (ec == NULL) rb_bug("unreachable");

    /* this check is required to distinguish with other functions. */
    VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
    return vm_call_method(ec, reg_cfp, calling);
}

static inline VALUE
vm_search_normal_superclass(VALUE klass)
{
    if (BUILTIN_TYPE(klass) == T_ICLASS &&
        RB_TYPE_P(RBASIC(klass)->klass, T_MODULE) &&
        FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
        klass = RBASIC(klass)->klass;
    }
    klass = RCLASS_ORIGIN(klass);
    return RCLASS_SUPER(klass);
}

NORETURN(static void vm_super_outside(void));

static void
vm_super_outside(void)
{
    rb_raise(rb_eNoMethodError, "super called outside of method");
}

static const struct rb_callcache *
empty_cc_for_super(void)
{
    return &vm_empty_cc_for_super;
}
static const struct rb_callcache *
vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
{
    VALUE current_defined_class;
    const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);

    if (!me) {
        vm_super_outside();
    }

    current_defined_class = vm_defined_class_for_protected_call(me);

    if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
        reg_cfp->iseq != method_entry_iseqptr(me) &&
        !rb_obj_is_kind_of(recv, current_defined_class)) {
        VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
            RCLASS_INCLUDER(current_defined_class) : current_defined_class;

        if (m) { /* not bound UnboundMethod */
            rb_raise(rb_eTypeError,
                     "self has wrong type to call super in this context: "
                     "%"PRIsVALUE" (expected %"PRIsVALUE")",
                     rb_obj_class(recv), m);
        }
    }

    if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
        rb_raise(rb_eRuntimeError,
                 "implicit argument passing of super from method defined"
                 " by define_method() is not supported."
                 " Specify all arguments explicitly.");
    }

    ID mid = me->def->original_id;

    // update iseq. really? (TODO)
    cd->ci = vm_ci_new_runtime(mid,
                               vm_ci_flag(cd->ci),
                               vm_ci_argc(cd->ci),
                               vm_ci_kwarg(cd->ci));

    RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);

    const struct rb_callcache *cc;

    VALUE klass = vm_search_normal_superclass(me->defined_class);

    if (!klass) {
        /* bound instance method of module */
        cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
        RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
    }
    else {
        cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
        const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);

        // define_method can cache for different method id
        if (cached_cme == NULL) {
            // empty_cc_for_super is not markable object
            cd->cc = empty_cc_for_super();
        }
        else if (cached_cme->called_id != mid) {
            const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
            if (cme) {
                cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
                RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
            }
            else {
                cd->cc = cc = empty_cc_for_super();
            }
        }
        else {
            switch (cached_cme->def->type) {
              // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper.
              case VM_METHOD_TYPE_REFINED:
              // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
              case VM_METHOD_TYPE_ATTRSET:
              case VM_METHOD_TYPE_IVAR:
                vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
                break;
              default:
                break; // use fastpath
            }
        }
    }

    VM_ASSERT((vm_cc_cme(cc), true));

    return cc;
}
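
/*
 * Note that `super` resolves against the method's defined class, not the
 * receiver's class: vm_search_normal_superclass() starts from
 * me->defined_class, so one invokesuper site behaves correctly for every
 * subclass receiver that inherits the method.
 */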
static inline int
block_proc_is_lambda(const VALUE procval)
{
    rb_proc_t *proc;

    if (procval) {
        GetProcPtr(procval, proc);
        return proc->is_lambda;
    }
    else {
        return 0;
    }
}

static VALUE
vm_yield_with_cfunc(rb_execution_context_t *ec,
                    const struct rb_captured_block *captured,
                    VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
                    const rb_callable_method_entry_t *me)
{
    int is_lambda = FALSE; /* TODO */
    VALUE val, arg, blockarg;
    int frame_flag;
    const struct vm_ifunc *ifunc = captured->code.ifunc;

    if (is_lambda) {
        arg = rb_ary_new4(argc, argv);
    }
    else if (argc == 0) {
        arg = Qnil;
    }
    else {
        arg = argv[0];
    }

    blockarg = rb_vm_bh_to_procval(ec, block_handler);

    frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
    if (kw_splat) {
        frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
    }

    vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
                  frame_flag,
                  self,
                  VM_GUARDED_PREV_EP(captured->ep),
                  (VALUE)me,
                  0, ec->cfp->sp, 0, 0);
    val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
    rb_vm_pop_frame(ec);

    return val;
}

VALUE
rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
{
    return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
}

static VALUE
vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
{
    return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
}
static inline int
vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
{
    int i;
    long len = RARRAY_LEN(ary);

    CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);

    for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
        argv[i] = RARRAY_AREF(ary, i);
    }

    return i;
}

static inline VALUE
vm_callee_setup_block_arg_arg0_check(VALUE *argv)
{
    VALUE ary, arg0 = argv[0];
    ary = rb_check_array_type(arg0);

    VM_ASSERT(argv[0] == arg0);

    return ary;
}
static int
vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
{
    if (rb_simple_iseq_p(iseq)) {
        rb_control_frame_t *cfp = ec->cfp;
        VALUE arg0;

        CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);

        if (arg_setup_type == arg_setup_block &&
            calling->argc == 1 &&
            ISEQ_BODY(iseq)->param.flags.has_lead &&
            !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
            !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
            calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
        }

        if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
            if (arg_setup_type == arg_setup_block) {
                if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
                    int i;
                    CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
                    for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
                    calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
                }
                else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
                    calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
                }
            }
            else {
                argument_arity_error(ec, iseq, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
            }
        }

        return 0;
    }
    else {
        return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
    }
}
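
/*
 * This is where block parameter auto-splatting happens: for example
 * `[[1, 2]].each { |a, b| }` passes one Array, which the arg0-splat path
 * destructures into a and b, while `[[1, 2]].each { |a| }` leaves it
 * intact. Lambdas take the arg_setup_method route and keep strict arity.
 */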
static int
vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
{
    struct rb_calling_info calling_entry, *calling;

    calling = &calling_entry;
    calling->argc = argc;
    calling->block_handler = block_handler;
    calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
    calling->recv = Qundef;
    calling->heap_argv = 0;
    struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);

    return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
}
/* ruby iseq -> ruby block */

static VALUE
vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                     struct rb_calling_info *calling, const struct rb_callinfo *ci,
                     bool is_lambda, VALUE block_handler)
{
    const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
    const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
    const int arg_size = ISEQ_BODY(iseq)->param.size;
    VALUE * const rsp = GET_SP() - calling->argc;
    VALUE * const argv = rsp;
    int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);

    SET_SP(rsp);

    vm_push_frame(ec, iseq,
                  VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0),
                  captured->self,
                  VM_GUARDED_PREV_EP(captured->ep), 0,
                  ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
                  rsp + arg_size,
                  ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);

    return Qundef;
}
static VALUE
vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                       struct rb_calling_info *calling, const struct rb_callinfo *ci,
                       MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
{
    VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
    int flags = vm_ci_flag(ci);

    if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
                 ((calling->argc == 0) ||
                  (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
                  (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
                  ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
        CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
        flags = 0;
        if (UNLIKELY(calling->heap_argv)) {
#if VM_ARGC_STACK_MAX < 0
            if (RARRAY_LEN(calling->heap_argv) < 1) {
                rb_raise(rb_eArgError, "no receiver given");
            }
#endif
            calling->recv = rb_ary_shift(calling->heap_argv);
            // Modify stack to avoid cfp consistency error
            reg_cfp->sp++;
            reg_cfp->sp[-1] = reg_cfp->sp[-2];
            reg_cfp->sp[-2] = calling->recv;
            flags |= VM_CALL_ARGS_SPLAT;
        }
        else {
            if (calling->argc < 1) {
                rb_raise(rb_eArgError, "no receiver given");
            }
            calling->recv = TOPN(--calling->argc);
        }
        if (calling->kw_splat) {
            flags |= VM_CALL_KW_SPLAT;
        }
    }
    else {
        if (calling->argc < 1) {
            rb_raise(rb_eArgError, "no receiver given");
        }
        calling->recv = TOPN(--calling->argc);
    }

    return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
}
static VALUE
vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                      struct rb_calling_info *calling, const struct rb_callinfo *ci,
                      MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
{
    VALUE val;
    int argc;
    const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
    CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
    argc = calling->argc;
    val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
    POPN(argc); /* TODO: should put before C/yield? */
    return val;
}
static VALUE
vm_proc_to_block_handler(VALUE procval)
{
    const struct rb_block *block = vm_proc_block(procval);

    switch (vm_block_type(block)) {
      case block_type_iseq:
        return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
      case block_type_ifunc:
        return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
      case block_type_symbol:
        return VM_BH_FROM_SYMBOL(block->as.symbol);
      case block_type_proc:
        return VM_BH_FROM_PROC(block->as.proc);
    }
    VM_UNREACHABLE(vm_yield_with_proc);
    return Qundef;
}

static VALUE
vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                     struct rb_calling_info *calling, const struct rb_callinfo *ci,
                     bool is_lambda, VALUE block_handler)
{
    while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
        VALUE proc = VM_BH_TO_PROC(block_handler);
        is_lambda = block_proc_is_lambda(proc);
        block_handler = vm_proc_to_block_handler(proc);
    }

    return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
}

static inline VALUE
vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                struct rb_calling_info *calling, const struct rb_callinfo *ci,
                bool is_lambda, VALUE block_handler)
{
    VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                  struct rb_calling_info *calling, const struct rb_callinfo *ci,
                  bool is_lambda, VALUE block_handler);

    switch (vm_block_handler_type(block_handler)) {
      case block_handler_type_iseq:   func = vm_invoke_iseq_block;   break;
      case block_handler_type_ifunc:  func = vm_invoke_ifunc_block;  break;
      case block_handler_type_proc:   func = vm_invoke_proc_block;   break;
      case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
      default: rb_bug("vm_invoke_block: unreachable");
    }

    return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
}
static VALUE
vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
{
    const rb_execution_context_t *ec = GET_EC();
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
    struct rb_captured_block *captured;

    if (cfp == 0) {
        rb_bug("vm_make_proc_with_iseq: unreachable");
    }

    captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
    captured->code.iseq = blockiseq;

    return rb_vm_make_proc(ec, captured, rb_cProc);
}

static VALUE
vm_once_exec(VALUE iseq)
{
    VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
    return rb_proc_call_with_block(proc, 0, 0, Qnil);
}

static VALUE
vm_once_clear(VALUE data)
{
    union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
    is->once.running_thread = NULL;
    return Qnil;
}
static bool
check_respond_to_missing(VALUE obj, VALUE v)
{
    VALUE args[2];
    VALUE r;

    args[0] = obj; args[1] = Qfalse;
    r = rb_check_funcall(v, idRespond_to_missing, 2, args);
    if (!UNDEF_P(r) && RTEST(r)) {
        return true;
    }
    else {
        return false;
    }
}
static bool
vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
{
    VALUE klass;
    enum defined_type type = (enum defined_type)op_type;

    switch (type) {
      case DEFINED_IVAR:
        return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
        break;
      case DEFINED_GVAR:
        return rb_gvar_defined(SYM2ID(obj));
        break;
      case DEFINED_CVAR: {
        const rb_cref_t *cref = vm_get_cref(GET_EP());
        klass = vm_get_cvar_base(cref, GET_CFP(), 0);
        return rb_cvar_defined(klass, SYM2ID(obj));
        break;
      }
      case DEFINED_CONST:
      case DEFINED_CONST_FROM: {
        bool allow_nil = type == DEFINED_CONST;
        klass = v;
        return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
        break;
      }
      case DEFINED_FUNC:
        klass = CLASS_OF(v);
        return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
        break;
      case DEFINED_METHOD:{
        VALUE klass = CLASS_OF(v);
        const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);

        if (me) {
            switch (METHOD_ENTRY_VISI(me)) {
              case METHOD_VISI_PRIVATE:
                break;
              case METHOD_VISI_PROTECTED:
                if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
                    break;
                }
                /* fall through */
              case METHOD_VISI_PUBLIC:
                return true;
              default:
                rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
            }
        }
        else {
            return check_respond_to_missing(obj, v);
        }
        break;
      }
      case DEFINED_YIELD:
        if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
            return true;
        }
        break;
      case DEFINED_ZSUPER:
        {
            const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());

            if (me) {
                VALUE klass = vm_search_normal_superclass(me->defined_class);
                if (!klass) return false;

                ID id = me->def->original_id;

                return rb_method_boundp(klass, id, 0);
            }
        }
        break;
      case DEFINED_REF:
        return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
      default:
        rb_bug("unimplemented defined? type (VM)");
        break;
    }

    return false;
}

bool
rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
{
    return vm_defined(ec, reg_cfp, op_type, obj, v);
}
static const VALUE *
vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
{
    rb_num_t i;
    const VALUE *ep = reg_ep;
    for (i = 0; i < lv; i++) {
        ep = GET_PREV_EP(ep);
    }
    return ep;
}

static VALUE
vm_get_special_object(const VALUE *const reg_ep,
                      enum vm_special_object_type type)
{
    switch (type) {
      case VM_SPECIAL_OBJECT_VMCORE:
        return rb_mRubyVMFrozenCore;
      case VM_SPECIAL_OBJECT_CBASE:
        return vm_get_cbase(reg_ep);
      case VM_SPECIAL_OBJECT_CONST_BASE:
        return vm_get_const_base(reg_ep);
      default:
        rb_bug("putspecialobject insn: unknown value_type %d", type);
    }
}
static VALUE
vm_concat_array(VALUE ary1, VALUE ary2st)
{
    const VALUE ary2 = ary2st;
    VALUE tmp1 = rb_check_to_array(ary1);
    VALUE tmp2 = rb_check_to_array(ary2);

    if (NIL_P(tmp1)) {
        tmp1 = rb_ary_new3(1, ary1);
    }
    if (tmp1 == ary1) {
        tmp1 = rb_ary_dup(ary1);
    }

    if (NIL_P(tmp2)) {
        return rb_ary_push(tmp1, ary2);
    }
    else {
        return rb_ary_concat(tmp1, tmp2);
    }
}

static VALUE
vm_concat_to_array(VALUE ary1, VALUE ary2st)
{
    /* ary1 must be a newly created array */
    const VALUE ary2 = ary2st;
    VALUE tmp2 = rb_check_to_array(ary2);

    if (NIL_P(tmp2)) {
        return rb_ary_push(ary1, ary2);
    }
    else {
        return rb_ary_concat(ary1, tmp2);
    }
}

// YJIT implementation is using the C function
// and needs to call a non-static function
VALUE
rb_vm_concat_array(VALUE ary1, VALUE ary2st)
{
    return vm_concat_array(ary1, ary2st);
}

VALUE
rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
{
    return vm_concat_to_array(ary1, ary2st);
}

static VALUE
vm_splat_array(VALUE flag, VALUE ary)
{
    VALUE tmp = rb_check_to_array(ary);
    if (NIL_P(tmp)) {
        return rb_ary_new3(1, ary);
    }
    else if (RTEST(flag)) {
        return rb_ary_dup(tmp);
    }
    else {
        return tmp;
    }
}

// YJIT implementation is using the C function
// and needs to call a non-static function
VALUE
rb_vm_splat_array(VALUE flag, VALUE ary)
{
    return vm_splat_array(flag, ary);
}
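
/*
 * splatarray semantics: `f(*x)` needs x as an Array. A non-convertible
 * value (rb_check_to_array returns nil) is wrapped as [x]; when the flag
 * is set the caller may mutate the result, so a defensive dup is made.
 */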
static VALUE
vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
{
    enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;

    if (flag & VM_CHECKMATCH_ARRAY) {
        long i;
        const long n = RARRAY_LEN(pattern);

        for (i = 0; i < n; i++) {
            VALUE v = RARRAY_AREF(pattern, i);
            VALUE c = check_match(ec, v, target, type);

            if (RTEST(c)) {
                return c;
            }
        }
        return Qfalse;
    }
    else {
        return check_match(ec, pattern, target, type);
    }
}

VALUE
rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
{
    return vm_check_match(ec, target, pattern, flag);
}

static VALUE
vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
{
    const VALUE kw_bits = *(ep - bits);

    if (FIXNUM_P(kw_bits)) {
        unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
        if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
            return Qfalse;
    }
    else {
        VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
        if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
    }
    return Qtrue;
}
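
/*
 * checkkeyword: the set of keywords actually passed is tracked in a hidden
 * local, as a Fixnum bitmap for up to KW_SPECIFIED_BITS_MAX keywords and as
 * a Hash beyond that. `def m(k: expr)` branches on this so `expr` is only
 * evaluated when k was not supplied.
 */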
static void
vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
{
    if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
        RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
        RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
        RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {

        switch (flag) {
          case RUBY_EVENT_CALL:
            RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
            return;
          case RUBY_EVENT_C_CALL:
            RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
            return;
          case RUBY_EVENT_RETURN:
            RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
            return;
          case RUBY_EVENT_C_RETURN:
            RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
            return;
        }
    }
}
static VALUE
vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
{
    if (!rb_const_defined_at(cbase, id)) {
        return 0;
    }
    else if (VM_DEFINECLASS_SCOPED_P(flags)) {
        return rb_public_const_get_at(cbase, id);
    }
    else {
        return rb_const_get_at(cbase, id);
    }
}

static VALUE
vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
{
    if (!RB_TYPE_P(klass, T_CLASS)) {
        return 0;
    }
    else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
        VALUE tmp = rb_class_real(RCLASS_SUPER(klass));

        if (tmp != super) {
            rb_raise(rb_eTypeError,
                     "superclass mismatch for class %"PRIsVALUE"",
                     rb_id2str(id));
        }
        else {
            return klass;
        }
    }
    else {
        return klass;
    }
}

static VALUE
vm_check_if_module(ID id, VALUE mod)
{
    if (!RB_TYPE_P(mod, T_MODULE)) {
        return 0;
    }
    else {
        return mod;
    }
}

static VALUE
declare_under(ID id, VALUE cbase, VALUE c)
{
    rb_set_class_path_string(c, cbase, rb_id2str(id));
    rb_const_set(cbase, id, c);
    return c;
}
static VALUE
vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
{
    /* new class declaration */
    VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
    VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
    rb_define_alloc_func(c, rb_get_alloc_func(c));
    rb_class_inherited(s, c);
    return c;
}

static VALUE
vm_declare_module(ID id, VALUE cbase)
{
    /* new module declaration */
    return declare_under(id, cbase, rb_module_new());
}

NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
static void
unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
{
    VALUE name = rb_id2str(id);
    VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
                               name, type);
    VALUE location = rb_const_source_location_at(cbase, id);
    if (!NIL_P(location)) {
        rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
                    " previous definition of %"PRIsVALUE" was here",
                    rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
    }
    rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
}
static VALUE
vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
{
    VALUE klass;

    if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
        rb_raise(rb_eTypeError,
                 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
                 rb_obj_class(super));
    }

    vm_check_if_namespace(cbase);

    /* find klass */
    rb_autoload_load(cbase, id);
    if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
        if (!vm_check_if_class(id, flags, super, klass))
            unmatched_redefinition("class", cbase, id, klass);
        return klass;
    }
    else {
        return vm_declare_class(id, flags, cbase, super);
    }
}

static VALUE
vm_define_module(ID id, rb_num_t flags, VALUE cbase)
{
    VALUE mod;

    vm_check_if_namespace(cbase);
    if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
        if (!vm_check_if_module(id, mod))
            unmatched_redefinition("module", cbase, id, mod);
        return mod;
    }
    else {
        return vm_declare_module(id, cbase);
    }
}

static VALUE
vm_find_or_create_class_by_id(ID id,
                              rb_num_t flags,
                              VALUE cbase,
                              VALUE super)
{
    rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);

    switch (type) {
      case VM_DEFINECLASS_TYPE_CLASS:
        /* classdef returns class scope value */
        return vm_define_class(id, flags, cbase, super);

      case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
        /* classdef returns class scope value */
        return rb_singleton_class(cbase);

      case VM_DEFINECLASS_TYPE_MODULE:
        /* classdef returns class scope value */
        return vm_define_module(id, flags, cbase);

      default:
        rb_bug("unknown defineclass type: %d", (int)type);
    }
}
static rb_method_visibility_t
vm_scope_visibility_get(const rb_execution_context_t *ec)
{
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);

    if (!vm_env_cref_by_cref(cfp->ep)) {
        return METHOD_VISI_PUBLIC;
    }
    else {
        return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
    }
}

static int
vm_scope_module_func_check(const rb_execution_context_t *ec)
{
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);

    if (!vm_env_cref_by_cref(cfp->ep)) {
        return FALSE;
    }
    else {
        return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
    }
}

static void
vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
{
    VALUE klass;
    rb_method_visibility_t visi;
    rb_cref_t *cref = vm_ec_cref(ec);

    if (is_singleton) {
        klass = rb_singleton_class(obj); /* class and frozen checked in this API */
        visi = METHOD_VISI_PUBLIC;
    }
    else {
        klass = CREF_CLASS_FOR_DEFINITION(cref);
        visi = vm_scope_visibility_get(ec);
    }

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class/module to add method");
    }

    rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
    // Set max_iv_count on klasses based on number of ivar sets that are in the initialize method
    if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
        RCLASS_EXT(klass)->max_iv_count = rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval);
    }

    if (!is_singleton && vm_scope_module_func_check(ec)) {
        klass = rb_singleton_class(klass);
        rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
    }
}
static VALUE
vm_invokeblock_i(struct rb_execution_context_struct *ec,
                 struct rb_control_frame_struct *reg_cfp,
                 struct rb_calling_info *calling)
{
    const struct rb_callinfo *ci = calling->cd->ci;
    VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());

    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        rb_vm_localjump_error("no block given (yield)", Qnil, 0);
    }
    else {
        return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
    }
}

enum method_explorer_type {
    mexp_search_method,
    mexp_search_invokeblock,
    mexp_search_super,
};

static VALUE
vm_sendish(
    struct rb_execution_context_struct *ec,
    struct rb_control_frame_struct *reg_cfp,
    struct rb_call_data *cd,
    VALUE block_handler,
    enum method_explorer_type method_explorer
) {
    VALUE val = Qundef;
    const struct rb_callinfo *ci = cd->ci;
    const struct rb_callcache *cc;
    int argc = vm_ci_argc(ci);
    VALUE recv = TOPN(argc);
    struct rb_calling_info calling = {
        .block_handler = block_handler,
        .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
        .recv = recv,
        .argc = argc,
        .cd = cd,
    };

    switch (method_explorer) {
      case mexp_search_method:
        calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
        val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
        break;
      case mexp_search_super:
        calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
        val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
        break;
      case mexp_search_invokeblock:
        val = vm_invokeblock_i(ec, GET_CFP(), &calling);
        break;
    }
    return val;
}
VALUE
rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
    stack_check(ec);
    VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
    VM_EXEC(ec, val);
    return val;
}

VALUE
rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
{
    stack_check(ec);
    VALUE bh = VM_BLOCK_HANDLER_NONE;
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
    VM_EXEC(ec, val);
    return val;
}

VALUE
rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
    stack_check(ec);
    VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
    VM_EXEC(ec, val);
    return val;
}

VALUE
rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
{
    stack_check(ec);
    VALUE bh = VM_BLOCK_HANDLER_NONE;
    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
    VM_EXEC(ec, val);
    return val;
}
VALUE rb_nil_to_s(VALUE);
VALUE rb_true_to_s(VALUE);
VALUE rb_false_to_s(VALUE);

VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
VALUE rb_fix_to_s(VALUE);

VALUE rb_mod_to_s(VALUE);
VALUE rb_mod_name(VALUE);

static VALUE
vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
{
    int type = TYPE(recv);
    if (type == T_STRING) {
        return recv;
    }

    const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);

    switch (type) {
      case T_SYMBOL:
        if (check_cfunc(vm_cc_cme(cc), rb_sym_to_s)) {
            // rb_sym_to_s() allocates a mutable string, but since we are only
            // going to use this string for interpolation, it's fine to use the
            // frozen string.
            return rb_sym2str(recv);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        if (check_cfunc(vm_cc_cme(cc), rb_mod_to_s)) {
            // rb_mod_to_s() allocates a mutable string, but since we are only
            // going to use this string for interpolation, it's fine to use the
            // frozen string.
            VALUE val = rb_mod_name(recv);
            if (NIL_P(val)) {
                val = rb_mod_to_s(recv);
            }
            return val;
        }
        break;
      case T_NIL:
        if (check_cfunc(vm_cc_cme(cc), rb_nil_to_s)) {
            return rb_nil_to_s(recv);
        }
        break;
      case T_TRUE:
        if (check_cfunc(vm_cc_cme(cc), rb_true_to_s)) {
            return rb_true_to_s(recv);
        }
        break;
      case T_FALSE:
        if (check_cfunc(vm_cc_cme(cc), rb_false_to_s)) {
            return rb_false_to_s(recv);
        }
        break;
      case T_FIXNUM:
        if (check_cfunc(vm_cc_cme(cc), rb_int_to_s)) {
            return rb_fix_to_s(recv);
        }
        break;
    }
    return Qundef;
}
, int bop
, ID id
)
5846 if (BASIC_OP_UNREDEFINED_P(bop
, STRING_REDEFINED_OP_FLAG
)) {
5854 /* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
5855 #define id_cmp idCmp
5858 vm_opt_newarray_max(rb_execution_context_t
*ec
, rb_num_t num
, const VALUE
*ptr
)
5860 if (BASIC_OP_UNREDEFINED_P(BOP_MAX
, ARRAY_REDEFINED_OP_FLAG
)) {
5865 VALUE result
= *ptr
;
5866 rb_snum_t i
= num
- 1;
5868 const VALUE v
= *++ptr
;
5869 if (OPTIMIZED_CMP(v
, result
) > 0) {
5877 return rb_vm_call_with_refinements(ec
, rb_ary_new4(num
, ptr
), idMax
, 0, NULL
, RB_NO_KEYWORDS
);
5882 rb_vm_opt_newarray_max(rb_execution_context_t
*ec
, rb_num_t num
, const VALUE
*ptr
)
5884 return vm_opt_newarray_max(ec
, num
, ptr
);
5888 vm_opt_newarray_min(rb_execution_context_t
*ec
, rb_num_t num
, const VALUE
*ptr
)
5890 if (BASIC_OP_UNREDEFINED_P(BOP_MIN
, ARRAY_REDEFINED_OP_FLAG
)) {
5895 VALUE result
= *ptr
;
5896 rb_snum_t i
= num
- 1;
5898 const VALUE v
= *++ptr
;
5899 if (OPTIMIZED_CMP(v
, result
) < 0) {
5907 return rb_vm_call_with_refinements(ec
, rb_ary_new4(num
, ptr
), idMin
, 0, NULL
, RB_NO_KEYWORDS
);
5912 rb_vm_opt_newarray_min(rb_execution_context_t
*ec
, rb_num_t num
, const VALUE
*ptr
)
5914 return vm_opt_newarray_min(ec
, num
, ptr
);
5918 vm_opt_newarray_hash(rb_execution_context_t
*ec
, rb_num_t num
, const VALUE
*ptr
)
5920 // If Array#hash is _not_ monkeypatched, use the optimized call
5921 if (BASIC_OP_UNREDEFINED_P(BOP_HASH
, ARRAY_REDEFINED_OP_FLAG
)) {
5922 return rb_ary_hash_values(num
, ptr
);
5925 return rb_vm_call_with_refinements(ec
, rb_ary_new4(num
, ptr
), idHash
, 0, NULL
, RB_NO_KEYWORDS
);
5930 rb_vm_opt_newarray_hash(rb_execution_context_t
*ec
, rb_num_t num
, const VALUE
*ptr
)
5932 return vm_opt_newarray_hash(ec
, num
, ptr
);
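
/*
 * These helpers let e.g. `[a, b].max` run without allocating the temporary
 * Array as long as the corresponding Array method is unredefined;
 * otherwise the receiver Array is materialized and the call goes through
 * regular dispatch (honoring refinements).
 */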
#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0

static void
vm_track_constant_cache(ID id, void *ic)
{
    struct rb_id_table *const_cache = GET_VM()->constant_cache;
    VALUE lookup_result;
    st_table *ics;

    if (rb_id_table_lookup(const_cache, id, &lookup_result)) {
        ics = (st_table *)lookup_result;
    }
    else {
        ics = st_init_numtable();
        rb_id_table_insert(const_cache, id, (VALUE)ics);
    }

    st_insert(ics, (st_data_t) ic, (st_data_t) Qtrue);
}

static void
vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
{
    RB_VM_LOCK_ENTER();

    for (int i = 0; segments[i]; i++) {
        ID id = segments[i];
        if (id == idNULL) continue;
        vm_track_constant_cache(id, ic);
    }

    RB_VM_LOCK_LEAVE();
}

// For RJIT inlining
static inline bool
vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
{
    if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
        VM_ASSERT(ractor_incidental_shareable_p(flags & IMEMO_CONST_CACHE_SHAREABLE, value));

        return (ic_cref == NULL || // no need to check CREF
                ic_cref == vm_get_cref(reg_ep));
    }
    return false;
}

static bool
vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
{
    VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
    return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
}

// YJIT needs this function to never allocate and never raise
bool
rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
{
    return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
}
static void
vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const VALUE *pc)
{
    if (ruby_vm_const_missing_count > 0) {
        ruby_vm_const_missing_count = 0;
        ic->entry = NULL;
        return;
    }

    struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
    RB_OBJ_WRITE(ice, &ice->value, val);
    ice->ic_cref = vm_get_const_key_cref(reg_ep);
    if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
    RB_OBJ_WRITE(iseq, &ic->entry, ice);

    RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
    unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
    rb_yjit_constant_ic_update(iseq, ic, pos);
    rb_rjit_constant_ic_update(iseq, ic, pos);
}

VALUE
rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
{
    VALUE val;
    const ID *segments = ic->segments;
    struct iseq_inline_constant_cache_entry *ice = ic->entry;
    if (ice && vm_ic_hit_p(ice, GET_EP())) {
        val = ice->value;

        VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
    }
    else {
        ruby_vm_constant_cache_misses++;
        val = vm_get_ev_const_chain(ec, segments);
        vm_ic_track_const_chain(GET_CFP(), ic, segments);
        // Undo the PC increment to get the address to this instruction
        // INSN_ATTR(width) == 2
        vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
    }
    return val;
}
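
/*
 * Example: `Foo::Bar` becomes an opt_getconstant_path instruction whose
 * segments are [:Foo, :Bar]. The first execution resolves the whole path,
 * registers the cache under each segment name for invalidation, and stores
 * the value; subsequent executions reduce to the vm_ic_hit_p() check above.
 */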
static VALUE
vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
{
    rb_thread_t *th = rb_ec_thread_ptr(ec);
    rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);

  again:
    if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
        return is->once.value;
    }
    else if (is->once.running_thread == NULL) {
        VALUE val;
        is->once.running_thread = th;
        val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
        RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
        /* is->once.running_thread is cleared by vm_once_clear() */
        is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
        return val;
    }
    else if (is->once.running_thread == th) {
        /* recursive once */
        return vm_once_exec((VALUE)iseq);
    }
    else {
        /* waiting for finish */
        RUBY_VM_CHECK_INTS(ec);
        rb_thread_schedule();
        goto again;
    }
}
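
/*
 * `once` backs constructs like the /.../o regexp literal: the body runs at
 * most once per iseq. running_thread is the state word: NULL (not yet run),
 * a thread pointer (running), 0x1 (done); contending threads spin with
 * interrupt checks until the first run completes.
 */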
static OFFSET
vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
{
    switch (OBJ_BUILTIN_TYPE(key)) {
      case -1:
      case T_FLOAT:
      case T_SYMBOL:
      case T_BIGNUM:
      case T_STRING:
        if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
                                   SYMBOL_REDEFINED_OP_FLAG |
                                   INTEGER_REDEFINED_OP_FLAG |
                                   FLOAT_REDEFINED_OP_FLAG |
                                   NIL_REDEFINED_OP_FLAG |
                                   TRUE_REDEFINED_OP_FLAG |
                                   FALSE_REDEFINED_OP_FLAG |
                                   STRING_REDEFINED_OP_FLAG)) {
            st_data_t val;
            if (RB_FLOAT_TYPE_P(key)) {
                double kval = RFLOAT_VALUE(key);
                if (!isinf(kval) && modf(kval, &kval) == 0.0) {
                    key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
                }
            }
            if (rb_hash_stlike_lookup(hash, key, &val)) {
                return FIX2LONG((VALUE)val);
            }
            else {
                return else_offset;
            }
        }
    }
    return 0;
}
6108 vm_stack_consistency_error(const rb_execution_context_t
*ec
,
6109 const rb_control_frame_t
*,
6112 vm_stack_consistency_error(const rb_execution_context_t
*ec
,
6113 const rb_control_frame_t
*cfp
,
6116 const ptrdiff_t nsp
= VM_SP_CNT(ec
, cfp
->sp
);
6117 const ptrdiff_t nbp
= VM_SP_CNT(ec
, bp
);
6118 static const char stack_consistency_error
[] =
6119 "Stack consistency error (sp: %"PRIdPTRDIFF
", bp: %"PRIdPTRDIFF
")";
6120 #if defined RUBY_DEVEL
6121 VALUE mesg
= rb_sprintf(stack_consistency_error
, nsp
, nbp
);
6122 rb_str_cat_cstr(mesg
, "\n");
6123 rb_str_append(mesg
, rb_iseq_disasm(cfp
->iseq
));
6124 rb_exc_fatal(rb_exc_new3(rb_eFatal
, mesg
));
6126 rb_bug(stack_consistency_error
, nsp
, nbp
);
static VALUE
vm_opt_plus(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
        return rb_fix_plus_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             RBASIC_CLASS(obj) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
        return rb_str_opt_plus(recv, obj);
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             RBASIC_CLASS(obj) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
        return rb_ary_plus(recv, obj);
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_minus(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
        return rb_fix_minus_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_mult(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
        return rb_fix_mul_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_div(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
        return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
        return rb_flo_div_flo(recv, obj);
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
        return rb_flo_div_flo(recv, obj);
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_mod(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
        return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
    }
    else {
        return Qundef;
    }
}
static VALUE
vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
{
    if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
        VALUE val = opt_equality(iseq, recv, obj, cd_eq);

        if (!UNDEF_P(val)) {
            return RBOOL(!RTEST(val));
        }
    }

    return Qundef;
}
static VALUE
vm_opt_lt(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
        return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
        CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
        return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
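
/* The Fixnum branch above compares the tagged words directly: since
 * LONG2FIX(n) == (n << 1) | 1 is strictly monotonic, the signed order of
 * two tagged Fixnums equals the order of the untagged integers. For
 * example LONG2FIX(-1) == -1 and LONG2FIX(2) == 5, and -1 < 5 just as
 * -1 < 2. vm_opt_le/gt/ge below use the same trick. */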
static VALUE
vm_opt_le(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
        return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
        CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
        return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
static VALUE
vm_opt_gt(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
        return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
        CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
        return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
static VALUE
vm_opt_ge(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
        return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
        return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
        CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
        return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
static VALUE
vm_opt_ltlt(VALUE recv, VALUE obj)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
        if (LIKELY(RB_TYPE_P(obj, T_STRING))) {
            return rb_str_buf_append(recv, obj);
        }
        else {
            return rb_str_concat(recv, obj);
        }
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
        return rb_ary_push(recv, obj);
    }
    else {
        return Qundef;
    }
}
static VALUE
vm_opt_and(VALUE recv, VALUE obj)
{
    // If recv and obj are both fixnums, then the bottom tag bit
    // will be 1 on both. 1 & 1 == 1, so the result value will also
    // be a fixnum. If either side is *not* a fixnum, then the tag bit
    // will be 0, and we return Qundef.
    VALUE ret = ((SIGNED_VALUE) recv) & ((SIGNED_VALUE) obj);

    if (FIXNUM_P(ret) &&
        BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
        return ret;
    }
    else {
        return Qundef;
    }
}
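
/* Worked example of the tag trick above: INT2FIX(n) == (n << 1) | 1, so
 * INT2FIX(3) & INT2FIX(5) == 0b0111 & 0b1011 == 0b0011 == INT2FIX(1),
 * which is exactly INT2FIX(3 & 5). In general
 * ((a << 1) | 1) & ((b << 1) | 1) == (((a & b) << 1) | 1). */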
static VALUE
vm_opt_or(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
        return recv | obj;
    }
    else {
        return Qundef;
    }
}
static VALUE
vm_opt_aref(VALUE recv, VALUE obj)
{
    if (SPECIAL_CONST_P(recv)) {
        if (FIXNUM_2_P(recv, obj) &&
            BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
            return rb_fix_aref(recv, obj);
        }
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
        if (FIXNUM_P(obj)) {
            return rb_ary_entry_internal(recv, FIX2LONG(obj));
        }
        else {
            return rb_ary_aref1(recv, obj);
        }
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
        return rb_hash_aref(recv, obj);
    }
    else {
        return Qundef;
    }
}
static VALUE
vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
             FIXNUM_P(obj)) {
        rb_ary_store(recv, FIX2LONG(obj), set);
        return set;
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
        rb_hash_aset(recv, obj, set);
        return set;
    }
    else {
        return Qundef;
    }
}
static VALUE
vm_opt_aref_with(VALUE recv, VALUE key)
{
    if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
        BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG) &&
        rb_hash_compare_by_id_p(recv) == Qfalse &&
        !FL_TEST(recv, RHASH_PROC_DEFAULT)) {
        return rb_hash_aref(recv, key);
    }
    else {
        return Qundef;
    }
}
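
/* vm_opt_aref_with backs the opt_aref_with instruction, which passes a
 * string-literal key through without duplicating it. The extra guards are
 * conservative: a compare_by_identity hash keys on object identity (so
 * which String object is used matters), and a hash with a default proc
 * hands the key object to the proc on a miss; both cases therefore fall
 * back to the generic path. */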
VALUE
rb_vm_opt_aref_with(VALUE recv, VALUE key)
{
    return vm_opt_aref_with(recv, key);
}
static VALUE
vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
{
    if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
        BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG) &&
        rb_hash_compare_by_id_p(recv) == Qfalse) {
        return rb_hash_aset(recv, key, val);
    }
    else {
        return Qundef;
    }
}
static VALUE
vm_opt_length(VALUE recv, int bop)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
        if (bop == BOP_EMPTY_P) {
            return LONG2NUM(RSTRING_LEN(recv));
        }
        else {
            return rb_str_length(recv);
        }
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
        return LONG2NUM(RARRAY_LEN(recv));
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
        return INT2FIX(RHASH_SIZE(recv));
    }
    else {
        return Qundef;
    }
}
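
/* For BOP_EMPTY_P the String branch above returns the byte length rather
 * than calling rb_str_length: emptiness only needs a zero/nonzero answer,
 * and RSTRING_LEN avoids counting characters in multibyte encodings. */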
static VALUE
vm_opt_empty_p(VALUE recv)
{
    switch (vm_opt_length(recv, BOP_EMPTY_P)) {
      case Qundef: return Qundef;
      case INT2FIX(0): return Qtrue;
      default: return Qfalse;
    }
}
VALUE rb_false(VALUE obj);

static VALUE
vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
{
    if (NIL_P(recv) &&
        BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
        return Qtrue;
    }
    else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
        return Qfalse;
    }
    else {
        return Qundef;
    }
}
static VALUE
fix_succ(VALUE x)
{
    switch (x) {
      case ~0UL:
        /* 0xFFFF_FFFF == INT2FIX(-1)
         * `-1.succ` is of course 0. */
        return INT2FIX(0);
      case RSHIFT(~0UL, 1):
        /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
         * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
        return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
      default:
        /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
         * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
         * == lx*2 + ly*2 + 1
         * == (lx*2+1) + (ly*2+1) - 1
         * == x + y - 1
         *
         * Here, if we put y := INT2FIX(1):
         *
         * == x + INT2FIX(1) - 1
         * == x + 2 .
         */
        return x + 2;
    }
}
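
/* Numerically: with x == INT2FIX(3) == 7, x + 2 == 9 == INT2FIX(4),
 * i.e. 3.succ == 4 without ever untagging the Fixnum. */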
static VALUE
vm_opt_succ(VALUE recv)
{
    if (FIXNUM_P(recv) &&
        BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
        return fix_succ(recv);
    }
    else if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
        return rb_str_succ(recv);
    }
    else {
        return Qundef;
    }
}
static VALUE
vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
{
    if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
        return RBOOL(!RTEST(recv));
    }
    else {
        return Qundef;
    }
}
static VALUE
vm_opt_regexpmatch2(VALUE recv, VALUE obj)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             CLASS_OF(obj) == rb_cRegexp &&
             BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
        return rb_reg_match(obj, recv);
    }
    else if (RBASIC_CLASS(recv) == rb_cRegexp &&
             BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
        return rb_reg_match(recv, obj);
    }
    else {
        return Qundef;
    }
}
rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);

NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
static inline void
vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
              rb_event_flag_t pc_events, rb_event_flag_t target_event,
              rb_hook_list_t *global_hooks, rb_hook_list_t *const *local_hooks_ptr, VALUE val)
{
    rb_event_flag_t event = pc_events & target_event;
    VALUE self = GET_SELF();

    VM_ASSERT(rb_popcount64((uint64_t)event) == 1);

    if (event & global_hooks->events) {
        /* increment PC because source line is calculated with PC-1 */
        reg_cfp->pc++;
        vm_dtrace(event, ec);
        rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0, val, 0);
        reg_cfp->pc--;
    }

    // Load here since global hook above can add and free local hooks
    rb_hook_list_t *local_hooks = *local_hooks_ptr;
    if (local_hooks != NULL) {
        if (event & local_hooks->events) {
            /* increment PC because source line is calculated with PC-1 */
            reg_cfp->pc++;
            rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0, val, 0);
            reg_cfp->pc--;
        }
    }
}
#define VM_TRACE_HOOK(target_event, val) do { \
    if ((pc_events & (target_event)) & enabled_flags) { \
        vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
    } \
} while (0)
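
/* For example, VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef) in vm_trace below
 * expands to a guarded vm_trace_hook call that fires only when a :line
 * event is both present at this PC (pc_events) and enabled; Qundef is
 * passed because a line event carries no associated value. */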
static VALUE
rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
{
    VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
    VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
    return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
}
static void
vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
{
    const VALUE *pc = reg_cfp->pc;
    rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
    rb_event_flag_t global_events = enabled_flags;

    if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
        return;
    }
    else {
        const rb_iseq_t *iseq = reg_cfp->iseq;
        VALUE iseq_val = (VALUE)iseq;
        size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
        rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
        rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
        rb_hook_list_t *const *local_hooks_ptr = &iseq->aux.exec.local_hooks;
        rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
        rb_hook_list_t *bmethod_local_hooks = NULL;
        rb_hook_list_t **bmethod_local_hooks_ptr = NULL;
        rb_event_flag_t bmethod_local_events = 0;
        const bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
        enabled_flags |= iseq_local_events;

        VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);

        if (bmethod_frame) {
            const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
            VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
            bmethod_local_hooks = me->def->body.bmethod.hooks;
            bmethod_local_hooks_ptr = &me->def->body.bmethod.hooks;
            if (bmethod_local_hooks) {
                bmethod_local_events = bmethod_local_hooks->events;
            }
        }

        if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
#if 0
            /* disable trace */
            /* TODO: incomplete */
            rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
#else
            /* do not disable trace because of performance problem
             * (re-enable overhead)
             */
#endif
            return;
        }
        else if (ec->trace_arg != NULL) {
            /* already tracing */
            return;
        }
        else {
            rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
            /* Note, not considering iseq local events here since the same
             * iseq could be used in multiple bmethods. */
            rb_event_flag_t bmethod_events = global_events | bmethod_local_events;

            if (0) {
                ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
                                  (int)pos,
                                  (int)pc_events,
                                  RSTRING_PTR(rb_iseq_path(iseq)),
                                  (int)rb_iseq_line_no(iseq, pos),
                                  RSTRING_PTR(rb_iseq_label(iseq)));
            }
            VM_ASSERT(reg_cfp->pc == pc);
            VM_ASSERT(pc_events != 0);

            /* check traces */
            if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
                /* b_call instruction running as a method. Fire call event. */
                vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
            }
            VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL,   Qundef);
            VM_TRACE_HOOK(RUBY_EVENT_RESCUE,                                        rescue_errinfo(ec, reg_cfp));
            VM_TRACE_HOOK(RUBY_EVENT_LINE,                                          Qundef);
            VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE,                                 Qundef);
            VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH,                               Qundef);
            VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
            if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
                /* b_return instruction running as a method. Fire return event. */
                vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks_ptr, TOPN(0));
            }

            // Pin the iseq since `local_hooks_ptr` points inside the iseq's slot on the GC heap.
            // We need the pointer to stay valid in case compaction happens in a trace hook.
            //
            // Similar treatment is unnecessary for `bmethod_local_hooks_ptr` since
            // storage for `rb_method_definition_t` is not on the GC heap.
            RB_GC_GUARD(iseq_val);
        }
    }
}

#undef VM_TRACE_HOOK
#if VM_CHECK_MODE > 0
NORETURN( NOINLINE( COLDFUNC
void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));

void
Init_vm_stack_canary(void)
{
    /* This has to be called _after_ our PRNG is properly set up. */
    int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
    vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
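    /* Setting the low bit makes the canary an odd word, i.e. a Fixnum-tagged
     * VALUE: it can never alias an (aligned) heap pointer, and it is
     * guaranteed to be nonzero. */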

    vm_stack_canary_was_born = true;
    VM_ASSERT(n == 0);
}

void
rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
{
    /* Because a method has already been called, why not call
     * another one. */
    const char *insn = rb_insns_name(i);
    VALUE inspection = rb_inspect(c);
    const char *str = StringValueCStr(inspection);

    rb_bug("dead canary found at %s: %s", insn, str);
}

#else
void Init_vm_stack_canary(void) { /* nothing to do */ }
#endif
/* a part of the following code is generated by this ruby script:

16.times{|i|
  typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
  typedef_args.prepend(", ") if i != 0
  call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
  call_args.prepend(", ") if i != 0
  puts %Q{
static VALUE
builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
    return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
}}
}

puts
puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
16.times{|i|
  puts "    builtin_invoker#{i},"
}
puts "};"
*/
static VALUE
builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
    return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
}

static VALUE
builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
    return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
}

static VALUE
builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
    return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
}

static VALUE
builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
    return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
}

static VALUE
builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
    return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
}

static VALUE
builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
    return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
}

static VALUE
builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
    return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}

static VALUE
builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
    return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}

static VALUE
builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
    return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}

static VALUE
builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
    return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}

static VALUE
builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
    return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}

static VALUE
builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
    return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}

static VALUE
builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
    return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}

static VALUE
builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
    return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}

static VALUE
builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
    return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}

static VALUE
builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
    return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}
typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);

static builtin_invoker
lookup_builtin_invoker(int argc)
{
    static const builtin_invoker invokers[] = {
        builtin_invoker0,
        builtin_invoker1,
        builtin_invoker2,
        builtin_invoker3,
        builtin_invoker4,
        builtin_invoker5,
        builtin_invoker6,
        builtin_invoker7,
        builtin_invoker8,
        builtin_invoker9,
        builtin_invoker10,
        builtin_invoker11,
        builtin_invoker12,
        builtin_invoker13,
        builtin_invoker14,
        builtin_invoker15,
    };

    return invokers[argc];
}
static inline VALUE
invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
    SETUP_CANARY(canary_p);
    VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, (rb_insn_func_t)bf->func_ptr);
    CHECK_CANARY(canary_p, BIN(invokebuiltin));
    return ret;
}
static VALUE
vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    return invoke_bf(ec, cfp, bf, argv);
}
static VALUE
vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
{
    if (0) { // debug print
        fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
        for (int i=0; i<bf->argc; i++) {
            ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
        }
        ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc, bf->func_ptr);
    }

    if (bf->argc == 0) {
        return invoke_bf(ec, cfp, bf, NULL);
    }
    else {
        const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
        return invoke_bf(ec, cfp, bf, argv);
    }
}
// for __builtin_inline!()

VALUE
rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
{
    const rb_control_frame_t *cfp = ec->cfp;
    return cfp->ep[index];
}