* 2022-01-18 [ci skip]
[ruby-80x24.org.git] / vm_insnhelper.c
blob271b347d3bc65abe08ebdedac8a46bdc7dd67e84
1 /**********************************************************************
3 vm_insnhelper.c - instruction helper functions.
5 $Author$
7 Copyright (C) 2007 Koichi Sasada
9 **********************************************************************/
11 #include "ruby/internal/config.h"
13 #include <math.h>
15 #include "constant.h"
16 #include "debug_counter.h"
17 #include "internal.h"
18 #include "internal/class.h"
19 #include "internal/compar.h"
20 #include "internal/hash.h"
21 #include "internal/numeric.h"
22 #include "internal/proc.h"
23 #include "internal/random.h"
24 #include "internal/variable.h"
25 #include "internal/struct.h"
26 #include "variable.h"
28 /* finish iseq array */
29 #include "insns.inc"
30 #ifndef MJIT_HEADER
31 #include "insns_info.inc"
32 #endif
34 extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
35 extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
36 extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
37 extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
38 int argc, const VALUE *argv, int priv);
40 #ifndef MJIT_HEADER
41 static const struct rb_callcache vm_empty_cc;
42 static const struct rb_callcache vm_empty_cc_for_super;
43 #endif
45 /* control stack frame */
47 static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
49 MJIT_STATIC VALUE
50 ruby_vm_special_exception_copy(VALUE exc)
52 VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
53 rb_obj_copy_ivar(e, exc);
54 return e;
57 NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
58 static void
59 ec_stack_overflow(rb_execution_context_t *ec, int setup)
61 VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
62 ec->raised_flag = RAISED_STACKOVERFLOW;
63 if (setup) {
64 VALUE at = rb_ec_backtrace_object(ec);
65 mesg = ruby_vm_special_exception_copy(mesg);
66 rb_ivar_set(mesg, idBt, at);
67 rb_ivar_set(mesg, idBt_locations, at);
69 ec->errinfo = mesg;
70 EC_JUMP_TAG(ec, TAG_RAISE);
73 NORETURN(static void vm_stackoverflow(void));
74 #ifdef MJIT_HEADER
75 NOINLINE(static COLDFUNC void vm_stackoverflow(void));
76 #endif
78 static void
79 vm_stackoverflow(void)
81 ec_stack_overflow(GET_EC(), TRUE);
84 NORETURN(MJIT_STATIC void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit));
85 MJIT_STATIC void
86 rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
88 if (rb_during_gc()) {
89 rb_bug("system stack overflow during GC. Faulty native extension?");
91 if (crit) {
92 ec->raised_flag = RAISED_STACKOVERFLOW;
93 ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
94 EC_JUMP_TAG(ec, TAG_RAISE);
96 #ifdef USE_SIGALTSTACK
97 ec_stack_overflow(ec, TRUE);
98 #else
99 ec_stack_overflow(ec, FALSE);
100 #endif
104 #if VM_CHECK_MODE > 0
105 static int
106 callable_class_p(VALUE klass)
108 #if VM_CHECK_MODE >= 2
109 if (!klass) return FALSE;
110 switch (RB_BUILTIN_TYPE(klass)) {
111 default:
112 break;
113 case T_ICLASS:
114 if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
115 case T_MODULE:
116 return TRUE;
118 while (klass) {
119 if (klass == rb_cBasicObject) {
120 return TRUE;
122 klass = RCLASS_SUPER(klass);
124 return FALSE;
125 #else
126 return klass != 0;
127 #endif
130 static int
131 callable_method_entry_p(const rb_callable_method_entry_t *cme)
133 if (cme == NULL) {
134 return TRUE;
136 else {
137 VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment));
139 if (callable_class_p(cme->defined_class)) {
140 return TRUE;
142 else {
143 return FALSE;
148 static void
149 vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
151 unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
152 enum imemo_type cref_or_me_type = imemo_env; /* impossible value */
154 if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
155 cref_or_me_type = imemo_type(cref_or_me);
157 if (type & VM_FRAME_FLAG_BMETHOD) {
158 req_me = TRUE;
161 if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
162 rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
164 if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
165 rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
168 if (req_me) {
169 if (cref_or_me_type != imemo_ment) {
170 rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
173 else {
174 if (req_cref && cref_or_me_type != imemo_cref) {
175 rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
177 else { /* cref or Qfalse */
178 if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
179 if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC) && (cref_or_me_type == imemo_ment)) {
180 /* ignore */
182 else {
183 rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
189 if (cref_or_me_type == imemo_ment) {
190 const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;
192 if (!callable_method_entry_p(me)) {
193 rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
197 if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
198 VM_ASSERT(iseq == NULL ||
199 RUBY_VM_NORMAL_ISEQ_P(iseq) /* argument error. it should be fixed */);
201 else {
202 VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
206 static void
207 vm_check_frame(VALUE type,
208 VALUE specval,
209 VALUE cref_or_me,
210 const rb_iseq_t *iseq)
212 VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
213 VM_ASSERT(FIXNUM_P(type));
215 #define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
216 case magic: \
217 vm_check_frame_detail(type, req_block, req_me, req_cref, \
218 specval, cref_or_me, is_cframe, iseq); \
219 break
220 switch (given_magic) {
221 /* BLK ME CREF CFRAME */
222 CHECK(VM_FRAME_MAGIC_METHOD, TRUE, TRUE, FALSE, FALSE);
223 CHECK(VM_FRAME_MAGIC_CLASS, TRUE, FALSE, TRUE, FALSE);
224 CHECK(VM_FRAME_MAGIC_TOP, TRUE, FALSE, TRUE, FALSE);
225 CHECK(VM_FRAME_MAGIC_CFUNC, TRUE, TRUE, FALSE, TRUE);
226 CHECK(VM_FRAME_MAGIC_BLOCK, FALSE, FALSE, FALSE, FALSE);
227 CHECK(VM_FRAME_MAGIC_IFUNC, FALSE, FALSE, FALSE, TRUE);
228 CHECK(VM_FRAME_MAGIC_EVAL, FALSE, FALSE, FALSE, FALSE);
229 CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
230 CHECK(VM_FRAME_MAGIC_DUMMY, TRUE, FALSE, FALSE, FALSE);
231 default:
232 rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
234 #undef CHECK
237 static VALUE vm_stack_canary; /* Initialized later */
238 static bool vm_stack_canary_was_born = false;
240 #ifndef MJIT_HEADER
241 MJIT_FUNC_EXPORTED void
242 rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
244 const struct rb_control_frame_struct *reg_cfp = ec->cfp;
245 const struct rb_iseq_struct *iseq;
247 if (! LIKELY(vm_stack_canary_was_born)) {
248 return; /* :FIXME: isn't it rather fatal to enter this branch? */
250 else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
251 /* This is at the very beginning of a thread. cfp does not exist. */
252 return;
254 else if (! (iseq = GET_ISEQ())) {
255 return;
257 else if (LIKELY(sp[0] != vm_stack_canary)) {
258 return;
260 else {
261 /* we are going to call methods below; squash the canary to
262 * prevent infinite loop. */
263 sp[0] = Qundef;
266 const VALUE *orig = rb_iseq_original_iseq(iseq);
267 const VALUE *encoded = iseq->body->iseq_encoded;
268 const ptrdiff_t pos = GET_PC() - encoded;
269 const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
270 const char *name = insn_name(insn);
271 const VALUE iseqw = rb_iseqw_new(iseq);
272 const VALUE inspection = rb_inspect(iseqw);
273 const char *stri = rb_str_to_cstr(inspection);
274 const VALUE disasm = rb_iseq_disasm(iseq);
275 const char *strd = rb_str_to_cstr(disasm);
277 /* rb_bug() is not capable of outputting this large contents. It
278 is designed to run form a SIGSEGV handler, which tends to be
279 very restricted. */
280 ruby_debug_printf(
281 "We are killing the stack canary set by %s, "
282 "at %s@pc=%"PRIdPTR"\n"
283 "watch out the C stack trace.\n"
284 "%s",
285 name, stri, pos, strd);
286 rb_bug("see above.");
288 #endif
289 #define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
291 #else
292 #define vm_check_canary(ec, sp)
293 #define vm_check_frame(a, b, c, d)
294 #endif /* VM_CHECK_MODE > 0 */
296 #if USE_DEBUG_COUNTER
297 static void
298 vm_push_frame_debug_counter_inc(
299 const struct rb_execution_context_struct *ec,
300 const struct rb_control_frame_struct *reg_cfp,
301 VALUE type)
303 const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);
305 RB_DEBUG_COUNTER_INC(frame_push);
307 if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
308 const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
309 const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
310 if (prev) {
311 if (curr) {
312 RB_DEBUG_COUNTER_INC(frame_R2R);
314 else {
315 RB_DEBUG_COUNTER_INC(frame_R2C);
318 else {
319 if (curr) {
320 RB_DEBUG_COUNTER_INC(frame_C2R);
322 else {
323 RB_DEBUG_COUNTER_INC(frame_C2C);
328 switch (type & VM_FRAME_MAGIC_MASK) {
329 case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
330 case VM_FRAME_MAGIC_BLOCK: RB_DEBUG_COUNTER_INC(frame_push_block); return;
331 case VM_FRAME_MAGIC_CLASS: RB_DEBUG_COUNTER_INC(frame_push_class); return;
332 case VM_FRAME_MAGIC_TOP: RB_DEBUG_COUNTER_INC(frame_push_top); return;
333 case VM_FRAME_MAGIC_CFUNC: RB_DEBUG_COUNTER_INC(frame_push_cfunc); return;
334 case VM_FRAME_MAGIC_IFUNC: RB_DEBUG_COUNTER_INC(frame_push_ifunc); return;
335 case VM_FRAME_MAGIC_EVAL: RB_DEBUG_COUNTER_INC(frame_push_eval); return;
336 case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
337 case VM_FRAME_MAGIC_DUMMY: RB_DEBUG_COUNTER_INC(frame_push_dummy); return;
340 rb_bug("unreachable");
342 #else
343 #define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
344 #endif
346 STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
347 STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
348 STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
350 static void
351 vm_push_frame(rb_execution_context_t *ec,
352 const rb_iseq_t *iseq,
353 VALUE type,
354 VALUE self,
355 VALUE specval,
356 VALUE cref_or_me,
357 const VALUE *pc,
358 VALUE *sp,
359 int local_size,
360 int stack_max)
362 rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);
364 vm_check_frame(type, specval, cref_or_me, iseq);
365 VM_ASSERT(local_size >= 0);
367 /* check stack overflow */
368 CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
369 vm_check_canary(ec, sp);
371 /* setup vm value stack */
373 /* initialize local variables */
374 for (int i=0; i < local_size; i++) {
375 *sp++ = Qnil;
378 /* setup ep with managing data */
379 *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
380 *sp++ = specval /* ep[-1] / block handler or prev env ptr */;
381 *sp++ = type; /* ep[-0] / ENV_FLAGS */
383 /* setup new frame */
384 *cfp = (const struct rb_control_frame_struct) {
385 .pc = pc,
386 .sp = sp,
387 .iseq = iseq,
388 .self = self,
389 .ep = sp - 1,
390 .block_code = NULL,
391 .__bp__ = sp, /* Store initial value of ep as bp to skip calculation cost of bp on JIT cancellation. */
392 #if VM_DEBUG_BP_CHECK
393 .bp_check = sp,
394 #endif
395 .jit_return = NULL
398 ec->cfp = cfp;
400 if (VMDEBUG == 2) {
401 SDR();
403 vm_push_frame_debug_counter_inc(ec, cfp, type);
406 /* return TRUE if the frame is finished */
407 static inline int
408 vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
410 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
412 if (VM_CHECK_MODE >= 4) rb_gc_verify_internal_consistency();
413 if (VMDEBUG == 2) SDR();
415 RUBY_VM_CHECK_INTS(ec);
416 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
418 return flags & VM_FRAME_FLAG_FINISH;
421 MJIT_STATIC void
422 rb_vm_pop_frame(rb_execution_context_t *ec)
424 vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
427 /* method dispatch */
428 static inline VALUE
429 rb_arity_error_new(int argc, int min, int max)
431 VALUE err_mess = 0;
432 if (min == max) {
433 err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d)", argc, min);
435 else if (max == UNLIMITED_ARGUMENTS) {
436 err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d+)", argc, min);
438 else {
439 err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d..%d)", argc, min, max);
441 return rb_exc_new3(rb_eArgError, err_mess);
444 MJIT_STATIC void
445 rb_error_arity(int argc, int min, int max)
447 rb_exc_raise(rb_arity_error_new(argc, min, max));
450 /* lvar */
452 NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));
454 static void
455 vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
457 /* remember env value forcely */
458 rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
459 VM_FORCE_WRITE(&ep[index], v);
460 VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
461 RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
464 static inline void
465 vm_env_write(const VALUE *ep, int index, VALUE v)
467 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
468 if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
469 VM_STACK_ENV_WRITE(ep, index, v);
471 else {
472 vm_env_write_slowpath(ep, index, v);
476 MJIT_STATIC VALUE
477 rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
479 if (block_handler == VM_BLOCK_HANDLER_NONE) {
480 return Qnil;
482 else {
483 switch (vm_block_handler_type(block_handler)) {
484 case block_handler_type_iseq:
485 case block_handler_type_ifunc:
486 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
487 case block_handler_type_symbol:
488 return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
489 case block_handler_type_proc:
490 return VM_BH_TO_PROC(block_handler);
491 default:
492 VM_UNREACHABLE(rb_vm_bh_to_procval);
497 /* svar */
499 #if VM_CHECK_MODE > 0
500 static int
501 vm_svar_valid_p(VALUE svar)
503 if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
504 switch (imemo_type(svar)) {
505 case imemo_svar:
506 case imemo_cref:
507 case imemo_ment:
508 return TRUE;
509 default:
510 break;
513 rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
514 return FALSE;
516 #endif
518 static inline struct vm_svar *
519 lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
521 VALUE svar;
523 if (lep && (ec == NULL || ec->root_lep != lep)) {
524 svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
526 else {
527 svar = ec->root_svar;
530 VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
532 return (struct vm_svar *)svar;
535 static inline void
536 lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
538 VM_ASSERT(vm_svar_valid_p((VALUE)svar));
540 if (lep && (ec == NULL || ec->root_lep != lep)) {
541 vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
543 else {
544 RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
548 static VALUE
549 lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
551 const struct vm_svar *svar = lep_svar(ec, lep);
553 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
555 switch (key) {
556 case VM_SVAR_LASTLINE:
557 return svar->lastline;
558 case VM_SVAR_BACKREF:
559 return svar->backref;
560 default: {
561 const VALUE ary = svar->others;
563 if (NIL_P(ary)) {
564 return Qnil;
566 else {
567 return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
573 static struct vm_svar *
574 svar_new(VALUE obj)
576 return (struct vm_svar *)rb_imemo_new(imemo_svar, Qnil, Qnil, Qnil, obj);
579 static void
580 lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
582 struct vm_svar *svar = lep_svar(ec, lep);
584 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
585 lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
588 switch (key) {
589 case VM_SVAR_LASTLINE:
590 RB_OBJ_WRITE(svar, &svar->lastline, val);
591 return;
592 case VM_SVAR_BACKREF:
593 RB_OBJ_WRITE(svar, &svar->backref, val);
594 return;
595 default: {
596 VALUE ary = svar->others;
598 if (NIL_P(ary)) {
599 RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
601 rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
606 static inline VALUE
607 vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
609 VALUE val;
611 if (type == 0) {
612 val = lep_svar_get(ec, lep, key);
614 else {
615 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
617 if (type & 0x01) {
618 switch (type >> 1) {
619 case '&':
620 val = rb_reg_last_match(backref);
621 break;
622 case '`':
623 val = rb_reg_match_pre(backref);
624 break;
625 case '\'':
626 val = rb_reg_match_post(backref);
627 break;
628 case '+':
629 val = rb_reg_match_last(backref);
630 break;
631 default:
632 rb_bug("unexpected back-ref");
635 else {
636 val = rb_reg_nth_match((int)(type >> 1), backref);
639 return val;
642 PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
643 static rb_callable_method_entry_t *
644 check_method_entry(VALUE obj, int can_be_svar)
646 if (obj == Qfalse) return NULL;
648 #if VM_CHECK_MODE > 0
649 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
650 #endif
652 switch (imemo_type(obj)) {
653 case imemo_ment:
654 return (rb_callable_method_entry_t *)obj;
655 case imemo_cref:
656 return NULL;
657 case imemo_svar:
658 if (can_be_svar) {
659 return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
661 default:
662 #if VM_CHECK_MODE > 0
663 rb_bug("check_method_entry: svar should not be there:");
664 #endif
665 return NULL;
669 MJIT_STATIC const rb_callable_method_entry_t *
670 rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
672 const VALUE *ep = cfp->ep;
673 rb_callable_method_entry_t *me;
675 while (!VM_ENV_LOCAL_P(ep)) {
676 if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
677 ep = VM_ENV_PREV_EP(ep);
680 return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
683 static const rb_iseq_t *
684 method_entry_iseqptr(const rb_callable_method_entry_t *me)
686 switch (me->def->type) {
687 case VM_METHOD_TYPE_ISEQ:
688 return me->def->body.iseq.iseqptr;
689 default:
690 return NULL;
694 static rb_cref_t *
695 method_entry_cref(const rb_callable_method_entry_t *me)
697 switch (me->def->type) {
698 case VM_METHOD_TYPE_ISEQ:
699 return me->def->body.iseq.cref;
700 default:
701 return NULL;
705 #if VM_CHECK_MODE == 0
706 PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
707 #endif
708 static rb_cref_t *
709 check_cref(VALUE obj, int can_be_svar)
711 if (obj == Qfalse) return NULL;
713 #if VM_CHECK_MODE > 0
714 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
715 #endif
717 switch (imemo_type(obj)) {
718 case imemo_ment:
719 return method_entry_cref((rb_callable_method_entry_t *)obj);
720 case imemo_cref:
721 return (rb_cref_t *)obj;
722 case imemo_svar:
723 if (can_be_svar) {
724 return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
726 default:
727 #if VM_CHECK_MODE > 0
728 rb_bug("check_method_entry: svar should not be there:");
729 #endif
730 return NULL;
734 static inline rb_cref_t *
735 vm_env_cref(const VALUE *ep)
737 rb_cref_t *cref;
739 while (!VM_ENV_LOCAL_P(ep)) {
740 if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
741 ep = VM_ENV_PREV_EP(ep);
744 return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
747 static int
748 is_cref(const VALUE v, int can_be_svar)
750 if (RB_TYPE_P(v, T_IMEMO)) {
751 switch (imemo_type(v)) {
752 case imemo_cref:
753 return TRUE;
754 case imemo_svar:
755 if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
756 default:
757 break;
760 return FALSE;
763 static int
764 vm_env_cref_by_cref(const VALUE *ep)
766 while (!VM_ENV_LOCAL_P(ep)) {
767 if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
768 ep = VM_ENV_PREV_EP(ep);
770 return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
773 static rb_cref_t *
774 cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
776 const VALUE v = *vptr;
777 rb_cref_t *cref, *new_cref;
779 if (RB_TYPE_P(v, T_IMEMO)) {
780 switch (imemo_type(v)) {
781 case imemo_cref:
782 cref = (rb_cref_t *)v;
783 new_cref = vm_cref_dup(cref);
784 if (parent) {
785 RB_OBJ_WRITE(parent, vptr, new_cref);
787 else {
788 VM_FORCE_WRITE(vptr, (VALUE)new_cref);
790 return (rb_cref_t *)new_cref;
791 case imemo_svar:
792 if (can_be_svar) {
793 return cref_replace_with_duplicated_cref_each_frame(&((struct vm_svar *)v)->cref_or_me, FALSE, v);
795 /* fall through */
796 case imemo_ment:
797 rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
798 default:
799 break;
802 return FALSE;
805 static rb_cref_t *
806 vm_cref_replace_with_duplicated_cref(const VALUE *ep)
808 if (vm_env_cref_by_cref(ep)) {
809 rb_cref_t *cref;
810 VALUE envval;
812 while (!VM_ENV_LOCAL_P(ep)) {
813 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
814 if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
815 return cref;
817 ep = VM_ENV_PREV_EP(ep);
819 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
820 return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
822 else {
823 rb_bug("vm_cref_dup: unreachable");
827 static rb_cref_t *
828 vm_get_cref(const VALUE *ep)
830 rb_cref_t *cref = vm_env_cref(ep);
832 if (cref != NULL) {
833 return cref;
835 else {
836 rb_bug("vm_get_cref: unreachable");
840 rb_cref_t *
841 rb_vm_get_cref(const VALUE *ep)
843 return vm_get_cref(ep);
846 static rb_cref_t *
847 vm_ec_cref(const rb_execution_context_t *ec)
849 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
851 if (cfp == NULL) {
852 return NULL;
854 return vm_get_cref(cfp->ep);
857 static const rb_cref_t *
858 vm_get_const_key_cref(const VALUE *ep)
860 const rb_cref_t *cref = vm_get_cref(ep);
861 const rb_cref_t *key_cref = cref;
863 while (cref) {
864 if (FL_TEST(CREF_CLASS(cref), FL_SINGLETON) ||
865 FL_TEST(CREF_CLASS(cref), RCLASS_CLONED)) {
866 return key_cref;
868 cref = CREF_NEXT(cref);
871 /* does not include singleton class */
872 return NULL;
875 void
876 rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
878 rb_cref_t *new_cref;
880 while (cref) {
881 if (CREF_CLASS(cref) == old_klass) {
882 new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
883 *new_cref_ptr = new_cref;
884 return;
886 new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
887 cref = CREF_NEXT(cref);
888 *new_cref_ptr = new_cref;
889 new_cref_ptr = &new_cref->next;
891 *new_cref_ptr = NULL;
894 static rb_cref_t *
895 vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval, int singleton)
897 rb_cref_t *prev_cref = NULL;
899 if (ep) {
900 prev_cref = vm_env_cref(ep);
902 else {
903 rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);
905 if (cfp) {
906 prev_cref = vm_env_cref(cfp->ep);
910 return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval, singleton);
913 static inline VALUE
914 vm_get_cbase(const VALUE *ep)
916 const rb_cref_t *cref = vm_get_cref(ep);
918 return CREF_CLASS_FOR_DEFINITION(cref);
921 static inline VALUE
922 vm_get_const_base(const VALUE *ep)
924 const rb_cref_t *cref = vm_get_cref(ep);
926 while (cref) {
927 if (!CREF_PUSHED_BY_EVAL(cref)) {
928 return CREF_CLASS_FOR_DEFINITION(cref);
930 cref = CREF_NEXT(cref);
933 return Qundef;
936 static inline void
937 vm_check_if_namespace(VALUE klass)
939 if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
940 rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
944 static inline void
945 vm_ensure_not_refinement_module(VALUE self)
947 if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
948 rb_warn("not defined at the refinement, but at the outer class/module");
952 static inline VALUE
953 vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
955 return klass;
958 static inline VALUE
959 vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
961 void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
962 VALUE val;
964 if (NIL_P(orig_klass) && allow_nil) {
965 /* in current lexical scope */
966 const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
967 const rb_cref_t *cref;
968 VALUE klass = Qnil;
970 while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
971 root_cref = CREF_NEXT(root_cref);
973 cref = root_cref;
974 while (cref && CREF_NEXT(cref)) {
975 if (CREF_PUSHED_BY_EVAL(cref)) {
976 klass = Qnil;
978 else {
979 klass = CREF_CLASS(cref);
981 cref = CREF_NEXT(cref);
983 if (!NIL_P(klass)) {
984 VALUE av, am = 0;
985 rb_const_entry_t *ce;
986 search_continue:
987 if ((ce = rb_const_lookup(klass, id))) {
988 rb_const_warn_if_deprecated(ce, klass, id);
989 val = ce->value;
990 if (val == Qundef) {
991 if (am == klass) break;
992 am = klass;
993 if (is_defined) return 1;
994 if (rb_autoloading_value(klass, id, &av, NULL)) return av;
995 rb_autoload_load(klass, id);
996 goto search_continue;
998 else {
999 if (is_defined) {
1000 return 1;
1002 else {
1003 if (UNLIKELY(!rb_ractor_main_p())) {
1004 if (!rb_ractor_shareable_p(val)) {
1005 rb_raise(rb_eRactorIsolationError,
1006 "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
1009 return val;
1016 /* search self */
1017 if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
1018 klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
1020 else {
1021 klass = CLASS_OF(ec->cfp->self);
1024 if (is_defined) {
1025 return rb_const_defined(klass, id);
1027 else {
1028 return rb_const_get(klass, id);
1031 else {
1032 vm_check_if_namespace(orig_klass);
1033 if (is_defined) {
1034 return rb_public_const_defined_from(orig_klass, id);
1036 else {
1037 return rb_public_const_get_from(orig_klass, id);
1042 static inline VALUE
1043 vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
1045 VALUE klass;
1047 if (!cref) {
1048 rb_bug("vm_get_cvar_base: no cref");
1051 while (CREF_NEXT(cref) &&
1052 (NIL_P(CREF_CLASS(cref)) || FL_TEST(CREF_CLASS(cref), FL_SINGLETON) ||
1053 CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
1054 cref = CREF_NEXT(cref);
1056 if (top_level_raise && !CREF_NEXT(cref)) {
1057 rb_raise(rb_eRuntimeError, "class variable access from toplevel");
1060 klass = vm_get_iclass(cfp, CREF_CLASS(cref));
1062 if (NIL_P(klass)) {
1063 rb_raise(rb_eTypeError, "no class variables available");
1065 return klass;
1068 static VALUE
1069 vm_search_const_defined_class(const VALUE cbase, ID id)
1071 if (rb_const_defined_at(cbase, id)) return cbase;
1072 if (cbase == rb_cObject) {
1073 VALUE tmp = RCLASS_SUPER(cbase);
1074 while (tmp) {
1075 if (rb_const_defined_at(tmp, id)) return tmp;
1076 tmp = RCLASS_SUPER(tmp);
1079 return 0;
1082 static bool
1083 iv_index_tbl_lookup(struct st_table *iv_index_tbl, ID id, struct rb_iv_index_tbl_entry **ent)
1085 int found;
1086 st_data_t ent_data;
1088 if (iv_index_tbl == NULL) return false;
1090 RB_VM_LOCK_ENTER();
1092 found = st_lookup(iv_index_tbl, (st_data_t)id, &ent_data);
1094 RB_VM_LOCK_LEAVE();
1095 if (found) *ent = (struct rb_iv_index_tbl_entry *)ent_data;
1097 return found ? true : false;
1100 ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent));
1102 static inline void
1103 fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent)
1105 // fill cache
1106 if (!is_attr) {
1107 ic->entry = ent;
1108 RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
1110 else {
1111 vm_cc_attr_index_set(cc, (int)ent->index + 1);
1115 ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int));
1116 static inline VALUE
1117 vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
1119 #if OPT_IC_FOR_IVAR
1120 VALUE val = Qundef;
1122 if (SPECIAL_CONST_P(obj)) {
1123 // frozen?
1125 else if (LIKELY(is_attr ?
1126 RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, vm_cc_attr_index(cc) > 0) :
1127 RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_serial,
1128 ic->entry && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass)))) {
1129 uint32_t index = !is_attr ? ic->entry->index : (vm_cc_attr_index(cc) - 1);
1131 RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
1133 if (LIKELY(BUILTIN_TYPE(obj) == T_OBJECT) &&
1134 LIKELY(index < ROBJECT_NUMIV(obj))) {
1135 val = ROBJECT_IVPTR(obj)[index];
1137 VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
1139 else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
1140 val = rb_ivar_generic_lookup_with_index(obj, id, index);
1143 goto ret;
1145 else {
1146 struct rb_iv_index_tbl_entry *ent;
1148 if (BUILTIN_TYPE(obj) == T_OBJECT) {
1149 struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
1151 if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
1152 fill_ivar_cache(iseq, ic, cc, is_attr, ent);
1154 // get value
1155 if (ent->index < ROBJECT_NUMIV(obj)) {
1156 val = ROBJECT_IVPTR(obj)[ent->index];
1158 VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
1162 else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
1163 struct st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
1165 if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
1166 fill_ivar_cache(iseq, ic, cc, is_attr, ent);
1167 val = rb_ivar_generic_lookup_with_index(obj, id, ent->index);
1170 else {
1171 // T_CLASS / T_MODULE
1172 goto general_path;
1175 ret:
1176 if (LIKELY(val != Qundef)) {
1177 return val;
1179 else {
1180 return Qnil;
1183 general_path:
1184 #endif /* OPT_IC_FOR_IVAR */
1185 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1187 if (is_attr) {
1188 return rb_attr_get(obj, id);
1190 else {
1191 return rb_ivar_get(obj, id);
1195 ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
1196 NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
1197 NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
1199 static VALUE
1200 vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
1202 rb_check_frozen_internal(obj);
1204 #if OPT_IC_FOR_IVAR
1205 if (RB_TYPE_P(obj, T_OBJECT)) {
1206 struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
1207 struct rb_iv_index_tbl_entry *ent;
1209 if (iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
1210 if (!is_attr) {
1211 ic->entry = ent;
1212 RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
1214 else if (ent->index >= INT_MAX) {
1215 rb_raise(rb_eArgError, "too many instance variables");
1217 else {
1218 vm_cc_attr_index_set(cc, (int)(ent->index + 1));
1221 uint32_t index = ent->index;
1223 if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
1224 rb_init_iv_list(obj);
1226 VALUE *ptr = ROBJECT_IVPTR(obj);
1227 RB_OBJ_WRITE(obj, &ptr[index], val);
1228 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_iv_hit);
1230 return val;
1233 #endif
1234 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
1235 return rb_ivar_set(obj, id, val);
1238 static VALUE
1239 vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
1241 return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
1244 static VALUE
1245 vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
1247 return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
1250 static inline VALUE
1251 vm_setivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
1253 #if OPT_IC_FOR_IVAR
1254 if (LIKELY(RB_TYPE_P(obj, T_OBJECT)) &&
1255 LIKELY(!RB_OBJ_FROZEN_RAW(obj))) {
1257 VM_ASSERT(!rb_ractor_shareable_p(obj));
1259 if (LIKELY(
1260 (!is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_serial, ic->entry && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass))) ||
1261 ( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, vm_cc_attr_index(cc) > 0)))) {
1262 uint32_t index = !is_attr ? ic->entry->index : vm_cc_attr_index(cc)-1;
1264 if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
1265 rb_init_iv_list(obj);
1267 VALUE *ptr = ROBJECT_IVPTR(obj);
1268 RB_OBJ_WRITE(obj, &ptr[index], val);
1269 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1270 return val; /* inline cache hit */
1273 else {
1274 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
1276 #endif /* OPT_IC_FOR_IVAR */
1277 if (is_attr) {
1278 return vm_setivar_slowpath_attr(obj, id, val, cc);
1280 else {
1281 return vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
1285 static VALUE
1286 update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, ICVARC ic)
1288 VALUE defined_class = 0;
1289 VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);
1291 if (RB_TYPE_P(defined_class, T_ICLASS)) {
1292 defined_class = RBASIC(defined_class)->klass;
1295 struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
1296 if (!rb_cvc_tbl) {
1297 rb_bug("the cvc table should be set");
1300 VALUE ent_data;
1301 if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
1302 rb_bug("should have cvar cache entry");
1305 struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;
1306 ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
1308 ic->entry = ent;
1309 RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
1311 return cvar_value;
1314 static inline VALUE
1315 vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
1317 const rb_cref_t *cref;
1319 if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE()) {
1320 VALUE v = Qundef;
1321 RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);
1323 if (st_lookup(RCLASS_IV_TBL(ic->entry->class_value), (st_data_t)id, &v) &&
1324 LIKELY(rb_ractor_main_p())) {
1326 return v;
1330 cref = vm_get_cref(GET_EP());
1331 VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1333 return update_classvariable_cache(iseq, klass, id, ic);
1336 VALUE
1337 rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic)
1339 return vm_getclassvariable(iseq, cfp, id, ic);
1342 static inline void
1343 vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
1345 const rb_cref_t *cref;
1347 if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE()) {
1348 RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);
1350 rb_class_ivar_set(ic->entry->class_value, id, val);
1351 return;
1354 cref = vm_get_cref(GET_EP());
1355 VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
1357 rb_cvar_set(klass, id, val);
1359 update_classvariable_cache(iseq, klass, id, ic);
1362 void
1363 rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
1365 vm_setclassvariable(iseq, cfp, id, val, ic);
1368 static inline VALUE
1369 vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
1371 return vm_getivar(obj, id, iseq, ic, NULL, FALSE);
1374 static inline void
1375 vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1377 vm_setivar(obj, id, val, iseq, ic, 0, 0);
1380 void
1381 rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1383 vm_setinstancevariable(iseq, obj, id, val, ic);
1386 /* Set the instance variable +val+ on object +obj+ at the +index+.
1387 * This function only works with T_OBJECT objects, so make sure
1388 * +obj+ is of type T_OBJECT before using this function.
1390 VALUE
1391 rb_vm_set_ivar_idx(VALUE obj, uint32_t index, VALUE val)
1393 RUBY_ASSERT(RB_TYPE_P(obj, T_OBJECT));
1395 rb_check_frozen_internal(obj);
1397 VM_ASSERT(!rb_ractor_shareable_p(obj));
1399 if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
1400 rb_init_iv_list(obj);
1402 VALUE *ptr = ROBJECT_IVPTR(obj);
1403 RB_OBJ_WRITE(obj, &ptr[index], val);
1405 return val;
1408 static VALUE
1409 vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
1411 /* continue throw */
1413 if (FIXNUM_P(err)) {
1414 ec->tag->state = FIX2INT(err);
1416 else if (SYMBOL_P(err)) {
1417 ec->tag->state = TAG_THROW;
1419 else if (THROW_DATA_P(err)) {
1420 ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
1422 else {
1423 ec->tag->state = TAG_RAISE;
1425 return err;
1428 static VALUE
1429 vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
1430 const int flag, const VALUE throwobj)
1432 const rb_control_frame_t *escape_cfp = NULL;
1433 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
1435 if (flag != 0) {
1436 /* do nothing */
1438 else if (state == TAG_BREAK) {
1439 int is_orphan = 1;
1440 const VALUE *ep = GET_EP();
1441 const rb_iseq_t *base_iseq = GET_ISEQ();
1442 escape_cfp = reg_cfp;
1444 while (base_iseq->body->type != ISEQ_TYPE_BLOCK) {
1445 if (escape_cfp->iseq->body->type == ISEQ_TYPE_CLASS) {
1446 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1447 ep = escape_cfp->ep;
1448 base_iseq = escape_cfp->iseq;
1450 else {
1451 ep = VM_ENV_PREV_EP(ep);
1452 base_iseq = base_iseq->body->parent_iseq;
1453 escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
1454 VM_ASSERT(escape_cfp->iseq == base_iseq);
1458 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1459 /* lambda{... break ...} */
1460 is_orphan = 0;
1461 state = TAG_RETURN;
1463 else {
1464 ep = VM_ENV_PREV_EP(ep);
1466 while (escape_cfp < eocfp) {
1467 if (escape_cfp->ep == ep) {
1468 const rb_iseq_t *const iseq = escape_cfp->iseq;
1469 const VALUE epc = escape_cfp->pc - iseq->body->iseq_encoded;
1470 const struct iseq_catch_table *const ct = iseq->body->catch_table;
1471 unsigned int i;
1473 if (!ct) break;
1474 for (i=0; i < ct->size; i++) {
1475 const struct iseq_catch_table_entry *const entry =
1476 UNALIGNED_MEMBER_PTR(ct, entries[i]);
1478 if (entry->type == CATCH_TYPE_BREAK &&
1479 entry->iseq == base_iseq &&
1480 entry->start < epc && entry->end >= epc) {
1481 if (entry->cont == epc) { /* found! */
1482 is_orphan = 0;
1484 break;
1487 break;
1490 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1494 if (is_orphan) {
1495 rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
1498 else if (state == TAG_RETRY) {
1499 const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
1501 escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
1503 else if (state == TAG_RETURN) {
1504 const VALUE *current_ep = GET_EP();
1505 const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
1506 int in_class_frame = 0;
1507 int toplevel = 1;
1508 escape_cfp = reg_cfp;
1510 // find target_lep, target_ep
1511 while (!VM_ENV_LOCAL_P(ep)) {
1512 if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
1513 target_ep = ep;
1515 ep = VM_ENV_PREV_EP(ep);
1517 target_lep = ep;
1519 while (escape_cfp < eocfp) {
1520 const VALUE *lep = VM_CF_LEP(escape_cfp);
1522 if (!target_lep) {
1523 target_lep = lep;
1526 if (lep == target_lep &&
1527 VM_FRAME_RUBYFRAME_P(escape_cfp) &&
1528 escape_cfp->iseq->body->type == ISEQ_TYPE_CLASS) {
1529 in_class_frame = 1;
1530 target_lep = 0;
1533 if (lep == target_lep) {
1534 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1535 toplevel = 0;
1536 if (in_class_frame) {
1537 /* lambda {class A; ... return ...; end} */
1538 goto valid_return;
1540 else {
1541 const VALUE *tep = current_ep;
1543 while (target_lep != tep) {
1544 if (escape_cfp->ep == tep) {
1545 /* in lambda */
1546 if (tep == target_ep) {
1547 goto valid_return;
1549 else {
1550 goto unexpected_return;
1553 tep = VM_ENV_PREV_EP(tep);
1557 else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
1558 switch (escape_cfp->iseq->body->type) {
1559 case ISEQ_TYPE_TOP:
1560 case ISEQ_TYPE_MAIN:
1561 if (toplevel) {
1562 if (in_class_frame) goto unexpected_return;
1563 if (target_ep == NULL) {
1564 goto valid_return;
1566 else {
1567 goto unexpected_return;
1570 break;
1571 case ISEQ_TYPE_EVAL:
1572 case ISEQ_TYPE_CLASS:
1573 toplevel = 0;
1574 break;
1575 default:
1576 break;
1581 if (escape_cfp->ep == target_lep && escape_cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
1582 if (target_ep == NULL) {
1583 goto valid_return;
1585 else {
1586 goto unexpected_return;
1590 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1592 unexpected_return:;
1593 rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
1595 valid_return:;
1596 /* do nothing */
1598 else {
1599 rb_bug("isns(throw): unsupported throw type");
1602 ec->tag->state = state;
1603 return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
1606 static VALUE
1607 vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
1608 rb_num_t throw_state, VALUE throwobj)
1610 const int state = (int)(throw_state & VM_THROW_STATE_MASK);
1611 const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
1613 if (state != 0) {
1614 return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
1616 else {
1617 return vm_throw_continue(ec, throwobj);
1621 static inline void
1622 vm_expandarray(VALUE *sp, VALUE ary, rb_num_t num, int flag)
1624 int is_splat = flag & 0x01;
1625 rb_num_t space_size = num + is_splat;
1626 VALUE *base = sp - 1;
1627 const VALUE *ptr;
1628 rb_num_t len;
1629 const VALUE obj = ary;
1631 if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
1632 ary = obj;
1633 ptr = &ary;
1634 len = 1;
1636 else {
1637 ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
1638 len = (rb_num_t)RARRAY_LEN(ary);
1641 if (space_size == 0) {
1642 /* no space left on stack */
1644 else if (flag & 0x02) {
1645 /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
1646 rb_num_t i = 0, j;
1648 if (len < num) {
1649 for (i=0; i<num-len; i++) {
1650 *base++ = Qnil;
1653 for (j=0; i<num; i++, j++) {
1654 VALUE v = ptr[len - j - 1];
1655 *base++ = v;
1657 if (is_splat) {
1658 *base = rb_ary_new4(len - j, ptr);
1661 else {
1662 /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
1663 rb_num_t i;
1664 VALUE *bptr = &base[space_size - 1];
1666 for (i=0; i<num; i++) {
1667 if (len <= i) {
1668 for (; i<num; i++) {
1669 *bptr-- = Qnil;
1671 break;
1673 *bptr-- = ptr[i];
1675 if (is_splat) {
1676 if (num > len) {
1677 *bptr = rb_ary_new();
1679 else {
1680 *bptr = rb_ary_new4(len - num, ptr + num);
1684 RB_GC_GUARD(ary);
1687 static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
1689 static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
1691 static struct rb_class_cc_entries *
1692 vm_ccs_create(VALUE klass, const rb_callable_method_entry_t *cme)
1694 struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
1695 #if VM_CHECK_MODE > 0
1696 ccs->debug_sig = ~(VALUE)ccs;
1697 #endif
1698 ccs->capa = 0;
1699 ccs->len = 0;
1700 RB_OBJ_WRITE(klass, &ccs->cme, cme);
1701 METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
1702 ccs->entries = NULL;
1703 return ccs;
1706 static void
1707 vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
1709 if (! vm_cc_markable(cc)) {
1710 return;
1712 else if (! vm_ci_markable(ci)) {
1713 return;
1716 if (UNLIKELY(ccs->len == ccs->capa)) {
1717 if (ccs->capa == 0) {
1718 ccs->capa = 1;
1719 ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
1721 else {
1722 ccs->capa *= 2;
1723 REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
1726 VM_ASSERT(ccs->len < ccs->capa);
1728 const int pos = ccs->len++;
1729 RB_OBJ_WRITE(klass, &ccs->entries[pos].ci, ci);
1730 RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
1732 if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
1733 // for tuning
1734 // vm_mtbl_dump(klass, 0);
1738 #if VM_CHECK_MODE > 0
1739 void
1740 rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
1742 ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
1743 for (int i=0; i<ccs->len; i++) {
1744 vm_ci_dump(ccs->entries[i].ci);
1745 rp(ccs->entries[i].cc);
1749 static int
1750 vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
1752 VM_ASSERT(vm_ccs_p(ccs));
1753 VM_ASSERT(ccs->len <= ccs->capa);
1755 for (int i=0; i<ccs->len; i++) {
1756 const struct rb_callinfo *ci = ccs->entries[i].ci;
1757 const struct rb_callcache *cc = ccs->entries[i].cc;
1759 VM_ASSERT(vm_ci_p(ci));
1760 VM_ASSERT(vm_ci_mid(ci) == mid);
1761 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
1762 VM_ASSERT(vm_cc_class_check(cc, klass));
1763 VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
1765 return TRUE;
1767 #endif
1769 #ifndef MJIT_HEADER
1771 static const rb_callable_method_entry_t *check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
1773 static const struct rb_callcache *
1774 vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
1776 const ID mid = vm_ci_mid(ci);
1777 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
1778 struct rb_class_cc_entries *ccs = NULL;
1779 VALUE ccs_data;
1781 if (cc_tbl) {
1782 if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
1783 ccs = (struct rb_class_cc_entries *)ccs_data;
1784 const int ccs_len = ccs->len;
1786 if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
1787 rb_vm_ccs_free(ccs);
1788 rb_id_table_delete(cc_tbl, mid);
1789 ccs = NULL;
1791 else {
1792 VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
1794 for (int i=0; i<ccs_len; i++) {
1795 const struct rb_callinfo *ccs_ci = ccs->entries[i].ci;
1796 const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
1798 VM_ASSERT(vm_ci_p(ccs_ci));
1799 VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
1801 if (ccs_ci == ci) { // TODO: equality
1802 RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
1804 VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
1805 VM_ASSERT(ccs_cc->klass == klass);
1806 VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
1808 return ccs_cc;
1814 else {
1815 cc_tbl = RCLASS_CC_TBL(klass) = rb_id_table_create(2);
1818 RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
1820 const rb_callable_method_entry_t *cme;
1822 if (ccs) {
1823 cme = ccs->cme;
1824 cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
1826 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
1828 else {
1829 cme = rb_callable_method_entry(klass, mid);
1832 VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
1834 if (cme == NULL) {
1835 // undef or not found: can't cache the information
1836 VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
1837 return &vm_empty_cc;
1840 VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
1842 METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
1844 if (ccs == NULL) {
1845 VM_ASSERT(cc_tbl != NULL);
1847 if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) {
1848 // rb_callable_method_entry() prepares ccs.
1849 ccs = (struct rb_class_cc_entries *)ccs_data;
1851 else {
1852 // TODO: required?
1853 ccs = vm_ccs_create(klass, cme);
1854 rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
1858 cme = check_overloaded_cme(cme, ci);
1860 const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general);
1861 vm_ccs_push(klass, ccs, ci, cc);
1863 VM_ASSERT(vm_cc_cme(cc) != NULL);
1864 VM_ASSERT(cme->called_id == mid);
1865 VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
1867 return cc;
1870 MJIT_FUNC_EXPORTED const struct rb_callcache *
1871 rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
1873 const struct rb_callcache *cc;
1875 VM_ASSERT(RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS));
1877 RB_VM_LOCK_ENTER();
1879 cc = vm_search_cc(klass, ci);
1881 VM_ASSERT(cc);
1882 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
1883 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
1884 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
1885 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
1886 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
1888 RB_VM_LOCK_LEAVE();
1890 return cc;
1892 #endif
1894 static const struct rb_callcache *
1895 vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
1897 #if USE_DEBUG_COUNTER
1898 const struct rb_callcache *old_cc = cd->cc;
1899 #endif
1901 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
1903 #if OPT_INLINE_METHOD_CACHE
1904 cd->cc = cc;
1906 const struct rb_callcache *empty_cc =
1907 #ifdef MJIT_HEADER
1908 rb_vm_empty_cc();
1909 #else
1910 &vm_empty_cc;
1911 #endif
1912 if (cd_owner && cc != empty_cc) RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
1914 #if USE_DEBUG_COUNTER
1915 if (old_cc == &empty_cc) {
1916 // empty
1917 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
1919 else if (old_cc == cc) {
1920 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
1922 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
1923 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
1925 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
1926 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
1927 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
1929 else {
1930 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
1932 #endif
1933 #endif // OPT_INLINE_METHOD_CACHE
1935 VM_ASSERT(vm_cc_cme(cc) == NULL ||
1936 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
1938 return cc;
1941 #ifndef MJIT_HEADER
1942 ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
1943 #endif
1944 static const struct rb_callcache *
1945 vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
1947 const struct rb_callcache *cc = cd->cc;
1949 #if OPT_INLINE_METHOD_CACHE
1950 if (LIKELY(vm_cc_class_check(cc, klass))) {
1951 if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
1952 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
1953 RB_DEBUG_COUNTER_INC(mc_inline_hit);
1954 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
1955 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
1956 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
1958 return cc;
1960 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
1962 else {
1963 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
1965 #endif
1967 return vm_search_method_slowpath0(cd_owner, cd, klass);
1970 static const struct rb_callcache *
1971 vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
1973 VALUE klass = CLASS_OF(recv);
1974 VM_ASSERT(klass != Qfalse);
1975 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
1977 return vm_search_method_fastpath(cd_owner, cd, klass);
1980 static inline int
1981 check_cfunc(const rb_callable_method_entry_t *me, VALUE (*func)(ANYARGS))
1983 if (! me) {
1984 return false;
1986 else {
1987 VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
1988 VM_ASSERT(callable_method_entry_p(me));
1989 VM_ASSERT(me->def);
1990 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
1991 return false;
1993 else {
1994 return me->def->body.cfunc.func == func;
1999 static inline int
2000 vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, VALUE (*func)(ANYARGS))
2002 VM_ASSERT(iseq != NULL);
2003 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
2004 return check_cfunc(vm_cc_cme(cc), func);
2007 #define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
2009 static inline bool
2010 FIXNUM_2_P(VALUE a, VALUE b)
2012 /* FIXNUM_P(a) && FIXNUM_P(b)
2013 * == ((a & 1) && (b & 1))
2014 * == a & b & 1 */
2015 SIGNED_VALUE x = a;
2016 SIGNED_VALUE y = b;
2017 SIGNED_VALUE z = x & y & 1;
2018 return z == 1;
2021 static inline bool
2022 FLONUM_2_P(VALUE a, VALUE b)
2024 #if USE_FLONUM
2025 /* FLONUM_P(a) && FLONUM_P(b)
2026 * == ((a & 3) == 2) && ((b & 3) == 2)
2027 * == ! ((a ^ 2) | (b ^ 2) & 3)
2029 SIGNED_VALUE x = a;
2030 SIGNED_VALUE y = b;
2031 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
2032 return !z;
2033 #else
2034 return false;
2035 #endif
2038 static VALUE
2039 opt_equality_specialized(VALUE recv, VALUE obj)
2041 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
2042 goto compare_by_identity;
2044 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
2045 goto compare_by_identity;
2047 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
2048 goto compare_by_identity;
2050 else if (SPECIAL_CONST_P(recv)) {
2053 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
2054 double a = RFLOAT_VALUE(recv);
2055 double b = RFLOAT_VALUE(obj);
2057 #if MSC_VERSION_BEFORE(1300)
2058 if (isnan(a)) {
2059 return Qfalse;
2061 else if (isnan(b)) {
2062 return Qfalse;
2064 else
2065 #endif
2066 return RBOOL(a == b);
2068 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
2069 if (recv == obj) {
2070 return Qtrue;
2072 else if (RB_TYPE_P(obj, T_STRING)) {
2073 return rb_str_eql_internal(obj, recv);
2076 return Qundef;
2078 compare_by_identity:
2079 return RBOOL(recv == obj);
2082 static VALUE
2083 opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
2085 VM_ASSERT(cd_owner != NULL);
2087 VALUE val = opt_equality_specialized(recv, obj);
2088 if (val != Qundef) return val;
2090 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
2091 return Qundef;
2093 else {
2094 return RBOOL(recv == obj);
2098 #undef EQ_UNREDEFINED_P
2100 #ifndef MJIT_HEADER
2102 static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, int argc); // vm_eval.c
2103 NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
2105 static VALUE
2106 opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
2108 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, 1);
2110 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
2111 return RBOOL(recv == obj);
2113 else {
2114 return Qundef;
2118 static VALUE
2119 opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
2121 VALUE val = opt_equality_specialized(recv, obj);
2122 if (val != Qundef) {
2123 return val;
2125 else {
2126 return opt_equality_by_mid_slowpath(recv, obj, mid);
2130 VALUE
2131 rb_equal_opt(VALUE obj1, VALUE obj2)
2133 return opt_equality_by_mid(obj1, obj2, idEq);
2136 VALUE
2137 rb_eql_opt(VALUE obj1, VALUE obj2)
2139 return opt_equality_by_mid(obj1, obj2, idEqlP);
2142 #endif // MJIT_HEADER
2144 extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2145 extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
2147 static VALUE
2148 check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2150 switch (type) {
2151 case VM_CHECKMATCH_TYPE_WHEN:
2152 return pattern;
2153 case VM_CHECKMATCH_TYPE_RESCUE:
2154 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2155 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2157 /* fall through */
2158 case VM_CHECKMATCH_TYPE_CASE: {
2159 return rb_vm_call_with_refinements(ec, pattern, idEqq, 1, &target, RB_NO_KEYWORDS);
2161 default:
2162 rb_bug("check_match: unreachable");
2167 #if MSC_VERSION_BEFORE(1300)
2168 #define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2169 #else
2170 #define CHECK_CMP_NAN(a, b) /* do nothing */
2171 #endif
2173 static inline VALUE
2174 double_cmp_lt(double a, double b)
2176 CHECK_CMP_NAN(a, b);
2177 return RBOOL(a < b);
2180 static inline VALUE
2181 double_cmp_le(double a, double b)
2183 CHECK_CMP_NAN(a, b);
2184 return RBOOL(a <= b);
2187 static inline VALUE
2188 double_cmp_gt(double a, double b)
2190 CHECK_CMP_NAN(a, b);
2191 return RBOOL(a > b);
2194 static inline VALUE
2195 double_cmp_ge(double a, double b)
2197 CHECK_CMP_NAN(a, b);
2198 return RBOOL(a >= b);
2201 static inline VALUE *
2202 vm_base_ptr(const rb_control_frame_t *cfp)
2204 #if 0 // we may optimize and use this once we confirm it does not spoil performance on JIT.
2205 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2207 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2208 VALUE *bp = prev_cfp->sp + cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
2209 if (cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
2210 /* adjust `self' */
2211 bp += 1;
2213 #if VM_DEBUG_BP_CHECK
2214 if (bp != cfp->bp_check) {
2215 ruby_debug_printf("bp_check: %ld, bp: %ld\n",
2216 (long)(cfp->bp_check - GET_EC()->vm_stack),
2217 (long)(bp - GET_EC()->vm_stack));
2218 rb_bug("vm_base_ptr: unreachable");
2220 #endif
2221 return bp;
2223 else {
2224 return NULL;
2226 #else
2227 return cfp->__bp__;
2228 #endif
2231 /* method call processes with call_info */
2233 #include "vm_args.c"
2235 static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2236 ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2237 static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2238 static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2239 static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2240 static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2241 static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2243 static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2245 static VALUE
2246 vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2248 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2250 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2253 static VALUE
2254 vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2256 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2258 const struct rb_callcache *cc = calling->cc;
2259 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2260 int param = iseq->body->param.size;
2261 int local = iseq->body->local_table_size;
2262 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2265 MJIT_STATIC bool
2266 rb_simple_iseq_p(const rb_iseq_t *iseq)
2268 return iseq->body->param.flags.has_opt == FALSE &&
2269 iseq->body->param.flags.has_rest == FALSE &&
2270 iseq->body->param.flags.has_post == FALSE &&
2271 iseq->body->param.flags.has_kw == FALSE &&
2272 iseq->body->param.flags.has_kwrest == FALSE &&
2273 iseq->body->param.flags.accepts_no_kwarg == FALSE &&
2274 iseq->body->param.flags.has_block == FALSE;
2277 MJIT_FUNC_EXPORTED bool
2278 rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2280 return iseq->body->param.flags.has_opt == TRUE &&
2281 iseq->body->param.flags.has_rest == FALSE &&
2282 iseq->body->param.flags.has_post == FALSE &&
2283 iseq->body->param.flags.has_kw == FALSE &&
2284 iseq->body->param.flags.has_kwrest == FALSE &&
2285 iseq->body->param.flags.accepts_no_kwarg == FALSE &&
2286 iseq->body->param.flags.has_block == FALSE;
2289 MJIT_FUNC_EXPORTED bool
2290 rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2292 return iseq->body->param.flags.has_opt == FALSE &&
2293 iseq->body->param.flags.has_rest == FALSE &&
2294 iseq->body->param.flags.has_post == FALSE &&
2295 iseq->body->param.flags.has_kw == TRUE &&
2296 iseq->body->param.flags.has_kwrest == FALSE &&
2297 iseq->body->param.flags.has_block == FALSE;
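/* Concretely, assuming the usual flag semantics:
 *     def m(a, b)        -> rb_simple_iseq_p
 *     def m(a, b = 1)    -> rb_iseq_only_optparam_p
 *     def m(a, k: 1)     -> rb_iseq_only_kwparam_p
 * Rest/post parameters, **kwrest, **nil and a &block parameter disqualify all
 * three, forcing the general setup_parameters_complex() path. */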
2300 // If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
2301 MJIT_STATIC bool
2302 rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
2304 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
2308 static inline void
2309 CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2310 struct rb_calling_info *restrict calling,
2311 const struct rb_callinfo *restrict ci)
2313 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2314 VALUE final_hash;
2315 /* This expands the rest argument to the stack.
2316 * So, vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT is now inconsistent. */
2318 vm_caller_setup_arg_splat(cfp, calling);
2319 if (!IS_ARGS_KW_OR_KW_SPLAT(ci) &&
2320 calling->argc > 0 &&
2321 RB_TYPE_P((final_hash = *(cfp->sp - 1)), T_HASH) &&
2322 (((struct RHash *)final_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2323 *(cfp->sp - 1) = rb_hash_dup(final_hash);
2324 calling->kw_splat = 1;
2327 if (UNLIKELY(IS_ARGS_KW_OR_KW_SPLAT(ci))) {
2328 if (IS_ARGS_KEYWORD(ci)) {
2329 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2330 * by creating a keyword hash.
2331 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent. */
2333 vm_caller_setup_arg_kw(cfp, calling, ci);
2335 else {
2336 VALUE keyword_hash = cfp->sp[-1];
2337 if (!RB_TYPE_P(keyword_hash, T_HASH)) {
2338 /* Convert a non-hash keyword splat to a new hash */
2339 cfp->sp[-1] = rb_hash_dup(rb_to_hash_type(keyword_hash));
2341 else if (!IS_ARGS_KW_SPLAT_MUT(ci)) {
2342 /* Convert a hash keyword splat to a new hash unless
2343 * a mutable keyword splat was passed. */
2345 cfp->sp[-1] = rb_hash_dup(keyword_hash);
2351 static inline void
2352 CALLER_REMOVE_EMPTY_KW_SPLAT(struct rb_control_frame_struct *restrict cfp,
2353 struct rb_calling_info *restrict calling,
2354 const struct rb_callinfo *restrict ci)
2356 if (UNLIKELY(calling->kw_splat)) {
2357 /* This removes the last Hash object if it is empty.
2358 * So, vm_ci_flag(ci) & VM_CALL_KW_SPLAT is now inconsistent. */
2360 if (RHASH_EMPTY_P(cfp->sp[-1])) {
2361 cfp->sp--;
2362 calling->argc--;
2363 calling->kw_splat = 0;
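/* This removal is what makes `m(**{})` behave like `m()`: the caller pushed
 * an empty hash for the kw splat, and the callee-side setup pops it and
 * decrements argc before arity checking, so a method that accepts no
 * keywords still succeeds. */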
2368 #define USE_OPT_HIST 0
2370 #if USE_OPT_HIST
2371 #define OPT_HIST_MAX 64
2372 static int opt_hist[OPT_HIST_MAX+1];
2374 __attribute__((destructor))
2375 static void
2376 opt_hist_show_results_at_exit(void)
2378 for (int i=0; i<=OPT_HIST_MAX; i++) {  /* include the overflow bucket */
2379 ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
2382 #endif
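/* When USE_OPT_HIST is enabled, the histogram above is dumped via GCC/Clang's
 * __attribute__((destructor)), which runs after main() returns. A minimal
 * standalone sketch of that idiom (nothing here is CRuby API): */
#if 0 /* standalone sketch; compile separately */
#include <stdio.h>

static int hits[4];

__attribute__((destructor))
static void
dump_at_exit(void)
{
    for (int i = 0; i < 4; i++) printf("bucket %d: %d\n", i, hits[i]);
}

int
main(void)
{
    hits[1] += 2;
    hits[3] += 1;
    return 0;   /* dump_at_exit() still runs */
}
#endif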
2384 static VALUE
2385 vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2386 struct rb_calling_info *calling)
2388 const struct rb_callcache *cc = calling->cc;
2389 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2390 const int lead_num = iseq->body->param.lead_num;
2391 const int opt = calling->argc - lead_num;
2392 const int opt_num = iseq->body->param.opt_num;
2393 const int opt_pc = (int)iseq->body->param.opt_table[opt];
2394 const int param = iseq->body->param.size;
2395 const int local = iseq->body->local_table_size;
2396 const int delta = opt_num - opt;
2398 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2400 #if USE_OPT_HIST
2401 if (opt < OPT_HIST_MAX) {  /* the histogram is indexed by opt, not opt_pc */
2402 opt_hist[opt]++;
2404 else {
2405 opt_hist[OPT_HIST_MAX]++;
2407 #endif
2409 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
2412 static VALUE
2413 vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2414 struct rb_calling_info *calling)
2416 const struct rb_callcache *cc = calling->cc;
2417 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2418 const int lead_num = iseq->body->param.lead_num;
2419 const int opt = calling->argc - lead_num;
2420 const int opt_pc = (int)iseq->body->param.opt_table[opt];
2422 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2424 #if USE_OPT_HIST
2425 if (opt < OPT_HIST_MAX) {  /* the histogram is indexed by opt, not opt_pc */
2426 opt_hist[opt]++;
2428 else {
2429 opt_hist[OPT_HIST_MAX]++;
2431 #endif
2433 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2436 static void
2437 args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq,
2438 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
2439 VALUE *const locals);
2441 static VALUE
2442 vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2443 struct rb_calling_info *calling)
2445 const struct rb_callinfo *ci = calling->ci;
2446 const struct rb_callcache *cc = calling->cc;
2448 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
2449 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
2451 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2452 const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
2453 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
2454 const int ci_kw_len = kw_arg->keyword_len;
2455 const VALUE * const ci_keywords = kw_arg->keywords;
2456 VALUE *argv = cfp->sp - calling->argc;
2457 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2458 const int lead_num = iseq->body->param.lead_num;
2459 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
2460 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
2461 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
2463 int param = iseq->body->param.size;
2464 int local = iseq->body->local_table_size;
2465 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2468 static VALUE
2469 vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2470 struct rb_calling_info *calling)
2472 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->ci;
2473 const struct rb_callcache *cc = calling->cc;
2475 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
2476 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
2478 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2479 const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
2480 VALUE * const argv = cfp->sp - calling->argc;
2481 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
2483 int i;
2484 for (i=0; i<kw_param->num; i++) {
2485 klocals[i] = kw_param->default_values[i];
2487 klocals[i] = INT2FIX(0); // kw specify flag
2488 // NOTE:
2489 // nobody checks this value, but it should be cleared because it can
2490 // point to an invalid VALUE (T_NONE objects, raw pointers and so on).
2492 int param = iseq->body->param.size;
2493 int local = iseq->body->local_table_size;
2494 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2497 static inline int
2498 vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
2499 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
2501 const struct rb_callinfo *ci = calling->ci;
2502 const struct rb_callcache *cc = calling->cc;
2503 bool cacheable_ci = vm_ci_markable(ci);
2505 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
2506 if (LIKELY(rb_simple_iseq_p(iseq))) {
2507 rb_control_frame_t *cfp = ec->cfp;
2508 CALLER_SETUP_ARG(cfp, calling, ci);
2509 CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
2511 if (calling->argc != iseq->body->param.lead_num) {
2512 argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
2515 VM_ASSERT(ci == calling->ci);
2516 VM_ASSERT(cc == calling->cc);
2517 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), cacheable_ci && vm_call_iseq_optimizable_p(ci, cc));
2518 return 0;
2520 else if (rb_iseq_only_optparam_p(iseq)) {
2521 rb_control_frame_t *cfp = ec->cfp;
2522 CALLER_SETUP_ARG(cfp, calling, ci);
2523 CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
2525 const int lead_num = iseq->body->param.lead_num;
2526 const int opt_num = iseq->body->param.opt_num;
2527 const int argc = calling->argc;
2528 const int opt = argc - lead_num;
2530 if (opt < 0 || opt > opt_num) {
2531 argument_arity_error(ec, iseq, argc, lead_num, lead_num + opt_num);
2534 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
2535 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
2536 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
2537 cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
2539 else {
2540 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
2541 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
2542 cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
2545 /* initialize unfilled optional locals to nil; a default may reference its own variable (e.g. `def m(a = a)`) */
2546 VM_ASSERT((int)iseq->body->param.size == lead_num + opt_num);
2547 for (int i=argc; i<lead_num + opt_num; i++) {
2548 argv[i] = Qnil;
2550 return (int)iseq->body->param.opt_table[opt];
2552 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
2553 const int lead_num = iseq->body->param.lead_num;
2554 const int argc = calling->argc;
2555 const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
2557 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
2558 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
2560 if (argc - kw_arg->keyword_len == lead_num) {
2561 const int ci_kw_len = kw_arg->keyword_len;
2562 const VALUE * const ci_keywords = kw_arg->keywords;
2563 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
2564 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
2566 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2567 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
2569 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
2570 cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
2572 return 0;
2575 else if (argc == lead_num) {
2576 /* no kwarg */
2577 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2578 args_setup_kw_parameters(ec, iseq, NULL, 0, NULL, klocals);
2580 if (klocals[kw_param->num] == INT2FIX(0)) {
2581 /* copy from default_values */
2582 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
2583 cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
2586 return 0;
2591 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
2594 static VALUE
2595 vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2597 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
2599 const struct rb_callcache *cc = calling->cc;
2600 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2601 const int param_size = iseq->body->param.size;
2602 const int local_size = iseq->body->local_table_size;
2603 const int opt_pc = vm_callee_setup_arg(ec, calling, def_iseq_ptr(vm_cc_cme(cc)->def), cfp->sp - calling->argc, param_size, local_size);
2604 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
2607 static inline VALUE
2608 vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
2609 int opt_pc, int param_size, int local_size)
2611 const struct rb_callinfo *ci = calling->ci;
2612 const struct rb_callcache *cc = calling->cc;
2614 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
2615 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
2617 else {
2618 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2622 static inline VALUE
2623 vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
2624 int opt_pc, int param_size, int local_size)
2626 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
2627 VALUE *argv = cfp->sp - calling->argc;
2628 VALUE *sp = argv + param_size;
2629 cfp->sp = argv - 1 /* recv */;
2631 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
2632 calling->block_handler, (VALUE)me,
2633 iseq->body->iseq_encoded + opt_pc, sp,
2634 local_size - param_size,
2635 iseq->body->stack_max);
2636 return Qundef;
2639 static inline VALUE
2640 vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
2642 const struct rb_callcache *cc = calling->cc;
2643 unsigned int i;
2644 VALUE *argv = cfp->sp - calling->argc;
2645 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
2646 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
2647 VALUE *src_argv = argv;
2648 VALUE *sp_orig, *sp;
2649 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
2651 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
2652 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
2653 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
2654 dst_captured->code.val = src_captured->code.val;
2655 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
2656 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
2658 else {
2659 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
2663 vm_pop_frame(ec, cfp, cfp->ep);
2664 cfp = ec->cfp;
2666 sp_orig = sp = cfp->sp;
2668 /* push self */
2669 sp[0] = calling->recv;
2670 sp++;
2672 /* copy arguments */
2673 for (i=0; i < iseq->body->param.size; i++) {
2674 *sp++ = src_argv[i];
2677 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
2678 calling->recv, calling->block_handler, (VALUE)me,
2679 iseq->body->iseq_encoded + opt_pc, sp,
2680 iseq->body->local_table_size - iseq->body->param.size,
2681 iseq->body->stack_max);
2683 cfp->sp = sp_orig;
2685 return Qundef;
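/* The tailcall path pops the caller's frame first (vm_pop_frame above) and
 * rebuilds the callee's frame in the space it vacated, so Ruby-level stack
 * depth stays constant; the arguments are copied from their old slots
 * (src_argv) into the reused region. A toy standalone model of the reuse,
 * with hypothetical names: */
#if 0 /* standalone sketch; compile separately */
#include <stdio.h>

struct frame { const char *name; };

static struct frame *
push(struct frame *sp, const char *name)
{
    sp->name = name;
    return sp + 1;
}

static struct frame *
tailcall(struct frame *sp, const char *name)
{
    return push(sp - 1, name);   /* pop, then push into the same slot */
}

int
main(void)
{
    struct frame stack[8], *sp = stack;
    sp = push(sp, "caller");
    sp = tailcall(sp, "callee");   /* depth is still 1 */
    printf("depth=%d top=%s\n", (int)(sp - stack), stack[0].name);
    return 0;
}
#endif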
2688 static void
2689 ractor_unsafe_check(void)
2691 if (!rb_ractor_main_p()) {
2692 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from non-main ractor");
2696 static VALUE
2697 call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2699 ractor_unsafe_check();
2700 return (*func)(recv, rb_ary_new4(argc, argv));
2703 static VALUE
2704 call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2706 ractor_unsafe_check();
2707 return (*func)(argc, argv, recv);
2710 static VALUE
2711 call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2713 ractor_unsafe_check();
2714 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
2715 return (*f)(recv);
2718 static VALUE
2719 call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2721 ractor_unsafe_check();
2722 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
2723 return (*f)(recv, argv[0]);
2726 static VALUE
2727 call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2729 ractor_unsafe_check();
2730 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
2731 return (*f)(recv, argv[0], argv[1]);
2734 static VALUE
2735 call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2737 ractor_unsafe_check();
2738 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
2739 return (*f)(recv, argv[0], argv[1], argv[2]);
2742 static VALUE
2743 call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2745 ractor_unsafe_check();
2746 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
2747 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
2750 static VALUE
2751 call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2753 ractor_unsafe_check();
2754 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2755 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
2758 static VALUE
2759 call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2761 ractor_unsafe_check();
2762 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2763 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
2766 static VALUE
2767 call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2769 ractor_unsafe_check();
2770 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2771 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
2774 static VALUE
2775 call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2777 ractor_unsafe_check();
2778 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2779 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
2782 static VALUE
2783 call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2785 ractor_unsafe_check();
2786 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2787 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
2790 static VALUE
2791 call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2793 ractor_unsafe_check();
2794 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2795 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
2798 static VALUE
2799 call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2801 ractor_unsafe_check();
2802 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2803 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
2806 static VALUE
2807 call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2809 ractor_unsafe_check();
2810 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2811 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
2814 static VALUE
2815 call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2817 ractor_unsafe_check();
2818 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2819 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
2822 static VALUE
2823 call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2825 ractor_unsafe_check();
2826 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2827 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
2830 static VALUE
2831 call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2833 ractor_unsafe_check();
2834 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2835 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
2838 static VALUE
2839 ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2841 return (*func)(recv, rb_ary_new4(argc, argv));
2844 static VALUE
2845 ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2847 return (*func)(argc, argv, recv);
2850 static VALUE
2851 ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2853 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
2854 return (*f)(recv);
2857 static VALUE
2858 ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2860 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
2861 return (*f)(recv, argv[0]);
2864 static VALUE
2865 ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2867 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
2868 return (*f)(recv, argv[0], argv[1]);
2871 static VALUE
2872 ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2874 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
2875 return (*f)(recv, argv[0], argv[1], argv[2]);
2878 static VALUE
2879 ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2881 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
2882 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
2885 static VALUE
2886 ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2888 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2889 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
2892 static VALUE
2893 ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2895 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2896 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
2899 static VALUE
2900 ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2902 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2903 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
2906 static VALUE
2907 ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2909 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2910 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
2913 static VALUE
2914 ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2916 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2917 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
2920 static VALUE
2921 ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2923 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2924 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
2927 static VALUE
2928 ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2930 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2931 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
2934 static VALUE
2935 ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2937 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2938 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
2941 static VALUE
2942 ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2944 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2945 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
2948 static VALUE
2949 ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2951 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2952 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
2955 static VALUE
2956 ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2958 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2959 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
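/* Both invoker families above are instances of one pattern: the method entry
 * stores a single type-erased pointer (VALUE (*)(ANYARGS)) plus an arity, and
 * the invoker selected by that arity casts back to the true signature before
 * the call -- calling through a mismatched function-pointer type would be
 * undefined behavior. A standalone sketch (hypothetical names, int standing
 * in for VALUE): */
#if 0 /* standalone sketch; compile separately */
#include <stdio.h>

typedef int (*anyargs_fn)();   /* type-erased, like VALUE (*)(ANYARGS) */

static int invoke_0(anyargs_fn f, const int *argv) { (void)argv; return ((int (*)(void))f)(); }
static int invoke_1(anyargs_fn f, const int *argv) { return ((int (*)(int))f)(argv[0]); }
static int invoke_2(anyargs_fn f, const int *argv) { return ((int (*)(int, int))f)(argv[0], argv[1]); }

static int (*const invokers[])(anyargs_fn, const int *) = { invoke_0, invoke_1, invoke_2 };

static int add(int a, int b) { return a + b; }

int
main(void)
{
    int argv[] = { 20, 22 };
    printf("%d\n", invokers[2]((anyargs_fn)add, argv));   /* 42 */
    return 0;
}
#endif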
2962 static inline int
2963 vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
2965 const int ov_flags = RAISED_STACKOVERFLOW;
2966 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
2967 if (rb_ec_raised_p(ec, ov_flags)) {
2968 rb_ec_raised_reset(ec, ov_flags);
2969 return TRUE;
2971 return FALSE;
2974 #define CHECK_CFP_CONSISTENCY(func) \
2975 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
2976 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
2978 static inline
2979 const rb_method_cfunc_t *
2980 vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
2982 #if VM_DEBUG_VERIFY_METHOD_CACHE
2983 switch (me->def->type) {
2984 case VM_METHOD_TYPE_CFUNC:
2985 case VM_METHOD_TYPE_NOTIMPLEMENTED:
2986 break;
2987 # define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
2988 METHOD_BUG(ISEQ);
2989 METHOD_BUG(ATTRSET);
2990 METHOD_BUG(IVAR);
2991 METHOD_BUG(BMETHOD);
2992 METHOD_BUG(ZSUPER);
2993 METHOD_BUG(UNDEF);
2994 METHOD_BUG(OPTIMIZED);
2995 METHOD_BUG(MISSING);
2996 METHOD_BUG(REFINED);
2997 METHOD_BUG(ALIAS);
2998 # undef METHOD_BUG
2999 default:
3000 rb_bug("wrong method type: %d", me->def->type);
3002 #endif
3003 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
3006 static VALUE
3007 vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3009 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
3010 const struct rb_callinfo *ci = calling->ci;
3011 const struct rb_callcache *cc = calling->cc;
3012 VALUE val;
3013 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
3014 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
3015 int len = cfunc->argc;
3017 VALUE recv = calling->recv;
3018 VALUE block_handler = calling->block_handler;
3019 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
3020 int argc = calling->argc;
3021 int orig_argc = argc;
3023 if (UNLIKELY(calling->kw_splat)) {
3024 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
3027 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
3028 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
3030 vm_push_frame(ec, NULL, frame_type, recv,
3031 block_handler, (VALUE)me,
3032 0, ec->cfp->sp, 0, 0);
3034 if (len >= 0) rb_check_arity(argc, len, len);
3036 reg_cfp->sp -= orig_argc + 1;
3037 val = (*cfunc->invoker)(recv, argc, reg_cfp->sp + 1, cfunc->func);
3039 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
3041 rb_vm_pop_frame(ec);
3043 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
3044 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
3046 return val;
3049 static VALUE
3050 vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3052 const struct rb_callinfo *ci = calling->ci;
3053 RB_DEBUG_COUNTER_INC(ccf_cfunc);
3055 CALLER_SETUP_ARG(reg_cfp, calling, ci);
3056 CALLER_REMOVE_EMPTY_KW_SPLAT(reg_cfp, calling, ci);
3057 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat);
3058 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
3061 static VALUE
3062 vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3064 const struct rb_callcache *cc = calling->cc;
3065 RB_DEBUG_COUNTER_INC(ccf_ivar);
3066 cfp->sp -= 1;
3067 return vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE);
3070 static VALUE
3071 vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3073 const struct rb_callcache *cc = calling->cc;
3074 RB_DEBUG_COUNTER_INC(ccf_attrset);
3075 VALUE val = *(cfp->sp - 1);
3076 cfp->sp -= 2;
3077 return vm_setivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, val, NULL, NULL, cc, 1);
3080 bool
3081 rb_vm_call_ivar_attrset_p(const vm_call_handler ch)
3083 return (ch == vm_call_ivar || ch == vm_call_attrset);
3086 static inline VALUE
3087 vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
3089 rb_proc_t *proc;
3090 VALUE val;
3091 const struct rb_callcache *cc = calling->cc;
3092 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
3093 VALUE procv = cme->def->body.bmethod.proc;
3095 if (!RB_OBJ_SHAREABLE_P(procv) &&
3096 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
3097 rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
3100 /* control block frame */
3101 GetProcPtr(procv, proc);
3102 val = rb_vm_invoke_bmethod(ec, proc, calling->recv, calling->argc, argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
3104 return val;
3107 static VALUE
3108 vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3110 RB_DEBUG_COUNTER_INC(ccf_bmethod);
3112 VALUE *argv;
3113 int argc;
3114 const struct rb_callinfo *ci = calling->ci;
3116 CALLER_SETUP_ARG(cfp, calling, ci);
3117 argc = calling->argc;
3118 argv = ALLOCA_N(VALUE, argc);
3119 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
3120 cfp->sp -= argc + 1;
3122 return vm_call_bmethod_body(ec, calling, argv);
3125 MJIT_FUNC_EXPORTED VALUE
3126 rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
3128 VALUE klass = current_class;
3130 /* for a prepended Module, start the search from the origin ("cover") class */
3131 if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN) &&
3132 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
3133 klass = RBASIC_CLASS(klass);
3136 while (RTEST(klass)) {
3137 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
3138 if (owner == target_owner) {
3139 return klass;
3141 klass = RCLASS_SUPER(klass);
3144 return current_class; /* maybe module function */
3147 static const rb_callable_method_entry_t *
3148 aliased_callable_method_entry(const rb_callable_method_entry_t *me)
3150 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
3151 const rb_callable_method_entry_t *cme;
3153 if (orig_me->defined_class == 0) {
3154 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
3155 VM_ASSERT(RB_TYPE_P(orig_me->owner, T_MODULE));
3156 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
3158 if (me->def->alias_count + me->def->complemented_count == 0) {
3159 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
3161 else {
3162 rb_method_definition_t *def =
3163 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
3164 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
3167 else {
3168 cme = (const rb_callable_method_entry_t *)orig_me;
3171 VM_ASSERT(callable_method_entry_p(cme));
3172 return cme;
3175 const rb_callable_method_entry_t *
3176 rb_aliased_callable_method_entry(const rb_callable_method_entry_t *me)
3178 return aliased_callable_method_entry(me);
3181 static VALUE
3182 vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3184 calling->cc = &VM_CC_ON_STACK(Qundef,
3185 vm_call_general,
3186 { 0 },
3187 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
3189 return vm_call_method_each_type(ec, cfp, calling);
3192 static enum method_missing_reason
3193 ci_missing_reason(const struct rb_callinfo *ci)
3195 enum method_missing_reason stat = MISSING_NOENTRY;
3196 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
3197 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
3198 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
3199 return stat;
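/* These bits survive into the eventual error: a plain missing call raises
 * NoMethodError, while MISSING_VCALL (a bare identifier like `foo`, with no
 * receiver or parentheses) downgrades it to NameError; MISSING_FCALL and
 * MISSING_SUPER refine the wording of the message. */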
3202 static VALUE
3203 vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
3204 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol)
3206 ASSUME(calling->argc >= 0);
3207 /* Also assumes CALLER_SETUP_ARG is already done. */
3209 enum method_missing_reason missing_reason = MISSING_NOENTRY;
3210 int argc = calling->argc;
3211 VALUE recv = calling->recv;
3212 VALUE klass = CLASS_OF(recv);
3213 ID mid = rb_check_id(&symbol);
3214 int flags = VM_CALL_FCALL |
3215 VM_CALL_OPT_SEND |
3216 (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
3218 if (UNLIKELY(! mid)) {
3219 mid = idMethodMissing;
3220 missing_reason = ci_missing_reason(ci);
3221 ec->method_missing_reason = missing_reason;
3223 /* E.g. when argc == 2
3225 * | | | | TOPN
3226 * | | +------+
3227 * | | +---> | arg1 | 0
3228 * +------+ | +------+
3229 * | arg1 | -+ +-> | arg0 | 1
3230 * +------+ | +------+
3231 * | arg0 | ---+ | sym | 2
3232 * +------+ +------+
3233 * | recv | | recv | 3
3234 * --+------+--------+------+------
3236 int i = argc;
3237 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
3238 INC_SP(1);
3239 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
3240 argc = ++calling->argc;
3242 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
3243 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
3244 TOPN(i) = symbol;
3245 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
3246 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
3247 VALUE exc = rb_make_no_method_exception(
3248 rb_eNoMethodError, 0, recv, argc, argv, priv);
3250 rb_exc_raise(exc);
3252 else {
3253 TOPN(i) = rb_str_intern(symbol);
3257 calling->ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci));
3258 calling->cc = &VM_CC_ON_STACK(klass,
3259 vm_call_general,
3260 { .method_missing_reason = missing_reason },
3261 rb_callable_method_entry_with_refinements(klass, mid, NULL));
3263 return vm_call_method(ec, reg_cfp, calling);
3266 static VALUE
3267 vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3269 RB_DEBUG_COUNTER_INC(ccf_opt_send);
3271 int i;
3272 VALUE sym;
3274 CALLER_SETUP_ARG(reg_cfp, calling, calling->ci);
3276 i = calling->argc - 1;
3278 if (calling->argc == 0) {
3279 rb_raise(rb_eArgError, "no method name given");
3281 else {
3282 sym = TOPN(i);
3283 /* E.g. when i == 2
3285 * | | | | TOPN
3286 * +------+ | |
3287 * | arg1 | ---+ | | 0
3288 * +------+ | +------+
3289 * | arg0 | -+ +-> | arg1 | 1
3290 * +------+ | +------+
3291 * | sym | +---> | arg0 | 2
3292 * +------+ +------+
3293 * | recv | | recv | 3
3294 * --+------+--------+------+------
3296 /* shift arguments */
3297 if (i > 0) {
3298 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
3300 calling->argc -= 1;
3301 DEC_SP(1);
3303 return vm_call_symbol(ec, reg_cfp, calling, calling->ci, sym);
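/* The diagrams above are both a single MEMMOVE over overlapping stack slots:
 * opt_send slides the arguments down one slot to consume the symbol, while
 * vm_call_symbol's method_missing path slides them up to open a slot for it.
 * A standalone model of the opt_send direction: */
#if 0 /* standalone sketch; compile separately */
#include <stdio.h>
#include <string.h>

int
main(void)
{
    /* stack grows upward: [recv, sym, arg0, arg1], sp == 4 */
    int stack[8] = { 100, 7, 1, 2 };
    int sp = 4;

    /* drop the sym slot by sliding the args down one place */
    memmove(&stack[1], &stack[2], (sp - 2) * sizeof(int));
    sp--;   /* DEC_SP(1) */

    printf("[%d, %d, %d] sp=%d\n", stack[0], stack[1], stack[2], sp);  /* [100, 1, 2] sp=3 */
    return 0;
}
#endif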
3307 static VALUE
3308 vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3309 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
3311 RB_DEBUG_COUNTER_INC(ccf_method_missing);
3313 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
3314 unsigned int argc;
3316 CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
3317 argc = calling->argc + 1;
3319 unsigned int flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
3320 calling->argc = argc;
3322 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
3323 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
3324 vm_check_canary(ec, reg_cfp->sp);
3325 if (argc > 1) {
3326 MEMMOVE(argv+1, argv, VALUE, argc-1);
3328 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
3329 INC_SP(1);
3331 ec->method_missing_reason = reason;
3332 calling->ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci));
3333 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 },
3334 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
3335 return vm_call_method(ec, reg_cfp, calling);
3338 static VALUE
3339 vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3341 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->ci, vm_cc_cmethod_missing_reason(calling->cc));
3344 static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
3345 static VALUE
3346 vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
3348 klass = RCLASS_SUPER(klass);
3350 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->ci)) : NULL;
3351 if (cme == NULL) {
3352 return vm_call_method_nome(ec, cfp, calling);
3354 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
3355 cme->def->body.refined.orig_me) {
3356 cme = refined_method_callable_without_refinement(cme);
3359 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 }, cme);
3361 return vm_call_method_each_type(ec, cfp, calling);
3364 static inline VALUE
3365 find_refinement(VALUE refinements, VALUE klass)
3367 if (NIL_P(refinements)) {
3368 return Qnil;
3370 return rb_hash_lookup(refinements, klass);
3373 PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
3374 static rb_control_frame_t *
3375 current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
3377 rb_control_frame_t *top_cfp = cfp;
3379 if (cfp->iseq && cfp->iseq->body->type == ISEQ_TYPE_BLOCK) {
3380 const rb_iseq_t *local_iseq = cfp->iseq->body->local_iseq;
3382 do {
3383 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3384 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
3385 /* TODO: orphan block */
3386 return top_cfp;
3388 } while (cfp->iseq != local_iseq);
3390 return cfp;
3393 static const rb_callable_method_entry_t *
3394 refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
3396 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
3397 const rb_callable_method_entry_t *cme;
3399 if (orig_me->defined_class == 0) {
3400 cme = NULL;
3401 rb_notimplement();
3403 else {
3404 cme = (const rb_callable_method_entry_t *)orig_me;
3407 VM_ASSERT(callable_method_entry_p(cme));
3409 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
3410 cme = NULL;
3413 return cme;
3416 static const rb_callable_method_entry_t *
3417 search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3419 ID mid = vm_ci_mid(calling->ci);
3420 const rb_cref_t *cref = vm_get_cref(cfp->ep);
3421 const struct rb_callcache * const cc = calling->cc;
3422 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
3424 for (; cref; cref = CREF_NEXT(cref)) {
3425 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
3426 if (NIL_P(refinement)) continue;
3428 const rb_callable_method_entry_t *const ref_me =
3429 rb_callable_method_entry(refinement, mid);
3431 if (ref_me) {
3432 if (vm_cc_call(cc) == vm_call_super_method) {
3433 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
3434 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
3435 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
3436 continue;
3440 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
3441 cme->def != ref_me->def) {
3442 cme = ref_me;
3444 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
3445 return cme;
3448 else {
3449 return NULL;
3453 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
3454 return refined_method_callable_without_refinement(vm_cc_cme(cc));
3456 else {
3457 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
3458 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
3459 return cme;
3463 static VALUE
3464 vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3466 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 },
3467 search_refined_method(ec, cfp, calling));
3469 if (vm_cc_cme(ref_cc)) {
3470 calling->cc = ref_cc;
3471 return vm_call_method(ec, cfp, calling);
3473 else {
3474 return vm_call_method_nome(ec, cfp, calling);
3478 static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
3480 NOINLINE(static VALUE
3481 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
3482 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
3484 static VALUE
3485 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
3486 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
3488 int argc = calling->argc;
3490 /* remove self */
3491 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
3492 DEC_SP(1);
3494 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
3497 static VALUE
3498 vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3500 RB_DEBUG_COUNTER_INC(ccf_opt_call);
3502 const struct rb_callinfo *ci = calling->ci;
3503 VALUE procval = calling->recv;
3504 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
3507 static VALUE
3508 vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3510 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
3512 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
3513 const struct rb_callinfo *ci = calling->ci;
3515 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
3516 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
3518 else {
3519 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
3520 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
3521 return vm_call_general(ec, reg_cfp, calling);
3525 static VALUE
3526 vm_call_opt_struct_aref0(rb_execution_context_t *ec, struct rb_calling_info *calling)
3528 VALUE recv = calling->recv;
3530 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
3531 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
3532 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF);
3534 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
3535 return internal_RSTRUCT_GET(recv, off);
3538 static VALUE
3539 vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3541 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref);
3543 VALUE ret = vm_call_opt_struct_aref0(ec, calling);
3544 reg_cfp->sp -= 1;
3545 return ret;
3548 static VALUE
3549 vm_call_opt_struct_aset0(rb_execution_context_t *ec, struct rb_calling_info *calling, VALUE val)
3551 VALUE recv = calling->recv;
3553 VM_ASSERT(RB_TYPE_P(recv, T_STRUCT));
3554 VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED);
3555 VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET);
3557 rb_check_frozen(recv);
3559 const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index;
3560 internal_RSTRUCT_SET(recv, off, val);
3562 return val;
3565 static VALUE
3566 vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3568 RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset);
3570 VALUE ret = vm_call_opt_struct_aset0(ec, calling, *(reg_cfp->sp - 1));
3571 reg_cfp->sp -= 2;
3572 return ret;
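/* Struct accessors generated by Struct.new never run Ruby code here: the
 * optimized method entry just records a member index, and the call becomes a
 * fixed-index slot read/write (plus a frozen check for the writer). A toy
 * standalone model, with hypothetical names: */
#if 0 /* standalone sketch; compile separately */
#include <stdio.h>

struct toy_struct { int slots[3]; };

static int  toy_aref(const struct toy_struct *s, unsigned idx) { return s->slots[idx]; }
static void toy_aset(struct toy_struct *s, unsigned idx, int v) { s->slots[idx] = v; }

int
main(void)
{
    struct toy_struct p = {{ 0, 0, 0 }};
    toy_aset(&p, 1, 42);                /* p.y = 42 */
    printf("%d\n", toy_aref(&p, 1));    /* p.y      */
    return 0;
}
#endif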
3575 NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3576 const struct rb_callinfo *ci, const struct rb_callcache *cc));
3578 static VALUE
3579 vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
3580 const struct rb_callinfo *ci, const struct rb_callcache *cc)
3582 switch (vm_cc_cme(cc)->def->body.optimized.type) {
3583 case OPTIMIZED_METHOD_TYPE_SEND:
3584 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
3585 return vm_call_opt_send(ec, cfp, calling);
3586 case OPTIMIZED_METHOD_TYPE_CALL:
3587 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
3588 return vm_call_opt_call(ec, cfp, calling);
3589 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
3590 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
3591 return vm_call_opt_block_call(ec, cfp, calling);
3592 case OPTIMIZED_METHOD_TYPE_STRUCT_AREF:
3593 CALLER_SETUP_ARG(cfp, calling, ci);
3594 CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
3595 rb_check_arity(calling->argc, 0, 0);
3596 CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE));
3597 return vm_call_opt_struct_aref(ec, cfp, calling);
3599 case OPTIMIZED_METHOD_TYPE_STRUCT_ASET:
3600 CALLER_SETUP_ARG(cfp, calling, ci);
3601 CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
3602 rb_check_arity(calling->argc, 1, 1);
3603 CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE));
3604 return vm_call_opt_struct_aset(ec, cfp, calling);
3605 default:
3606 rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
3610 #define VM_CALL_METHOD_ATTR(var, func, nohook) \
3611 if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
3612 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
3613 vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
3614 var = func; \
3615 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
3616 vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
3618 else { \
3619 nohook; \
3620 var = func; \
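/* Attribute reads/writes never push a C frame, so the usual frame-driven
 * c-call/c-return events would be lost; the macro above emits them by hand
 * while TracePoint-style hooks are armed, and only takes the `nohook` branch
 * (which typically installs the fastpath) when they are not -- the fastpath
 * skips the hooks, so it must not be cached while tracing. A standalone
 * sketch of the shape: */
#if 0 /* standalone sketch; compile separately */
#include <stdio.h>

static int tracing;   /* stands in for ruby_vm_event_flags */

#define TRACED_CALL(var, expr, nohook) \
    if (tracing) { \
        puts("c-call");  (var) = (expr);  puts("c-return"); \
    } \
    else { \
        nohook;  (var) = (expr); \
    }

int
main(void)
{
    int v;
    TRACED_CALL(v, 1 + 41, puts("fastpath stays enabled"));
    printf("%d\n", v);
    tracing = 1;
    TRACED_CALL(v, 1 + 41, (void)0);
    return 0;
}
#endif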
3623 static VALUE
3624 vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3626 const struct rb_callinfo *ci = calling->ci;
3627 const struct rb_callcache *cc = calling->cc;
3628 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
3629 VALUE v;
3631 switch (cme->def->type) {
3632 case VM_METHOD_TYPE_ISEQ:
3633 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
3634 return vm_call_iseq_setup(ec, cfp, calling);
3636 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3637 case VM_METHOD_TYPE_CFUNC:
3638 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
3639 return vm_call_cfunc(ec, cfp, calling);
3641 case VM_METHOD_TYPE_ATTRSET:
3642 CALLER_SETUP_ARG(cfp, calling, ci);
3643 CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
3645 rb_check_arity(calling->argc, 1, 1);
3646 vm_cc_attr_index_set(cc, 0);
3647 const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG);
3648 VM_CALL_METHOD_ATTR(v,
3649 vm_call_attrset(ec, cfp, calling),
3650 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
3651 return v;
3653 case VM_METHOD_TYPE_IVAR:
3654 CALLER_SETUP_ARG(cfp, calling, ci);
3655 CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
3656 rb_check_arity(calling->argc, 0, 0);
3657 vm_cc_attr_index_set(cc, 0);
3658 const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT);
3659 VM_CALL_METHOD_ATTR(v,
3660 vm_call_ivar(ec, cfp, calling),
3661 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
3662 return v;
3664 case VM_METHOD_TYPE_MISSING:
3665 vm_cc_method_missing_reason_set(cc, 0);
3666 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
3667 return vm_call_method_missing(ec, cfp, calling);
3669 case VM_METHOD_TYPE_BMETHOD:
3670 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
3671 return vm_call_bmethod(ec, cfp, calling);
3673 case VM_METHOD_TYPE_ALIAS:
3674 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
3675 return vm_call_alias(ec, cfp, calling);
3677 case VM_METHOD_TYPE_OPTIMIZED:
3678 return vm_call_optimized(ec, cfp, calling, ci, cc);
3680 case VM_METHOD_TYPE_UNDEF:
3681 break;
3683 case VM_METHOD_TYPE_ZSUPER:
3684 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
3686 case VM_METHOD_TYPE_REFINED:
3687 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
3688 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
3689 return vm_call_refined(ec, cfp, calling);
3692 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
3695 NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
3697 static VALUE
3698 vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3700 /* method missing */
3701 const struct rb_callinfo *ci = calling->ci;
3702 const int stat = ci_missing_reason(ci);
3704 if (vm_ci_mid(ci) == idMethodMissing) {
3705 rb_control_frame_t *reg_cfp = cfp;
3706 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
3707 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
3709 else {
3710 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
3714 static inline VALUE
3715 vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3717 const struct rb_callinfo *ci = calling->ci;
3718 const struct rb_callcache *cc = calling->cc;
3720 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
3722 if (vm_cc_cme(cc) != NULL) {
3723 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
3724 case METHOD_VISI_PUBLIC: /* likely */
3725 return vm_call_method_each_type(ec, cfp, calling);
3727 case METHOD_VISI_PRIVATE:
3728 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
3729 enum method_missing_reason stat = MISSING_PRIVATE;
3730 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
3732 vm_cc_method_missing_reason_set(cc, stat);
3733 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
3734 return vm_call_method_missing(ec, cfp, calling);
3736 return vm_call_method_each_type(ec, cfp, calling);
3738 case METHOD_VISI_PROTECTED:
3739 if (!(vm_ci_flag(ci) & VM_CALL_OPT_SEND)) {
3740 if (!rb_obj_is_kind_of(cfp->self, vm_cc_cme(cc)->defined_class)) {
3741 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
3742 return vm_call_method_missing(ec, cfp, calling);
3744 else {
3745 /* cache the method info in a dummy on-stack cc */
3746 VM_ASSERT(vm_cc_cme(cc) != NULL);
3747 struct rb_callcache cc_on_stack = *cc;
3748 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
3749 calling->cc = &cc_on_stack;
3750 return vm_call_method_each_type(ec, cfp, calling);
3753 return vm_call_method_each_type(ec, cfp, calling);
3755 default:
3756 rb_bug("unreachable");
3759 else {
3760 return vm_call_method_nome(ec, cfp, calling);
3764 static VALUE
3765 vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3767 RB_DEBUG_COUNTER_INC(ccf_general);
3768 return vm_call_method(ec, reg_cfp, calling);
3771 void
3772 rb_vm_cc_general(const struct rb_callcache *cc)
3774 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
3775 VM_ASSERT(cc != vm_cc_empty());
3777 *(vm_call_handler *)&cc->call_ = vm_call_general;
3780 static VALUE
3781 vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3783 RB_DEBUG_COUNTER_INC(ccf_super_method);
3785 // This line is introduced to make different from `vm_call_general` because some compilers (VC we found)
3786 // can merge the function and the address of the function becomes same.
3787 // The address of `vm_call_super_method` is used in `search_refined_method`, so it should be different.
3788 if (ec == NULL) rb_bug("unreachable");
3790 /* this check is required to distinguish with other functions. */
3791 VM_ASSERT(vm_cc_call(calling->cc) == vm_call_super_method);
3792 return vm_call_method(ec, reg_cfp, calling);
3795 /* super */
3797 static inline VALUE
3798 vm_search_normal_superclass(VALUE klass)
3800 if (BUILTIN_TYPE(klass) == T_ICLASS &&
3801 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
3802 klass = RBASIC(klass)->klass;
3804 klass = RCLASS_ORIGIN(klass);
3805 return RCLASS_SUPER(klass);
3808 NORETURN(static void vm_super_outside(void));
3810 static void
3811 vm_super_outside(void)
3813 rb_raise(rb_eNoMethodError, "super called outside of method");
3816 static const struct rb_callcache *
3817 empty_cc_for_super(void)
3819 #ifdef MJIT_HEADER
3820 return rb_vm_empty_cc_for_super();
3821 #else
3822 return &vm_empty_cc_for_super;
3823 #endif
3826 static const struct rb_callcache *
3827 vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
3829 VALUE current_defined_class;
3830 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
3832 if (!me) {
3833 vm_super_outside();
3836 current_defined_class = me->defined_class;
3838 if (!NIL_P(RCLASS_REFINED_CLASS(current_defined_class))) {
3839 current_defined_class = RCLASS_REFINED_CLASS(current_defined_class);
3842 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
3843 !FL_TEST_RAW(current_defined_class, RMODULE_INCLUDED_INTO_REFINEMENT) &&
3844 reg_cfp->iseq != method_entry_iseqptr(me) &&
3845 !rb_obj_is_kind_of(recv, current_defined_class)) {
3846 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
3847 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
3849 if (m) { /* not bound UnboundMethod */
3850 rb_raise(rb_eTypeError,
3851 "self has wrong type to call super in this context: "
3852 "%"PRIsVALUE" (expected %"PRIsVALUE")",
3853 rb_obj_class(recv), m);
3857 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
3858 rb_raise(rb_eRuntimeError,
3859 "implicit argument passing of super from method defined"
3860 " by define_method() is not supported."
3861 " Specify all arguments explicitly.");
3864 ID mid = me->def->original_id;
3866 // update iseq. really? (TODO)
3867 cd->ci = vm_ci_new_runtime(mid,
3868 vm_ci_flag(cd->ci),
3869 vm_ci_argc(cd->ci),
3870 vm_ci_kwarg(cd->ci));
3872 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
3874 const struct rb_callcache *cc;
3876 VALUE klass = vm_search_normal_superclass(me->defined_class);
3878 if (!klass) {
3879 /* bound instance method of module */
3880 cc = vm_cc_new(klass, NULL, vm_call_method_missing);
3881 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
3883 else {
3884 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
3885 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
3887 // define_method can cache for different method id
3888 if (cached_cme == NULL) {
3889 // empty_cc_for_super is not markable object
3890 cd->cc = empty_cc_for_super();
3892 else if (cached_cme->called_id != mid) {
3893 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
3894 if (cme) {
3895 cc = vm_cc_new(klass, cme, vm_call_super_method);
3896 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
3898 else {
3899 cd->cc = cc = empty_cc_for_super();
3902 else {
3903 switch (cached_cme->def->type) {
3904 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
3905 case VM_METHOD_TYPE_REFINED:
3906 // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
3907 case VM_METHOD_TYPE_ATTRSET:
3908 case VM_METHOD_TYPE_IVAR:
3909 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
3910 break;
3911 default:
3912 break; // use fastpath
3917 VM_ASSERT((vm_cc_cme(cc), true));
3919 return cc;
3922 /* yield */
3924 static inline int
3925 block_proc_is_lambda(const VALUE procval)
3927 rb_proc_t *proc;
3929 if (procval) {
3930 GetProcPtr(procval, proc);
3931 return proc->is_lambda;
3933 else {
3934 return 0;
3938 static VALUE
3939 vm_yield_with_cfunc(rb_execution_context_t *ec,
3940 const struct rb_captured_block *captured,
3941 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
3942 const rb_callable_method_entry_t *me)
3944 int is_lambda = FALSE; /* TODO */
3945 VALUE val, arg, blockarg;
3946 int frame_flag;
3947 const struct vm_ifunc *ifunc = captured->code.ifunc;
3949 if (is_lambda) {
3950 arg = rb_ary_new4(argc, argv);
3952 else if (argc == 0) {
3953 arg = Qnil;
3955 else {
3956 arg = argv[0];
3959 blockarg = rb_vm_bh_to_procval(ec, block_handler);
3961 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
3962 if (kw_splat) {
3963 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
3966 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
3967 frame_flag,
3968 self,
3969 VM_GUARDED_PREV_EP(captured->ep),
3970 (VALUE)me,
3971 0, ec->cfp->sp, 0, 0);
3972 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
3973 rb_vm_pop_frame(ec);
3975 return val;
3978 static VALUE
3979 vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
3981 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
3984 static inline int
3985 vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
3987 int i;
3988 long len = RARRAY_LEN(ary);
3990 CHECK_VM_STACK_OVERFLOW(cfp, iseq->body->param.lead_num);
3992 for (i=0; i<len && i<iseq->body->param.lead_num; i++) {
3993 argv[i] = RARRAY_AREF(ary, i);
3996 return i;
3999 static inline VALUE
4000 vm_callee_setup_block_arg_arg0_check(VALUE *argv)
4002 VALUE ary, arg0 = argv[0];
4003 ary = rb_check_array_type(arg0);
4004 #if 0
4005 argv[0] = arg0;
4006 #else
4007 VM_ASSERT(argv[0] == arg0);
4008 #endif
4009 return ary;
4012 static int
4013 vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
4015 if (rb_simple_iseq_p(iseq)) {
4016 rb_control_frame_t *cfp = ec->cfp;
4017 VALUE arg0;
4019 CALLER_SETUP_ARG(cfp, calling, ci);
4020 CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
4022 if (arg_setup_type == arg_setup_block &&
4023 calling->argc == 1 &&
4024 iseq->body->param.flags.has_lead &&
4025 !iseq->body->param.flags.ambiguous_param0 &&
4026 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
4027 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
4030 if (calling->argc != iseq->body->param.lead_num) {
4031 if (arg_setup_type == arg_setup_block) {
4032 if (calling->argc < iseq->body->param.lead_num) {
4033 int i;
4034 CHECK_VM_STACK_OVERFLOW(cfp, iseq->body->param.lead_num);
4035 for (i=calling->argc; i<iseq->body->param.lead_num; i++) argv[i] = Qnil;
4036 calling->argc = iseq->body->param.lead_num; /* fill rest parameters */
4038 else if (calling->argc > iseq->body->param.lead_num) {
4039 calling->argc = iseq->body->param.lead_num; /* simply truncate arguments */
4042 else {
4043 argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
4047 return 0;
4049 else {
4050 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
4054 static int
4055 vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int kw_splat, VALUE block_handler, enum arg_setup_type arg_setup_type)
4057 struct rb_calling_info calling_entry, *calling;
4059 calling = &calling_entry;
4060 calling->argc = argc;
4061 calling->block_handler = block_handler;
4062 calling->kw_splat = kw_splat;
4063 calling->recv = Qundef;
4064 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, (kw_splat ? VM_CALL_KW_SPLAT : 0), 0, 0);
4066 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
4069 /* ruby iseq -> ruby block */
4071 static VALUE
4072 vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4073 struct rb_calling_info *calling, const struct rb_callinfo *ci,
4074 bool is_lambda, VALUE block_handler)
4076 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
4077 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
4078 const int arg_size = iseq->body->param.size;
4079 VALUE * const rsp = GET_SP() - calling->argc;
4080 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, rsp, is_lambda ? arg_setup_method : arg_setup_block);
4082 SET_SP(rsp);
4084 vm_push_frame(ec, iseq,
4085 VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0),
4086 captured->self,
4087 VM_GUARDED_PREV_EP(captured->ep), 0,
4088 iseq->body->iseq_encoded + opt_pc,
4089 rsp + arg_size,
4090 iseq->body->local_table_size - arg_size, iseq->body->stack_max);
4092 return Qundef;
4095 static VALUE
4096 vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4097 struct rb_calling_info *calling, const struct rb_callinfo *ci,
4098 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
4100 if (calling->argc < 1) {
4101 rb_raise(rb_eArgError, "no receiver given");
4103 else {
4104 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
4105 CALLER_SETUP_ARG(reg_cfp, calling, ci);
4106 calling->recv = TOPN(--calling->argc);
4107 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol);
4111 static VALUE
4112 vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4113 struct rb_calling_info *calling, const struct rb_callinfo *ci,
4114 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
4116 VALUE val;
4117 int argc;
4118 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
4119 CALLER_SETUP_ARG(ec->cfp, calling, ci);
4120 CALLER_REMOVE_EMPTY_KW_SPLAT(ec->cfp, calling, ci);
4121 argc = calling->argc;
4122 val = vm_yield_with_cfunc(ec, captured, captured->self, argc, STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
4123 POPN(argc); /* TODO: should put before C/yield? */
4124 return val;
4127 static VALUE
4128 vm_proc_to_block_handler(VALUE procval)
4130 const struct rb_block *block = vm_proc_block(procval);
4132 switch (vm_block_type(block)) {
4133 case block_type_iseq:
4134 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
4135 case block_type_ifunc:
4136 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
4137 case block_type_symbol:
4138 return VM_BH_FROM_SYMBOL(block->as.symbol);
4139 case block_type_proc:
4140 return VM_BH_FROM_PROC(block->as.proc);
4142 VM_UNREACHABLE(vm_yield_with_proc);
4143 return Qundef;
4146 static VALUE
4147 vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4148 struct rb_calling_info *calling, const struct rb_callinfo *ci,
4149 bool is_lambda, VALUE block_handler)
4151 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
4152 VALUE proc = VM_BH_TO_PROC(block_handler);
4153 is_lambda = block_proc_is_lambda(proc);
4154 block_handler = vm_proc_to_block_handler(proc);
4157 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
4160 static inline VALUE
4161 vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4162 struct rb_calling_info *calling, const struct rb_callinfo *ci,
4163 bool is_lambda, VALUE block_handler)
4165 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
4166 struct rb_calling_info *calling, const struct rb_callinfo *ci,
4167 bool is_lambda, VALUE block_handler);
4169 switch (vm_block_handler_type(block_handler)) {
4170 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
4171 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
4172 case block_handler_type_proc: func = vm_invoke_proc_block; break;
4173 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
4174 default: rb_bug("vm_invoke_block: unreachable");
4177 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
4180 static VALUE
4181 vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
4183 const rb_execution_context_t *ec = GET_EC();
4184 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
4185 struct rb_captured_block *captured;
4187 if (cfp == 0) {
4188 rb_bug("vm_make_proc_with_iseq: unreachable");
4191 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
4192 captured->code.iseq = blockiseq;
4194 return rb_vm_make_proc(ec, captured, rb_cProc);
4197 static VALUE
4198 vm_once_exec(VALUE iseq)
4200 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
4201 return rb_proc_call_with_block(proc, 0, 0, Qnil);
4204 static VALUE
4205 vm_once_clear(VALUE data)
4207 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
4208 is->once.running_thread = NULL;
4209 return Qnil;
4212 /* defined insn */
4214 static bool
4215 check_respond_to_missing(VALUE obj, VALUE v)
4217 VALUE args[2];
4218 VALUE r;
4220 args[0] = obj; args[1] = Qfalse;
4221 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
4222 if (r != Qundef && RTEST(r)) {
4223 return true;
4225 else {
4226 return false;
4230 static bool
4231 vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
4233 VALUE klass;
4234 enum defined_type type = (enum defined_type)op_type;
4236 switch (type) {
4237 case DEFINED_IVAR:
4238 return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
4239 break;
4240 case DEFINED_GVAR:
4241 return rb_gvar_defined(SYM2ID(obj));
4242 break;
4243 case DEFINED_CVAR: {
4244 const rb_cref_t *cref = vm_get_cref(GET_EP());
4245 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
4246 return rb_cvar_defined(klass, SYM2ID(obj));
4247 break;
4249 case DEFINED_CONST:
4250 case DEFINED_CONST_FROM: {
4251 bool allow_nil = type == DEFINED_CONST;
4252 klass = v;
4253 return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
4254 break;
4256 case DEFINED_FUNC:
4257 klass = CLASS_OF(v);
4258 return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
4259 break;
4260 case DEFINED_METHOD:{
4261 VALUE klass = CLASS_OF(v);
4262 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
4264 if (me) {
4265 switch (METHOD_ENTRY_VISI(me)) {
4266 case METHOD_VISI_PRIVATE:
4267 break;
4268 case METHOD_VISI_PROTECTED:
4269 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
4270 break;
4272 case METHOD_VISI_PUBLIC:
4273 return true;
4274 break;
4275 default:
4276 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
4279 else {
4280 return check_respond_to_missing(obj, v);
4282 break;
4284 case DEFINED_YIELD:
4285 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
4286 return true;
4288 break;
4289 case DEFINED_ZSUPER:
4291 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
4293 if (me) {
4294 VALUE klass = vm_search_normal_superclass(me->defined_class);
4295 ID id = me->def->original_id;
4297 return rb_method_boundp(klass, id, 0);
4300 break;
4301 case DEFINED_REF:{
4302 return vm_getspecial(ec, GET_LEP(), Qfalse, FIX2INT(obj)) != Qnil;
4303 break;
4305 default:
4306 rb_bug("unimplemented defined? type (VM)");
4307 break;
4310 return false;
4313 bool
4314 rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
4316 return vm_defined(ec, reg_cfp, op_type, obj, v);
4319 static const VALUE *
4320 vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
4322 rb_num_t i;
4323 const VALUE *ep = reg_ep;
4324 for (i = 0; i < lv; i++) {
4325 ep = GET_PREV_EP(ep);
4327 return ep;
4330 static VALUE
4331 vm_get_special_object(const VALUE *const reg_ep,
4332 enum vm_special_object_type type)
4334 switch (type) {
4335 case VM_SPECIAL_OBJECT_VMCORE:
4336 return rb_mRubyVMFrozenCore;
4337 case VM_SPECIAL_OBJECT_CBASE:
4338 return vm_get_cbase(reg_ep);
4339 case VM_SPECIAL_OBJECT_CONST_BASE:
4340 return vm_get_const_base(reg_ep);
4341 default:
4342 rb_bug("putspecialobject insn: unknown value_type %d", type);
4346 static VALUE
4347 vm_concat_array(VALUE ary1, VALUE ary2st)
4349 const VALUE ary2 = ary2st;
4350 VALUE tmp1 = rb_check_to_array(ary1);
4351 VALUE tmp2 = rb_check_to_array(ary2);
4353 if (NIL_P(tmp1)) {
4354 tmp1 = rb_ary_new3(1, ary1);
4357 if (NIL_P(tmp2)) {
4358 tmp2 = rb_ary_new3(1, ary2);
4361 if (tmp1 == ary1) {
4362 tmp1 = rb_ary_dup(ary1);
4364 return rb_ary_concat(tmp1, tmp2);
4367 static VALUE
4368 vm_splat_array(VALUE flag, VALUE ary)
4370 VALUE tmp = rb_check_to_array(ary);
4371 if (NIL_P(tmp)) {
4372 return rb_ary_new3(1, ary);
4374 else if (RTEST(flag)) {
4375 return rb_ary_dup(tmp);
4377 else {
4378 return tmp;
4382 VALUE
4383 rb_vm_splat_array(VALUE flag, VALUE ary)
4385 return vm_splat_array(flag, ary);
4388 static VALUE
4389 vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
4391 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
4393 if (flag & VM_CHECKMATCH_ARRAY) {
4394 long i;
4395 const long n = RARRAY_LEN(pattern);
4397 for (i = 0; i < n; i++) {
4398 VALUE v = RARRAY_AREF(pattern, i);
4399 VALUE c = check_match(ec, v, target, type);
4401 if (RTEST(c)) {
4402 return c;
4405 return Qfalse;
4407 else {
4408 return check_match(ec, pattern, target, type);
4412 static VALUE
4413 vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
4415 const VALUE kw_bits = *(ep - bits);
4417 if (FIXNUM_P(kw_bits)) {
4418 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
4419 if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
4420 return Qfalse;
4422 else {
4423 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
4424 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
4426 return Qtrue;
4429 static void
4430 vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
4432 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
4433 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
4434 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
4435 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
4437 switch (flag) {
4438 case RUBY_EVENT_CALL:
4439 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
4440 return;
4441 case RUBY_EVENT_C_CALL:
4442 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
4443 return;
4444 case RUBY_EVENT_RETURN:
4445 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
4446 return;
4447 case RUBY_EVENT_C_RETURN:
4448 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
4449 return;
4454 static VALUE
4455 vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
4457 VALUE ns;
4459 if ((ns = vm_search_const_defined_class(cbase, id)) == 0) {
4460 return ns;
4462 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
4463 return rb_public_const_get_at(ns, id);
4465 else {
4466 return rb_const_get_at(ns, id);
4470 static VALUE
4471 vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
4473 if (!RB_TYPE_P(klass, T_CLASS)) {
4474 return 0;
4476 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
4477 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
4479 if (tmp != super) {
4480 rb_raise(rb_eTypeError,
4481 "superclass mismatch for class %"PRIsVALUE"",
4482 rb_id2str(id));
4484 else {
4485 return klass;
4488 else {
4489 return klass;
4493 static VALUE
4494 vm_check_if_module(ID id, VALUE mod)
4496 if (!RB_TYPE_P(mod, T_MODULE)) {
4497 return 0;
4499 else {
4500 return mod;
4504 static VALUE
4505 declare_under(ID id, VALUE cbase, VALUE c)
4507 rb_set_class_path_string(c, cbase, rb_id2str(id));
4508 rb_const_set(cbase, id, c);
4509 return c;
4512 static VALUE
4513 vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
4515 /* new class declaration */
4516 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
4517 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
4518 rb_define_alloc_func(c, rb_get_alloc_func(c));
4519 rb_class_inherited(s, c);
4520 return c;
4523 static VALUE
4524 vm_declare_module(ID id, VALUE cbase)
4526 /* new module declaration */
4527 return declare_under(id, cbase, rb_module_new());
4530 NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
4531 static void
4532 unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
4534 VALUE name = rb_id2str(id);
4535 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
4536 name, type);
4537 VALUE location = rb_const_source_location_at(cbase, id);
4538 if (!NIL_P(location)) {
4539 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
4540 " previous definition of %"PRIsVALUE" was here",
4541 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
4543 rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
4546 static VALUE
4547 vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
4549 VALUE klass;
4551 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
4552 rb_raise(rb_eTypeError,
4553 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
4554 rb_obj_class(super));
4557 vm_check_if_namespace(cbase);
4559 /* find klass */
4560 rb_autoload_load(cbase, id);
4561 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
4562 if (!vm_check_if_class(id, flags, super, klass))
4563 unmatched_redefinition("class", cbase, id, klass);
4564 return klass;
4566 else {
4567 return vm_declare_class(id, flags, cbase, super);
4571 static VALUE
4572 vm_define_module(ID id, rb_num_t flags, VALUE cbase)
4574 VALUE mod;
4576 vm_check_if_namespace(cbase);
4577 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
4578 if (!vm_check_if_module(id, mod))
4579 unmatched_redefinition("module", cbase, id, mod);
4580 return mod;
4582 else {
4583 return vm_declare_module(id, cbase);
4587 static VALUE
4588 vm_find_or_create_class_by_id(ID id,
4589 rb_num_t flags,
4590 VALUE cbase,
4591 VALUE super)
4593 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
4595 switch (type) {
4596 case VM_DEFINECLASS_TYPE_CLASS:
4597 /* classdef returns class scope value */
4598 return vm_define_class(id, flags, cbase, super);
4600 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
4601 /* classdef returns class scope value */
4602 return rb_singleton_class(cbase);
4604 case VM_DEFINECLASS_TYPE_MODULE:
4605 /* classdef returns class scope value */
4606 return vm_define_module(id, flags, cbase);
4608 default:
4609 rb_bug("unknown defineclass type: %d", (int)type);
4613 static rb_method_visibility_t
4614 vm_scope_visibility_get(const rb_execution_context_t *ec)
4616 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
4618 if (!vm_env_cref_by_cref(cfp->ep)) {
4619 return METHOD_VISI_PUBLIC;
4621 else {
4622 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
4626 static int
4627 vm_scope_module_func_check(const rb_execution_context_t *ec)
4629 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
4631 if (!vm_env_cref_by_cref(cfp->ep)) {
4632 return FALSE;
4634 else {
4635 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
4639 static void
4640 vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
4642 VALUE klass;
4643 rb_method_visibility_t visi;
4644 rb_cref_t *cref = vm_ec_cref(ec);
4646 if (is_singleton) {
4647 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
4648 visi = METHOD_VISI_PUBLIC;
4650 else {
4651 klass = CREF_CLASS_FOR_DEFINITION(cref);
4652 visi = vm_scope_visibility_get(ec);
4655 if (NIL_P(klass)) {
4656 rb_raise(rb_eTypeError, "no class/module to add method");
4659 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
4661 if (!is_singleton && vm_scope_module_func_check(ec)) {
4662 klass = rb_singleton_class(klass);
4663 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
4667 static VALUE
4668 vm_invokeblock_i(struct rb_execution_context_struct *ec,
4669 struct rb_control_frame_struct *reg_cfp,
4670 struct rb_calling_info *calling)
4672 const struct rb_callinfo *ci = calling->ci;
4673 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
4675 if (block_handler == VM_BLOCK_HANDLER_NONE) {
4676 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
4678 else {
4679 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
4683 #ifdef MJIT_HEADER
4684 static const struct rb_callcache *
4685 vm_search_method_wrap(const struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE recv)
4687 return vm_search_method((VALUE)reg_cfp->iseq, cd, recv);
4690 static const struct rb_callcache *
4691 vm_search_invokeblock(const struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE recv)
4693 static const struct rb_callcache cc = {
4694 .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
4695 .klass = 0,
4696 .cme_ = 0,
4697 .call_ = vm_invokeblock_i,
4698 .aux_ = {0},
4700 return &cc;
4703 # define mexp_search_method vm_search_method_wrap
4704 # define mexp_search_super vm_search_super_method
4705 # define mexp_search_invokeblock vm_search_invokeblock
4706 #else
4707 enum method_explorer_type {
4708 mexp_search_method,
4709 mexp_search_invokeblock,
4710 mexp_search_super,
4712 #endif
4714 static
4715 #ifndef MJIT_HEADER
4716 inline
4717 #endif
4718 VALUE
4719 vm_sendish(
4720 struct rb_execution_context_struct *ec,
4721 struct rb_control_frame_struct *reg_cfp,
4722 struct rb_call_data *cd,
4723 VALUE block_handler,
4724 #ifdef MJIT_HEADER
4725 const struct rb_callcache *(*method_explorer)(const struct rb_control_frame_struct *cfp, struct rb_call_data *cd, VALUE recv)
4726 #else
4727 enum method_explorer_type method_explorer
4728 #endif
4730 VALUE val = Qundef;
4731 const struct rb_callinfo *ci = cd->ci;
4732 const struct rb_callcache *cc;
4733 int argc = vm_ci_argc(ci);
4734 VALUE recv = TOPN(argc);
4735 struct rb_calling_info calling = {
4736 .block_handler = block_handler,
4737 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
4738 .recv = recv,
4739 .argc = argc,
4740 .ci = ci,
4743 // The enum-based branch and inlining are faster in VM, but function pointers without inlining are faster in JIT.
4744 #ifdef MJIT_HEADER
4745 calling.cc = cc = method_explorer(GET_CFP(), cd, recv);
4746 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
4747 #else
4748 switch (method_explorer) {
4749 case mexp_search_method:
4750 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
4751 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
4752 break;
4753 case mexp_search_super:
4754 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
4755 calling.ci = cd->ci; // TODO: does it safe?
4756 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
4757 break;
4758 case mexp_search_invokeblock:
4759 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
4760 break;
4762 #endif
4764 if (val != Qundef) {
4765 return val; /* CFUNC normal return */
4767 else {
4768 RESTORE_REGS(); /* CFP pushed in cc->call() */
4771 #ifdef MJIT_HEADER
4772 /* When calling ISeq which may catch an exception from JIT-ed
4773 code, we should not call mjit_exec directly to prevent the
4774 caller frame from being canceled. That's because the caller
4775 frame may have stack values in the local variables and the
4776 cancelling the caller frame will purge them. But directly
4777 calling mjit_exec is faster... */
4778 if (GET_ISEQ()->body->catch_except_p) {
4779 VM_ENV_FLAGS_SET(GET_EP(), VM_FRAME_FLAG_FINISH);
4780 return vm_exec(ec, true);
4782 else if ((val = mjit_exec(ec)) == Qundef) {
4783 VM_ENV_FLAGS_SET(GET_EP(), VM_FRAME_FLAG_FINISH);
4784 return vm_exec(ec, false);
4786 else {
4787 return val;
4789 #else
4790 /* When calling from VM, longjmp in the callee won't purge any
4791 JIT-ed caller frames. So it's safe to directly call
4792 mjit_exec. */
4793 return mjit_exec(ec);
4794 #endif
4797 /* object.c */
4798 VALUE rb_nil_to_s(VALUE);
4799 VALUE rb_true_to_s(VALUE);
4800 VALUE rb_false_to_s(VALUE);
4801 /* numeric.c */
4802 VALUE rb_int_to_s(int argc, VALUE *argv, VALUE x);
4803 VALUE rb_fix_to_s(VALUE);
4804 /* variable.c */
4805 VALUE rb_mod_to_s(VALUE);
4806 VALUE rb_mod_name(VALUE);
4808 static VALUE
4809 vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
4811 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
4813 switch (TYPE(recv)) {
4814 case T_STRING:
4815 return recv;
4816 case T_SYMBOL:
4817 if (check_cfunc(vm_cc_cme(cc), rb_sym_to_s)) {
4818 // rb_sym_to_s() allocates a mutable string, but since we are only
4819 // going to use this string for interpolation, it's fine to use the
4820 // frozen string.
4821 return rb_sym2str(recv);
4823 break;
4824 case T_MODULE:
4825 case T_CLASS:
4826 if (check_cfunc(vm_cc_cme(cc), rb_mod_to_s)) {
4827 // rb_mod_to_s() allocates a mutable string, but since we are only
4828 // going to use this string for interpolation, it's fine to use the
4829 // frozen string.
4830 VALUE val = rb_mod_name(recv);
4831 if (val == Qnil) {
4832 val = rb_mod_to_s(recv);
4834 return val;
4836 break;
4837 case T_NIL:
4838 if (check_cfunc(vm_cc_cme(cc), rb_nil_to_s)) {
4839 return rb_nil_to_s(recv);
4841 break;
4842 case T_TRUE:
4843 if (check_cfunc(vm_cc_cme(cc), rb_true_to_s)) {
4844 return rb_true_to_s(recv);
4846 break;
4847 case T_FALSE:
4848 if (check_cfunc(vm_cc_cme(cc), rb_false_to_s)) {
4849 return rb_false_to_s(recv);
4851 break;
4852 case T_FIXNUM:
4853 if (check_cfunc(vm_cc_cme(cc), rb_int_to_s)) {
4854 return rb_fix_to_s(recv);
4856 break;
4858 return Qundef;
4861 static VALUE
4862 vm_opt_str_freeze(VALUE str, int bop, ID id)
4864 if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
4865 return str;
4867 else {
4868 return Qundef;
4872 /* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
4873 #define id_cmp idCmp
4875 static VALUE
4876 vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
4878 if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
4879 if (num == 0) {
4880 return Qnil;
4882 else {
4883 struct cmp_opt_data cmp_opt = { 0, 0 };
4884 VALUE result = *ptr;
4885 rb_snum_t i = num - 1;
4886 while (i-- > 0) {
4887 const VALUE v = *++ptr;
4888 if (OPTIMIZED_CMP(v, result, cmp_opt) > 0) {
4889 result = v;
4892 return result;
4895 else {
4896 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMax, 0, NULL, RB_NO_KEYWORDS);
4900 static VALUE
4901 vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
4903 if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
4904 if (num == 0) {
4905 return Qnil;
4907 else {
4908 struct cmp_opt_data cmp_opt = { 0, 0 };
4909 VALUE result = *ptr;
4910 rb_snum_t i = num - 1;
4911 while (i-- > 0) {
4912 const VALUE v = *++ptr;
4913 if (OPTIMIZED_CMP(v, result, cmp_opt) < 0) {
4914 result = v;
4917 return result;
4920 else {
4921 return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idMin, 0, NULL, RB_NO_KEYWORDS);
4925 #undef id_cmp
4927 #define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
4929 // For MJIT inlining
4930 static inline bool
4931 vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, rb_serial_t ic_serial, const VALUE *reg_ep)
4933 if (ic_serial == GET_GLOBAL_CONSTANT_STATE() &&
4934 ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p())) {
4936 VM_ASSERT((flags & IMEMO_CONST_CACHE_SHAREABLE) ? rb_ractor_shareable_p(value) : true);
4938 return (ic_cref == NULL || // no need to check CREF
4939 ic_cref == vm_get_cref(reg_ep));
4941 return false;
4944 static bool
4945 vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
4947 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
4948 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, GET_IC_SERIAL(ice), reg_ep);
4951 // YJIT needs this function to never allocate and never raise
4952 bool
4953 rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
4955 return ic->entry && vm_ic_hit_p(ic->entry, reg_ep);
4958 static void
4959 vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep)
4962 struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)rb_imemo_new(imemo_constcache, 0, 0, 0, 0);
4963 RB_OBJ_WRITE(ice, &ice->value, val);
4964 ice->ic_cref = vm_get_const_key_cref(reg_ep);
4965 SET_IC_SERIAL(ice, GET_GLOBAL_CONSTANT_STATE() - ruby_vm_const_missing_count);
4966 if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
4967 ruby_vm_const_missing_count = 0;
4968 RB_OBJ_WRITE(iseq, &ic->entry, ice);
4969 #ifndef MJIT_HEADER
4970 // MJIT and YJIT can't be on at the same time, so there is no need to
4971 // notify YJIT about changes to the IC when running inside MJIT code.
4972 rb_yjit_constant_ic_update(iseq, ic);
4973 #endif
4976 static VALUE
4977 vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
4979 rb_thread_t *th = rb_ec_thread_ptr(ec);
4980 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
4982 again:
4983 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
4984 return is->once.value;
4986 else if (is->once.running_thread == NULL) {
4987 VALUE val;
4988 is->once.running_thread = th;
4989 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
4990 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
4991 /* is->once.running_thread is cleared by vm_once_clear() */
4992 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
4993 return val;
4995 else if (is->once.running_thread == th) {
4996 /* recursive once */
4997 return vm_once_exec((VALUE)iseq);
4999 else {
5000 /* waiting for finish */
5001 RUBY_VM_CHECK_INTS(ec);
5002 rb_thread_schedule();
5003 goto again;
5007 static OFFSET
5008 vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
5010 switch (OBJ_BUILTIN_TYPE(key)) {
5011 case -1:
5012 case T_FLOAT:
5013 case T_SYMBOL:
5014 case T_BIGNUM:
5015 case T_STRING:
5016 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
5017 SYMBOL_REDEFINED_OP_FLAG |
5018 INTEGER_REDEFINED_OP_FLAG |
5019 FLOAT_REDEFINED_OP_FLAG |
5020 NIL_REDEFINED_OP_FLAG |
5021 TRUE_REDEFINED_OP_FLAG |
5022 FALSE_REDEFINED_OP_FLAG |
5023 STRING_REDEFINED_OP_FLAG)) {
5024 st_data_t val;
5025 if (RB_FLOAT_TYPE_P(key)) {
5026 double kval = RFLOAT_VALUE(key);
5027 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
5028 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
5031 if (rb_hash_stlike_lookup(hash, key, &val)) {
5032 return FIX2LONG((VALUE)val);
5034 else {
5035 return else_offset;
5039 return 0;
5042 NORETURN(static void
5043 vm_stack_consistency_error(const rb_execution_context_t *ec,
5044 const rb_control_frame_t *,
5045 const VALUE *));
5046 static void
5047 vm_stack_consistency_error(const rb_execution_context_t *ec,
5048 const rb_control_frame_t *cfp,
5049 const VALUE *bp)
5051 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
5052 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
5053 static const char stack_consistency_error[] =
5054 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
5055 #if defined RUBY_DEVEL
5056 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
5057 rb_str_cat_cstr(mesg, "\n");
5058 rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
5059 rb_exc_fatal(rb_exc_new3(rb_eFatal, mesg));
5060 #else
5061 rb_bug(stack_consistency_error, nsp, nbp);
5062 #endif
5065 static VALUE
5066 vm_opt_plus(VALUE recv, VALUE obj)
5068 if (FIXNUM_2_P(recv, obj) &&
5069 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
5070 return rb_fix_plus_fix(recv, obj);
5072 else if (FLONUM_2_P(recv, obj) &&
5073 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
5074 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
5076 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
5077 return Qundef;
5079 else if (RBASIC_CLASS(recv) == rb_cFloat &&
5080 RBASIC_CLASS(obj) == rb_cFloat &&
5081 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
5082 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
5084 else if (RBASIC_CLASS(recv) == rb_cString &&
5085 RBASIC_CLASS(obj) == rb_cString &&
5086 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
5087 return rb_str_opt_plus(recv, obj);
5089 else if (RBASIC_CLASS(recv) == rb_cArray &&
5090 RBASIC_CLASS(obj) == rb_cArray &&
5091 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
5092 return rb_ary_plus(recv, obj);
5094 else {
5095 return Qundef;
5099 static VALUE
5100 vm_opt_minus(VALUE recv, VALUE obj)
5102 if (FIXNUM_2_P(recv, obj) &&
5103 BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
5104 return rb_fix_minus_fix(recv, obj);
5106 else if (FLONUM_2_P(recv, obj) &&
5107 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
5108 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
5110 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
5111 return Qundef;
5113 else if (RBASIC_CLASS(recv) == rb_cFloat &&
5114 RBASIC_CLASS(obj) == rb_cFloat &&
5115 BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
5116 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
5118 else {
5119 return Qundef;
5123 static VALUE
5124 vm_opt_mult(VALUE recv, VALUE obj)
5126 if (FIXNUM_2_P(recv, obj) &&
5127 BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
5128 return rb_fix_mul_fix(recv, obj);
5130 else if (FLONUM_2_P(recv, obj) &&
5131 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
5132 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
5134 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
5135 return Qundef;
5137 else if (RBASIC_CLASS(recv) == rb_cFloat &&
5138 RBASIC_CLASS(obj) == rb_cFloat &&
5139 BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
5140 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
5142 else {
5143 return Qundef;
5147 static VALUE
5148 vm_opt_div(VALUE recv, VALUE obj)
5150 if (FIXNUM_2_P(recv, obj) &&
5151 BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
5152 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
5154 else if (FLONUM_2_P(recv, obj) &&
5155 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
5156 return rb_flo_div_flo(recv, obj);
5158 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
5159 return Qundef;
5161 else if (RBASIC_CLASS(recv) == rb_cFloat &&
5162 RBASIC_CLASS(obj) == rb_cFloat &&
5163 BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
5164 return rb_flo_div_flo(recv, obj);
5166 else {
5167 return Qundef;
5171 static VALUE
5172 vm_opt_mod(VALUE recv, VALUE obj)
5174 if (FIXNUM_2_P(recv, obj) &&
5175 BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
5176 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
5178 else if (FLONUM_2_P(recv, obj) &&
5179 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
5180 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
5182 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
5183 return Qundef;
5185 else if (RBASIC_CLASS(recv) == rb_cFloat &&
5186 RBASIC_CLASS(obj) == rb_cFloat &&
5187 BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
5188 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
5190 else {
5191 return Qundef;
5195 VALUE
5196 rb_vm_opt_mod(VALUE recv, VALUE obj)
5198 return vm_opt_mod(recv, obj);
5201 static VALUE
5202 vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
5204 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
5205 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
5207 if (val != Qundef) {
5208 return RBOOL(!RTEST(val));
5212 return Qundef;
5215 static VALUE
5216 vm_opt_lt(VALUE recv, VALUE obj)
5218 if (FIXNUM_2_P(recv, obj) &&
5219 BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
5220 return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
5222 else if (FLONUM_2_P(recv, obj) &&
5223 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
5224 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
5226 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
5227 return Qundef;
5229 else if (RBASIC_CLASS(recv) == rb_cFloat &&
5230 RBASIC_CLASS(obj) == rb_cFloat &&
5231 BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
5232 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
5233 return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
5235 else {
5236 return Qundef;
5240 static VALUE
5241 vm_opt_le(VALUE recv, VALUE obj)
5243 if (FIXNUM_2_P(recv, obj) &&
5244 BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
5245 return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
5247 else if (FLONUM_2_P(recv, obj) &&
5248 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
5249 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
5251 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
5252 return Qundef;
5254 else if (RBASIC_CLASS(recv) == rb_cFloat &&
5255 RBASIC_CLASS(obj) == rb_cFloat &&
5256 BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
5257 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
5258 return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
5260 else {
5261 return Qundef;
5265 static VALUE
5266 vm_opt_gt(VALUE recv, VALUE obj)
5268 if (FIXNUM_2_P(recv, obj) &&
5269 BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
5270 return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
5272 else if (FLONUM_2_P(recv, obj) &&
5273 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
5274 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
5276 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
5277 return Qundef;
5279 else if (RBASIC_CLASS(recv) == rb_cFloat &&
5280 RBASIC_CLASS(obj) == rb_cFloat &&
5281 BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
5282 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
5283 return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
5285 else {
5286 return Qundef;
5290 static VALUE
5291 vm_opt_ge(VALUE recv, VALUE obj)
5293 if (FIXNUM_2_P(recv, obj) &&
5294 BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
5295 return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
5297 else if (FLONUM_2_P(recv, obj) &&
5298 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
5299 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
5301 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
5302 return Qundef;
5304 else if (RBASIC_CLASS(recv) == rb_cFloat &&
5305 RBASIC_CLASS(obj) == rb_cFloat &&
5306 BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
5307 CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
5308 return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
5310 else {
5311 return Qundef;
5316 static VALUE
5317 vm_opt_ltlt(VALUE recv, VALUE obj)
5319 if (SPECIAL_CONST_P(recv)) {
5320 return Qundef;
5322 else if (RBASIC_CLASS(recv) == rb_cString &&
5323 BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
5324 return rb_str_concat(recv, obj);
5326 else if (RBASIC_CLASS(recv) == rb_cArray &&
5327 BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
5328 return rb_ary_push(recv, obj);
5330 else {
5331 return Qundef;
5335 static VALUE
5336 vm_opt_and(VALUE recv, VALUE obj)
5338 if (FIXNUM_2_P(recv, obj) &&
5339 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
5340 return (recv & obj) | 1;
5342 else {
5343 return Qundef;
5347 static VALUE
5348 vm_opt_or(VALUE recv, VALUE obj)
5350 if (FIXNUM_2_P(recv, obj) &&
5351 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
5352 return recv | obj;
5354 else {
5355 return Qundef;
5359 static VALUE
5360 vm_opt_aref(VALUE recv, VALUE obj)
5362 if (SPECIAL_CONST_P(recv)) {
5363 if (FIXNUM_2_P(recv, obj) &&
5364 BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
5365 return rb_fix_aref(recv, obj);
5367 return Qundef;
5369 else if (RBASIC_CLASS(recv) == rb_cArray &&
5370 BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
5371 if (FIXNUM_P(obj)) {
5372 return rb_ary_entry_internal(recv, FIX2LONG(obj));
5374 else {
5375 return rb_ary_aref1(recv, obj);
5378 else if (RBASIC_CLASS(recv) == rb_cHash &&
5379 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
5380 return rb_hash_aref(recv, obj);
5382 else {
5383 return Qundef;
5387 static VALUE
5388 vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
5390 if (SPECIAL_CONST_P(recv)) {
5391 return Qundef;
5393 else if (RBASIC_CLASS(recv) == rb_cArray &&
5394 BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
5395 FIXNUM_P(obj)) {
5396 rb_ary_store(recv, FIX2LONG(obj), set);
5397 return set;
5399 else if (RBASIC_CLASS(recv) == rb_cHash &&
5400 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
5401 rb_hash_aset(recv, obj, set);
5402 return set;
5404 else {
5405 return Qundef;
5409 static VALUE
5410 vm_opt_aref_with(VALUE recv, VALUE key)
5412 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
5413 BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG) &&
5414 rb_hash_compare_by_id_p(recv) == Qfalse) {
5415 return rb_hash_aref(recv, key);
5417 else {
5418 return Qundef;
5422 static VALUE
5423 vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
5425 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
5426 BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG) &&
5427 rb_hash_compare_by_id_p(recv) == Qfalse) {
5428 return rb_hash_aset(recv, key, val);
5430 else {
5431 return Qundef;
5435 static VALUE
5436 vm_opt_length(VALUE recv, int bop)
5438 if (SPECIAL_CONST_P(recv)) {
5439 return Qundef;
5441 else if (RBASIC_CLASS(recv) == rb_cString &&
5442 BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
5443 if (bop == BOP_EMPTY_P) {
5444 return LONG2NUM(RSTRING_LEN(recv));
5446 else {
5447 return rb_str_length(recv);
5450 else if (RBASIC_CLASS(recv) == rb_cArray &&
5451 BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
5452 return LONG2NUM(RARRAY_LEN(recv));
5454 else if (RBASIC_CLASS(recv) == rb_cHash &&
5455 BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
5456 return INT2FIX(RHASH_SIZE(recv));
5458 else {
5459 return Qundef;
5463 static VALUE
5464 vm_opt_empty_p(VALUE recv)
5466 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
5467 case Qundef: return Qundef;
5468 case INT2FIX(0): return Qtrue;
5469 default: return Qfalse;
5473 VALUE rb_false(VALUE obj);
5475 static VALUE
5476 vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
5478 if (NIL_P(recv) &&
5479 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
5480 return Qtrue;
5482 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
5483 return Qfalse;
5485 else {
5486 return Qundef;
5490 static VALUE
5491 fix_succ(VALUE x)
5493 switch (x) {
5494 case ~0UL:
5495 /* 0xFFFF_FFFF == INT2FIX(-1)
5496 * `-1.succ` is of course 0. */
5497 return INT2FIX(0);
5498 case RSHIFT(~0UL, 1):
5499 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
5500 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
5501 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
5502 default:
5503 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
5504 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
5505 * == lx*2 + ly*2 + 1
5506 * == (lx*2+1) + (ly*2+1) - 1
5507 * == x + y - 1
5509 * Here, if we put y := INT2FIX(1):
5511 * == x + INT2FIX(1) - 1
5512 * == x + 2 .
5514 return x + 2;
5518 static VALUE
5519 vm_opt_succ(VALUE recv)
5521 if (FIXNUM_P(recv) &&
5522 BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
5523 return fix_succ(recv);
5525 else if (SPECIAL_CONST_P(recv)) {
5526 return Qundef;
5528 else if (RBASIC_CLASS(recv) == rb_cString &&
5529 BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
5530 return rb_str_succ(recv);
5532 else {
5533 return Qundef;
5537 static VALUE
5538 vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
5540 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
5541 return RBOOL(!RTEST(recv));
5543 else {
5544 return Qundef;
5548 static VALUE
5549 vm_opt_regexpmatch2(VALUE recv, VALUE obj)
5551 if (SPECIAL_CONST_P(recv)) {
5552 return Qundef;
5554 else if (RBASIC_CLASS(recv) == rb_cString &&
5555 CLASS_OF(obj) == rb_cRegexp &&
5556 BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
5557 return rb_reg_match(obj, recv);
5559 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
5560 BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
5561 return rb_reg_match(recv, obj);
5563 else {
5564 return Qundef;
5568 rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
5570 NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
5572 static inline void
5573 vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
5574 rb_event_flag_t pc_events, rb_event_flag_t target_event,
5575 rb_hook_list_t *global_hooks, rb_hook_list_t *local_hooks, VALUE val)
5577 rb_event_flag_t event = pc_events & target_event;
5578 VALUE self = GET_SELF();
5580 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
5582 if (event & global_hooks->events) {
5583 /* increment PC because source line is calculated with PC-1 */
5584 reg_cfp->pc++;
5585 vm_dtrace(event, ec);
5586 rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
5587 reg_cfp->pc--;
5590 if (local_hooks != NULL) {
5591 if (event & local_hooks->events) {
5592 /* increment PC because source line is calculated with PC-1 */
5593 reg_cfp->pc++;
5594 rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
5595 reg_cfp->pc--;
5600 // Return true if given cc has cfunc which is NOT handled by opt_send_without_block.
5601 bool
5602 rb_vm_opt_cfunc_p(CALL_CACHE cc, int insn)
5604 switch (insn) {
5605 case BIN(opt_eq):
5606 return check_cfunc(vm_cc_cme(cc), rb_obj_equal);
5607 case BIN(opt_nil_p):
5608 return check_cfunc(vm_cc_cme(cc), rb_false);
5609 case BIN(opt_not):
5610 return check_cfunc(vm_cc_cme(cc), rb_obj_not);
5611 default:
5612 return false;
5616 #define VM_TRACE_HOOK(target_event, val) do { \
5617 if ((pc_events & (target_event)) & enabled_flags) { \
5618 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks, (val)); \
5620 } while (0)
5622 static void
5623 vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
5625 const VALUE *pc = reg_cfp->pc;
5626 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
5627 rb_event_flag_t global_events = enabled_flags;
5629 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
5630 return;
5632 else {
5633 const rb_iseq_t *iseq = reg_cfp->iseq;
5634 size_t pos = pc - iseq->body->iseq_encoded;
5635 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
5636 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
5637 rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
5638 rb_hook_list_t *bmethod_local_hooks = NULL;
5639 rb_event_flag_t bmethod_local_events = 0;
5640 bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
5641 enabled_flags |= iseq_local_events;
5643 VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
5645 if (bmethod_frame) {
5646 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
5647 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
5648 bmethod_local_hooks = me->def->body.bmethod.hooks;
5649 if (bmethod_local_hooks) {
5650 bmethod_local_events = bmethod_local_hooks->events;
5655 if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
5656 #if 0
5657 /* disable trace */
5658 /* TODO: incomplete */
5659 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
5660 #else
5661 /* do not disable trace because of performance problem
5662 * (re-enable overhead)
5664 #endif
5665 return;
5667 else if (ec->trace_arg != NULL) {
5668 /* already tracing */
5669 return;
5671 else {
5672 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
5673 /* Note, not considering iseq local events here since the same
5674 * iseq could be used in multiple bmethods. */
5675 rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
5677 if (0) {
5678 ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
5679 (int)pos,
5680 (int)pc_events,
5681 RSTRING_PTR(rb_iseq_path(iseq)),
5682 (int)rb_iseq_line_no(iseq, pos),
5683 RSTRING_PTR(rb_iseq_label(iseq)));
5685 VM_ASSERT(reg_cfp->pc == pc);
5686 VM_ASSERT(pc_events != 0);
5688 /* check traces */
5689 if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
5690 /* b_call instruction running as a method. Fire call event. */
5691 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks, Qundef);
5693 VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
5694 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
5695 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
5696 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
5697 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
5698 if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
5699 /* b_return instruction running as a method. Fire return event. */
5700 vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks, TOPN(0));
5705 #undef VM_TRACE_HOOK
5707 #if VM_CHECK_MODE > 0
5708 NORETURN( NOINLINE( COLDFUNC
5709 void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
5711 void
5712 Init_vm_stack_canary(void)
5714 /* This has to be called _after_ our PRNG is properly set up. */
5715 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
5716 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
5718 vm_stack_canary_was_born = true;
5719 VM_ASSERT(n == 0);
5722 #ifndef MJIT_HEADER
5723 MJIT_FUNC_EXPORTED void
5724 rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
5726 /* Because a method has already been called, why not call
5727 * another one. */
5728 const char *insn = rb_insns_name(i);
5729 VALUE inspection = rb_inspect(c);
5730 const char *str = StringValueCStr(inspection);
5732 rb_bug("dead canary found at %s: %s", insn, str);
5734 #endif
5736 #else
5737 void Init_vm_stack_canary(void) { /* nothing to do */ }
5738 #endif
5741 /* a part of the following code is generated by this ruby script:
5743 16.times{|i|
5744 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
5745 typedef_args.prepend(", ") if i != 0
5746 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
5747 call_args.prepend(", ") if i != 0
5748 puts %Q{
5749 static VALUE
5750 builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5752 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
5753 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
5757 puts
5758 puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
5759 16.times{|i|
5760 puts " builtin_invoker#{i},"
5762 puts "};"
5765 static VALUE
5766 builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5768 typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
5769 return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
5772 static VALUE
5773 builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5775 typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
5776 return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
5779 static VALUE
5780 builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5782 typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
5783 return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
5786 static VALUE
5787 builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5789 typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
5790 return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
5793 static VALUE
5794 builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5796 typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
5797 return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
5800 static VALUE
5801 builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5803 typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
5804 return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
5807 static VALUE
5808 builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5810 typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
5811 return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
5814 static VALUE
5815 builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5817 typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
5818 return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
5821 static VALUE
5822 builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5824 typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
5825 return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
5828 static VALUE
5829 builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5831 typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
5832 return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
5835 static VALUE
5836 builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5838 typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
5839 return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
5842 static VALUE
5843 builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5845 typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
5846 return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
5849 static VALUE
5850 builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5852 typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
5853 return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
5856 static VALUE
5857 builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5859 typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
5860 return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
5863 static VALUE
5864 builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5866 typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
5867 return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
5870 static VALUE
5871 builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5873 typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
5874 return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
5877 typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
5879 static builtin_invoker
5880 lookup_builtin_invoker(int argc)
5882 static const builtin_invoker invokers[] = {
5883 builtin_invoker0,
5884 builtin_invoker1,
5885 builtin_invoker2,
5886 builtin_invoker3,
5887 builtin_invoker4,
5888 builtin_invoker5,
5889 builtin_invoker6,
5890 builtin_invoker7,
5891 builtin_invoker8,
5892 builtin_invoker9,
5893 builtin_invoker10,
5894 builtin_invoker11,
5895 builtin_invoker12,
5896 builtin_invoker13,
5897 builtin_invoker14,
5898 builtin_invoker15,
5901 return invokers[argc];
5904 static inline VALUE
5905 invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
5907 const bool canary_p = reg_cfp->iseq->body->builtin_inline_p; // Verify an assumption of `Primitive.attr! 'inline'`
5908 SETUP_CANARY(canary_p);
5909 VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, (rb_insn_func_t)bf->func_ptr);
5910 CHECK_CANARY(canary_p, BIN(invokebuiltin));
5911 return ret;
5914 static VALUE
5915 vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
5917 return invoke_bf(ec, cfp, bf, argv);
5920 static VALUE
5921 vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
5923 if (0) { // debug print
5924 fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
5925 for (int i=0; i<bf->argc; i++) {
5926 ruby_debug_printf(":%s ", rb_id2name(cfp->iseq->body->local_table[i+start_index]));
5928 ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc, bf->func_ptr);
5931 if (bf->argc == 0) {
5932 return invoke_bf(ec, cfp, bf, NULL);
5934 else {
5935 const VALUE *argv = cfp->ep - cfp->iseq->body->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
5936 return invoke_bf(ec, cfp, bf, argv);
5940 // for __builtin_inline!()
5942 VALUE
5943 rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
5945 const rb_control_frame_t *cfp = ec->cfp;
5946 return cfp->ep[index];