#ifndef RUBY_VM_CALLINFO_H                               /*-*-C-*-vi:se ft=c:*/
#define RUBY_VM_CALLINFO_H
/**
 * @author Ruby developers <ruby-core@ruby-lang.org>
 * @copyright This file is a part of the programming language Ruby.
 *            Permission is hereby granted, to either redistribute and/or
 *            modify this file, provided that the conditions mentioned in the
 *            file COPYING are met.  Consult the file for details.
 */

#include "debug_counter.h"
#include "internal/class.h"
#include "shape.h"

enum vm_call_flag_bits {
    VM_CALL_ARGS_SPLAT_bit,     // m(*args)
    VM_CALL_ARGS_BLOCKARG_bit,  // m(&block)
    VM_CALL_FCALL_bit,          // m(args)   # receiver is self
    VM_CALL_VCALL_bit,          // m         # method call that looks like a local variable
    VM_CALL_ARGS_SIMPLE_bit,    // !(ci->flag & (SPLAT|BLOCKARG|KWARG|KW_SPLAT|FORWARDING)) && !has_block_iseq
    VM_CALL_KWARG_bit,          // has kwarg
    VM_CALL_KW_SPLAT_bit,       // m(**opts)
    VM_CALL_TAILCALL_bit,       // located at tail position
    VM_CALL_SUPER_bit,          // super
    VM_CALL_ZSUPER_bit,         // zsuper
    VM_CALL_OPT_SEND_bit,       // internal flag
    VM_CALL_KW_SPLAT_MUT_bit,   // kw splat hash can be modified (to avoid allocating a new one)
    VM_CALL_ARGS_SPLAT_MUT_bit, // args splat can be modified (to avoid allocating a new one)
    VM_CALL_FORWARDING_bit,     // m(...)
    VM_CALL__END
};

#define VM_CALL_ARGS_SPLAT      (0x01 << VM_CALL_ARGS_SPLAT_bit)
#define VM_CALL_ARGS_BLOCKARG   (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
#define VM_CALL_FCALL           (0x01 << VM_CALL_FCALL_bit)
#define VM_CALL_VCALL           (0x01 << VM_CALL_VCALL_bit)
#define VM_CALL_ARGS_SIMPLE     (0x01 << VM_CALL_ARGS_SIMPLE_bit)
#define VM_CALL_KWARG           (0x01 << VM_CALL_KWARG_bit)
#define VM_CALL_KW_SPLAT        (0x01 << VM_CALL_KW_SPLAT_bit)
#define VM_CALL_TAILCALL        (0x01 << VM_CALL_TAILCALL_bit)
#define VM_CALL_SUPER           (0x01 << VM_CALL_SUPER_bit)
#define VM_CALL_ZSUPER          (0x01 << VM_CALL_ZSUPER_bit)
#define VM_CALL_OPT_SEND        (0x01 << VM_CALL_OPT_SEND_bit)
#define VM_CALL_KW_SPLAT_MUT    (0x01 << VM_CALL_KW_SPLAT_MUT_bit)
#define VM_CALL_ARGS_SPLAT_MUT  (0x01 << VM_CALL_ARGS_SPLAT_MUT_bit)
#define VM_CALL_FORWARDING      (0x01 << VM_CALL_FORWARDING_bit)
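
/* Illustrative note (not part of the original header): a call site's flag
 * word is a bitwise OR of the masks above. For example, a call written as
 * `m(*args, &blk)` would carry roughly VM_CALL_ARGS_SPLAT |
 * VM_CALL_ARGS_BLOCKARG | VM_CALL_FCALL, while a bare `m` compiles with
 * VM_CALL_FCALL | VM_CALL_VCALL | VM_CALL_ARGS_SIMPLE. The exact
 * combinations are chosen by the compiler and may vary between versions. */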
struct rb_callinfo_kwarg {
    int keyword_len;
    int references;
    VALUE keywords[];
};

static inline size_t
rb_callinfo_kwarg_bytes(int keyword_len)
{
    return rb_size_mul_add_or_raise(
        keyword_len, sizeof(VALUE),
        sizeof(struct rb_callinfo_kwarg), rb_eRuntimeError);
}

// imemo_callinfo
struct rb_callinfo {
    VALUE flags;
    const struct rb_callinfo_kwarg *kwarg;
    VALUE mid;
    VALUE flag;
    VALUE argc;
};

#if !defined(USE_EMBED_CI) || (USE_EMBED_CI+0)
#undef USE_EMBED_CI
#define USE_EMBED_CI 1
#else
#undef USE_EMBED_CI
#define USE_EMBED_CI 0
#endif

#if SIZEOF_VALUE == 8
#define CI_EMBED_TAG_bits   1
#define CI_EMBED_ARGC_bits 15
#define CI_EMBED_FLAG_bits 16
#define CI_EMBED_ID_bits   32
#elif SIZEOF_VALUE == 4
#define CI_EMBED_TAG_bits   1
#define CI_EMBED_ARGC_bits  3
#define CI_EMBED_FLAG_bits 13
#define CI_EMBED_ID_bits   15
#endif

#if (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits + CI_EMBED_ID_bits) != (SIZEOF_VALUE * 8)
#error
#endif

#define CI_EMBED_FLAG 0x01
#define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits)
#define CI_EMBED_ARGC_MASK ((((VALUE)1)<<CI_EMBED_ARGC_bits) - 1)
#define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits)
#define CI_EMBED_FLAG_MASK ((((VALUE)1)<<CI_EMBED_FLAG_bits) - 1)
#define CI_EMBED_ID_SHFT   (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits)
#define CI_EMBED_ID_MASK   ((((VALUE)1)<<CI_EMBED_ID_bits) - 1)
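
/* Illustrative layout sketch (not part of the original header): with the
 * 64-bit field widths above, a packed callinfo VALUE decodes as
 *
 *   bit  0      : tag (0x01) marking the pointer-sized value as packed
 *   bits 1..15  : argc      ((v >> CI_EMBED_ARGC_SHFT) & CI_EMBED_ARGC_MASK)
 *   bits 16..31 : flag      ((v >> CI_EMBED_FLAG_SHFT) & CI_EMBED_FLAG_MASK)
 *   bits 32..63 : method ID ((v >> CI_EMBED_ID_SHFT)   & CI_EMBED_ID_MASK)
 */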
static inline bool
vm_ci_packed_p(const struct rb_callinfo *ci)
{
    if (!USE_EMBED_CI) {
        return 0;
    }
    if (LIKELY(((VALUE)ci) & 0x01)) {
        return 1;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return 0;
    }
}

static inline bool
vm_ci_p(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci) || IMEMO_TYPE_P(ci, imemo_callinfo)) {
        return 1;
    }
    else {
        return 0;
    }
}

static inline ID
vm_ci_mid(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (((VALUE)ci) >> CI_EMBED_ID_SHFT) & CI_EMBED_ID_MASK;
    }
    else {
        return (ID)ci->mid;
    }
}

static inline unsigned int
vm_ci_flag(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_FLAG_SHFT) & CI_EMBED_FLAG_MASK);
    }
    else {
        return (unsigned int)ci->flag;
    }
}

static inline unsigned int
vm_ci_argc(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_ARGC_SHFT) & CI_EMBED_ARGC_MASK);
    }
    else {
        return (unsigned int)ci->argc;
    }
}

static inline const struct rb_callinfo_kwarg *
vm_ci_kwarg(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return NULL;
    }
    else {
        return ci->kwarg;
    }
}

static inline void
vm_ci_dump(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        ruby_debug_printf("packed_ci ID:%s flag:%x argc:%u\n",
                          rb_id2name(vm_ci_mid(ci)), vm_ci_flag(ci), vm_ci_argc(ci));
    }
    else {
        rp(ci);
    }
}

#define vm_ci_new(mid, flag, argc, kwarg) vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
#define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)

/* This is passed to STATIC_ASSERT.  Cannot be an inline function. */
#define VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg) \
    (((mid ) & ~CI_EMBED_ID_MASK)   ? false :      \
     ((flag) & ~CI_EMBED_FLAG_MASK) ? false :      \
     ((argc) & ~CI_EMBED_ARGC_MASK) ? false :      \
      (kwarg)                       ? false : true)

#define vm_ci_new_id(mid, flag, argc, must_zero) \
    ((const struct rb_callinfo *)                \
     ((((VALUE)(mid )) << CI_EMBED_ID_SHFT)   |  \
      (((VALUE)(flag)) << CI_EMBED_FLAG_SHFT) |  \
      (((VALUE)(argc)) << CI_EMBED_ARGC_SHFT) |  \
      RUBY_FIXNUM_FLAG))
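
/* Worked example (illustrative, not part of the original header): when mid,
 * flag and argc all fit in their embedded fields and kwarg is NULL,
 * vm_ci_new_id() produces a tagged, non-heap VALUE whose low bit satisfies
 * vm_ci_packed_p(), and vm_ci_mid()/vm_ci_flag()/vm_ci_argc() simply shift
 * and mask the same word to recover the original fields. */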
const struct rb_callinfo *rb_vm_ci_lookup(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg);
void rb_vm_ci_free(const struct rb_callinfo *);

static inline const struct rb_callinfo *
vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
    if (USE_EMBED_CI && VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg)) {
        RB_DEBUG_COUNTER_INC(ci_packed);
        return vm_ci_new_id(mid, flag, argc, kwarg);
    }

    const bool debug = 0;
    if (debug) ruby_debug_printf("%s:%d ", file, line);

    const struct rb_callinfo *ci = rb_vm_ci_lookup(mid, flag, argc, kwarg);

    if (kwarg) {
        RB_DEBUG_COUNTER_INC(ci_kw);
    }
    else {
        RB_DEBUG_COUNTER_INC(ci_nokw);
    }

    VM_ASSERT(vm_ci_flag(ci) == flag);
    VM_ASSERT(vm_ci_argc(ci) == argc);

    return ci;
}

static inline const struct rb_callinfo *
vm_ci_new_runtime_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
    RB_DEBUG_COUNTER_INC(ci_runtime);
    return vm_ci_new_(mid, flag, argc, kwarg, file, line);
}
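
/* Usage sketch (illustrative, not part of the original header): a caller
 * needing a callinfo for a simple one-argument call might write
 *
 *   const struct rb_callinfo *ci = vm_ci_new(mid, VM_CALL_ARGS_SIMPLE, 1, NULL);
 *
 * and get back either a packed (tagged) CI or a heap-allocated
 * imemo_callinfo obtained through rb_vm_ci_lookup(), depending on whether
 * the fields fit into the embedded encoding. */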

#define VM_CALLINFO_NOT_UNDER_GC IMEMO_FL_USER0

static inline bool
vm_ci_markable(const struct rb_callinfo *ci)
{
    if (! ci) {
        return false; /* or true? This is Qfalse... */
    }
    else if (vm_ci_packed_p(ci)) {
        return true;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return ! FL_ANY_RAW((VALUE)ci, VM_CALLINFO_NOT_UNDER_GC);
    }
}

#define VM_CI_ON_STACK(mid_, flags_, argc_, kwarg_) \
    (struct rb_callinfo) {                          \
        .flags = T_IMEMO |                          \
            (imemo_callinfo << FL_USHIFT) |         \
            VM_CALLINFO_NOT_UNDER_GC,               \
        .mid = mid_, .flag = flags_, .argc = argc_, .kwarg = kwarg_, \
    }

typedef VALUE (*vm_call_handler)(
    struct rb_execution_context_struct *ec,
    struct rb_control_frame_struct *cfp,
    struct rb_calling_info *calling);

struct rb_callcache {
    const VALUE flags;

    /* inline cache: key */
    const VALUE klass; // should not mark it because klass can not be free'd
                       // because of this marking. When klass is collected,
                       // cc will be cleared (cc->klass = 0) at vm_ccs_free().

    /* inline cache: values */
    const struct rb_callable_method_entry_struct * const cme_;
    const vm_call_handler call_;

    union {
        struct {
            uintptr_t value; // Shape ID in upper bits, index in lower bits
        } attr;
        const enum method_missing_reason method_missing_reason; /* used by method_missing */
        VALUE v;
        const struct rb_builtin_function *bf;
    } aux_;
};

#define VM_CALLCACHE_UNMARKABLE FL_FREEZE
#define VM_CALLCACHE_ON_STACK   FL_EXIVAR

/* VM_CALLCACHE_IVAR used for IVAR/ATTRSET/STRUCT_AREF/STRUCT_ASET methods */
#define VM_CALLCACHE_IVAR       IMEMO_FL_USER0
#define VM_CALLCACHE_BF         IMEMO_FL_USER1
#define VM_CALLCACHE_SUPER      IMEMO_FL_USER2
#define VM_CALLCACHE_REFINEMENT IMEMO_FL_USER3

enum vm_cc_type {
    cc_type_normal, // chained from ccs
    cc_type_super,
    cc_type_refinement,
};

extern const struct rb_callcache *rb_vm_empty_cc(void);
extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);

#define vm_cc_empty() rb_vm_empty_cc()

static inline void vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id);

static inline void
vm_cc_attr_index_initialize(const struct rb_callcache *cc, shape_id_t shape_id)
{
    vm_cc_attr_index_set(cc, (attr_index_t)-1, shape_id);
}
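
/* Note (illustrative, not part of the original header): initializing with
 * (attr_index_t)-1 makes vm_cc_attr_index_set() store 0 in the low bits
 * (index + 1 wraps to 0), so "no attribute index cached yet" stays
 * distinguishable from a real index; vm_cc_attr_index() below subtracts 1
 * again when reading the cached value. */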

static inline const struct rb_callcache *
vm_cc_new(VALUE klass,
          const struct rb_callable_method_entry_struct *cme,
          vm_call_handler call,
          enum vm_cc_type type)
{
    struct rb_callcache *cc = IMEMO_NEW(struct rb_callcache, imemo_callcache, klass);
    *((struct rb_callable_method_entry_struct **)&cc->cme_) = (struct rb_callable_method_entry_struct *)cme;
    *((vm_call_handler *)&cc->call_) = call;

    VM_ASSERT(RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS));

    switch (type) {
      case cc_type_normal:
        break;
      case cc_type_super:
        *(VALUE *)&cc->flags |= VM_CALLCACHE_SUPER;
        break;
      case cc_type_refinement:
        *(VALUE *)&cc->flags |= VM_CALLCACHE_REFINEMENT;
        break;
    }

    if (cme->def->type == VM_METHOD_TYPE_ATTRSET || cme->def->type == VM_METHOD_TYPE_IVAR) {
        vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
    }

    RB_DEBUG_COUNTER_INC(cc_new);
    return cc;
}

static inline bool
vm_cc_super_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_SUPER) != 0;
}

static inline bool
vm_cc_refinement_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_REFINEMENT) != 0;
}

#define VM_CC_ON_STACK(clazz, call, aux, cme) \
    (struct rb_callcache) {                   \
        .flags = T_IMEMO |                    \
            (imemo_callcache << FL_USHIFT) |  \
            VM_CALLCACHE_UNMARKABLE |         \
            VM_CALLCACHE_ON_STACK,            \
        .klass = clazz, .cme_ = cme, .call_ = call, .aux_ = aux, \
    }

static inline bool
vm_cc_class_check(const struct rb_callcache *cc, VALUE klass)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->klass == 0 ||
              RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
    return cc->klass == klass;
}

static inline bool
vm_cc_markable(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return FL_TEST_RAW((VALUE)cc, VM_CALLCACHE_UNMARKABLE) == 0;
}

static inline const struct rb_callable_method_entry_struct *
vm_cc_cme(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ == NULL || // not initialized yet
              !vm_cc_markable(cc) ||
              cc->cme_ != NULL);

    return cc->cme_;
}

static inline vm_call_handler
vm_cc_call(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ != NULL);
    return cc->call_;
}

static inline attr_index_t
vm_cc_attr_index(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return (attr_index_t)((cc->aux_.attr.value & SHAPE_FLAG_MASK) - 1);
}

static inline shape_id_t
vm_cc_attr_index_dest_shape_id(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));

    return cc->aux_.attr.value >> SHAPE_FLAG_SHIFT;
}

static inline void
vm_cc_atomic_shape_and_index(const struct rb_callcache *cc, shape_id_t *shape_id, attr_index_t *index)
{
    uintptr_t cache_value = cc->aux_.attr.value; // Atomically read 64 bits
    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
}

static inline void
vm_ic_atomic_shape_and_index(const struct iseq_inline_iv_cache_entry *ic, shape_id_t *shape_id, attr_index_t *index)
{
    uintptr_t cache_value = ic->value; // Atomically read 64 bits
    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
}
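
/* Note (illustrative, not part of the original header): both the callcache
 * and the iseq inline cache pack
 * ((uintptr_t)dest_shape_id << SHAPE_FLAG_SHIFT) | (index + 1) into one word
 * (see the setters below), so a single plain load yields a mutually
 * consistent shape ID and attribute index, with a zero low half decoding to
 * "no index cached". */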

static inline shape_id_t
vm_ic_attr_index_dest_shape_id(const struct iseq_inline_iv_cache_entry *ic)
{
    return (shape_id_t)(ic->value >> SHAPE_FLAG_SHIFT);
}

static inline unsigned int
vm_cc_cmethod_missing_reason(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return cc->aux_.method_missing_reason;
}

static inline bool
vm_cc_invalidated_p(const struct rb_callcache *cc)
{
    if (cc->klass && !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))) {
        return false;
    }
    else {
        return true;
    }
}

// For RJIT. `cc_cme` is expected to be the already-inlined result of `vm_cc_cme(cc)`.
static inline bool
vm_cc_valid_p(const struct rb_callcache *cc, const rb_callable_method_entry_t *cc_cme, VALUE klass)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    if (cc->klass == klass && !METHOD_ENTRY_INVALIDATED(cc_cme)) {
        return 1;
    }
    else {
        return 0;
    }
}

/* callcache: mutate */

static inline void
vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(vm_call_handler *)&cc->call_ = call;
}

static inline void
set_vm_cc_ivar(const struct rb_callcache *cc)
{
    *(VALUE *)&cc->flags |= VM_CALLCACHE_IVAR;
}

static inline void
vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id)
{
    uintptr_t *attr_value = (uintptr_t *)&cc->aux_.attr.value;
    if (!vm_cc_markable(cc)) {
        *attr_value = (uintptr_t)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT;
        return;
    }
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *attr_value = (attr_index_t)(index + 1) | ((uintptr_t)(dest_shape_id) << SHAPE_FLAG_SHIFT);
    set_vm_cc_ivar(cc);
}

static inline bool
vm_cc_ivar_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_IVAR) != 0;
}

static inline void
vm_ic_attr_index_set(const rb_iseq_t *iseq, const struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t dest_shape_id)
{
    *(uintptr_t *)&ic->value = ((uintptr_t)dest_shape_id << SHAPE_FLAG_SHIFT) | (attr_index_t)(index + 1);
}

static inline void
vm_ic_attr_index_initialize(const struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
{
    *(uintptr_t *)&ic->value = (uintptr_t)shape_id << SHAPE_FLAG_SHIFT;
}

static inline void
vm_cc_method_missing_reason_set(const struct rb_callcache *cc, enum method_missing_reason reason)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(enum method_missing_reason *)&cc->aux_.method_missing_reason = reason;
}

static inline void
vm_cc_bf_set(const struct rb_callcache *cc, const struct rb_builtin_function *bf)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(const struct rb_builtin_function **)&cc->aux_.bf = bf;
    *(VALUE *)&cc->flags |= VM_CALLCACHE_BF;
}

static inline bool
vm_cc_bf_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_BF) != 0;
}

static inline void
vm_cc_invalidate(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    VM_ASSERT(cc->klass != 0); // should not be invalidated yet

    *(VALUE *)&cc->klass = 0;
    RB_DEBUG_COUNTER_INC(cc_ent_invalidate);
}
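
/* Note (illustrative, not part of the original header): clearing cc->klass
 * is the invalidation signal for a callcache; vm_cc_invalidated_p() above
 * reports such an entry as invalidated, and vm_cc_valid_p() will no longer
 * match it against any live class. */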

struct rb_call_data {
    const struct rb_callinfo *ci;
    const struct rb_callcache *cc;
};

struct rb_class_cc_entries {
#if VM_CHECK_MODE > 0
    VALUE debug_sig;
#endif
    int capa;
    int len;
    const struct rb_callable_method_entry_struct *cme;
    struct rb_class_cc_entries_entry {
        unsigned int argc;
        unsigned int flag;
        const struct rb_callcache *cc;
    } *entries;
};

#if VM_CHECK_MODE > 0

const rb_callable_method_entry_t *rb_vm_lookup_overloaded_cme(const rb_callable_method_entry_t *cme);
void rb_vm_dump_overloaded_cme_table(void);

static inline bool
vm_ccs_p(const struct rb_class_cc_entries *ccs)
{
    return ccs->debug_sig == ~(VALUE)ccs;
}

static inline bool
vm_cc_check_cme(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme)
{
    if (vm_cc_cme(cc) == cme ||
        (cme->def->iseq_overload && vm_cc_cme(cc) == rb_vm_lookup_overloaded_cme(cme))) {
        return true;
    }
    else {
#if 1
        // debug print
        fprintf(stderr, "iseq_overload:%d\n", (int)cme->def->iseq_overload);
        rp(cme);
        rp(vm_cc_cme(cc));
        rb_vm_lookup_overloaded_cme(cme);
#endif
        return false;
    }
}

#endif

void rb_vm_ccs_free(struct rb_class_cc_entries *ccs);

#endif /* RUBY_VM_CALLINFO_H */