#ifndef RUBY_VM_CALLINFO_H                               /*-*-C-*-vi:se ft=c:*/
#define RUBY_VM_CALLINFO_H
/**
 * @author Ruby developers <ruby-core@ruby-lang.org>
 * @copyright This file is a part of the programming language Ruby.
 *            Permission is hereby granted, to either redistribute and/or
 *            modify this file, provided that the conditions mentioned in the
 *            file COPYING are met.  Consult the file for details.
 */

#include "debug_counter.h"
#include "internal/class.h"
#include "shape.h"
enum vm_call_flag_bits {
    VM_CALL_ARGS_SPLAT_bit,     // m(*args)
    VM_CALL_ARGS_BLOCKARG_bit,  // m(&block)
    VM_CALL_FCALL_bit,          // m(args)   # receiver is self
    VM_CALL_VCALL_bit,          // m         # method call that looks like a local variable
    VM_CALL_ARGS_SIMPLE_bit,    // (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL
    VM_CALL_KWARG_bit,          // has kwarg
    VM_CALL_KW_SPLAT_bit,       // m(**opts)
    VM_CALL_TAILCALL_bit,       // located at tail position
    VM_CALL_SUPER_bit,          // super
    VM_CALL_ZSUPER_bit,         // zsuper
    VM_CALL_OPT_SEND_bit,       // internal flag
    VM_CALL_KW_SPLAT_MUT_bit,   // kw splat hash can be modified (to avoid allocating a new one)
    VM_CALL_ARGS_SPLAT_MUT_bit, // args splat can be modified (to avoid allocating a new one)
    VM_CALL__END
};
#define VM_CALL_ARGS_SPLAT      (0x01 << VM_CALL_ARGS_SPLAT_bit)
#define VM_CALL_ARGS_BLOCKARG   (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
#define VM_CALL_FCALL           (0x01 << VM_CALL_FCALL_bit)
#define VM_CALL_VCALL           (0x01 << VM_CALL_VCALL_bit)
#define VM_CALL_ARGS_SIMPLE     (0x01 << VM_CALL_ARGS_SIMPLE_bit)
#define VM_CALL_KWARG           (0x01 << VM_CALL_KWARG_bit)
#define VM_CALL_KW_SPLAT        (0x01 << VM_CALL_KW_SPLAT_bit)
#define VM_CALL_TAILCALL        (0x01 << VM_CALL_TAILCALL_bit)
#define VM_CALL_SUPER           (0x01 << VM_CALL_SUPER_bit)
#define VM_CALL_ZSUPER          (0x01 << VM_CALL_ZSUPER_bit)
#define VM_CALL_OPT_SEND        (0x01 << VM_CALL_OPT_SEND_bit)
#define VM_CALL_KW_SPLAT_MUT    (0x01 << VM_CALL_KW_SPLAT_MUT_bit)
#define VM_CALL_ARGS_SPLAT_MUT  (0x01 << VM_CALL_ARGS_SPLAT_MUT_bit)
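
/* Illustrative example: a call such as `m(*args, &blk)` carries
 * (VM_CALL_ARGS_SPLAT | VM_CALL_ARGS_BLOCKARG) in its flag word, and callers
 * typically test individual bits via `vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT`. */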
struct rb_callinfo_kwarg {
    int keyword_len;
    int references;
    VALUE keywords[];
};

static inline size_t
rb_callinfo_kwarg_bytes(int keyword_len)
{
    return rb_size_mul_add_or_raise(
        keyword_len,
        sizeof(VALUE),
        sizeof(struct rb_callinfo_kwarg),
        rb_eRuntimeError);
}
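
/* Worked example (illustrative): with keyword_len == 2 this computes
 * 2 * sizeof(VALUE) + sizeof(struct rb_callinfo_kwarg), i.e. the header plus
 * two trailing keyword entries, raising RuntimeError if the size overflows. */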
// imemo_callinfo
struct rb_callinfo {
    VALUE flags;
    const struct rb_callinfo_kwarg *kwarg;
    VALUE mid;
    VALUE flag;
    VALUE argc;
};

#if !defined(USE_EMBED_CI) || (USE_EMBED_CI+0)
#undef USE_EMBED_CI
#define USE_EMBED_CI 1
#else
#undef USE_EMBED_CI
#define USE_EMBED_CI 0
#endif
#if SIZEOF_VALUE == 8
#define CI_EMBED_TAG_bits   1
#define CI_EMBED_ARGC_bits 15
#define CI_EMBED_FLAG_bits 16
#define CI_EMBED_ID_bits   32
#elif SIZEOF_VALUE == 4
#define CI_EMBED_TAG_bits   1
#define CI_EMBED_ARGC_bits  3
#define CI_EMBED_FLAG_bits 13
#define CI_EMBED_ID_bits   15
#endif
#if (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits + CI_EMBED_ID_bits) != (SIZEOF_VALUE * 8)
#error "CI_EMBED_*_bits must add up to the bit width of VALUE"
#endif
#define CI_EMBED_FLAG 0x01
#define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits)
#define CI_EMBED_ARGC_MASK ((((VALUE)1)<<CI_EMBED_ARGC_bits) - 1)
#define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits)
#define CI_EMBED_FLAG_MASK ((((VALUE)1)<<CI_EMBED_FLAG_bits) - 1)
#define CI_EMBED_ID_SHFT   (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits)
#define CI_EMBED_ID_MASK   ((((VALUE)1)<<CI_EMBED_ID_bits) - 1)
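
/* Layout of a packed (embedded) callinfo, illustrated for SIZEOF_VALUE == 8
 * with the widths defined above (low bit first):
 *
 *   bit  0      : tag, always 1 (RUBY_FIXNUM_FLAG) -- marks the "pointer" as packed
 *   bits 1..15  : argc
 *   bits 16..31 : flag (combination of the VM_CALL_* bits above)
 *   bits 32..63 : mid (the method ID)
 *
 * For example (illustrative), mid = 0x2a, flag = VM_CALL_FCALL, argc = 1 packs to
 *   ((VALUE)0x2a << CI_EMBED_ID_SHFT) | ((VALUE)VM_CALL_FCALL << CI_EMBED_FLAG_SHFT) |
 *   ((VALUE)1 << CI_EMBED_ARGC_SHFT) | 0x01
 * and the fields are recovered by vm_ci_mid(), vm_ci_flag() and vm_ci_argc() below. */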
static inline bool
vm_ci_packed_p(const struct rb_callinfo *ci)
{
    if (!USE_EMBED_CI) {
        return 0;
    }
    if (LIKELY(((VALUE)ci) & 0x01)) {
        return 1;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return 0;
    }
}

static inline bool
vm_ci_p(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci) || IMEMO_TYPE_P(ci, imemo_callinfo)) {
        return 1;
    }
    else {
        return 0;
    }
}

static inline ID
vm_ci_mid(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (((VALUE)ci) >> CI_EMBED_ID_SHFT) & CI_EMBED_ID_MASK;
    }
    else {
        return (ID)ci->mid;
    }
}
static inline unsigned int
vm_ci_flag(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_FLAG_SHFT) & CI_EMBED_FLAG_MASK);
    }
    else {
        return (unsigned int)ci->flag;
    }
}

static inline unsigned int
vm_ci_argc(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_ARGC_SHFT) & CI_EMBED_ARGC_MASK);
    }
    else {
        return (unsigned int)ci->argc;
    }
}

static inline const struct rb_callinfo_kwarg *
vm_ci_kwarg(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return NULL;
    }
    else {
        return ci->kwarg;
    }
}
static inline void
vm_ci_dump(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        ruby_debug_printf("packed_ci ID:%s flag:%x argc:%u\n",
                          rb_id2name(vm_ci_mid(ci)), vm_ci_flag(ci), vm_ci_argc(ci));
    }
    else {
        rp(ci);
    }
}

#define vm_ci_new(mid, flag, argc, kwarg) vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
#define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)

/* This is passed to STATIC_ASSERT. Cannot be an inline function. */
#define VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg) \
    (((mid ) & ~CI_EMBED_ID_MASK)   ? false :      \
     ((flag) & ~CI_EMBED_FLAG_MASK) ? false :      \
     ((argc) & ~CI_EMBED_ARGC_MASK) ? false :      \
      (kwarg)                       ? false : true)
#define vm_ci_new_id(mid, flag, argc, must_zero) \
    ((const struct rb_callinfo *)                \
     ((((VALUE)(mid )) << CI_EMBED_ID_SHFT)   |  \
      (((VALUE)(flag)) << CI_EMBED_FLAG_SHFT) |  \
      (((VALUE)(argc)) << CI_EMBED_ARGC_SHFT) |  \
      RUBY_FIXNUM_FLAG))

// vm_method.c
const struct rb_callinfo *rb_vm_ci_lookup(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg);
void rb_vm_ci_free(const struct rb_callinfo *);
static inline const struct rb_callinfo *
vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
    if (USE_EMBED_CI && VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg)) {
        RB_DEBUG_COUNTER_INC(ci_packed);
        return vm_ci_new_id(mid, flag, argc, kwarg);
    }

    const bool debug = 0;
    if (debug) ruby_debug_printf("%s:%d ", file, line);

    const struct rb_callinfo *ci = rb_vm_ci_lookup(mid, flag, argc, kwarg);

    if (debug) rp(ci);
    if (kwarg) {
        RB_DEBUG_COUNTER_INC(ci_kw);
    }
    else {
        RB_DEBUG_COUNTER_INC(ci_nokw);
    }

    VM_ASSERT(vm_ci_flag(ci) == flag);
    VM_ASSERT(vm_ci_argc(ci) == argc);

    return ci;
}

static inline const struct rb_callinfo *
vm_ci_new_runtime_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
    RB_DEBUG_COUNTER_INC(ci_runtime);
    return vm_ci_new_(mid, flag, argc, kwarg, file, line);
}
#define VM_CALLINFO_NOT_UNDER_GC IMEMO_FL_USER0

#define VM_CI_ON_STACK(mid_, flags_, argc_, kwarg_) \
    (struct rb_callinfo) {                          \
        .flags = T_IMEMO |                          \
            (imemo_callinfo << FL_USHIFT) |         \
            VM_CALLINFO_NOT_UNDER_GC,               \
        .mid   = mid_,                              \
        .flag  = flags_,                            \
        .argc  = argc_,                             \
        .kwarg = kwarg_,                            \
    }
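
/* Illustrative use (hypothetical call site): build a temporary callinfo on
 * the C stack, e.g.
 *   struct rb_callinfo ci = VM_CI_ON_STACK(mid, VM_CALL_ARGS_SIMPLE, 1, NULL);
 * The VM_CALLINFO_NOT_UNDER_GC bit records that this object is not managed by
 * the GC heap. */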
typedef VALUE (*vm_call_handler)(
    struct rb_execution_context_struct *ec,
    struct rb_control_frame_struct *cfp,
    struct rb_calling_info *calling);
// imemo_callcache

struct rb_callcache {
    const VALUE flags;

    /* inline cache: key */
    const VALUE klass; // not marked: marking it would keep the class alive forever.
                       // When klass is collected, the cc is cleared
                       // (cc->klass = 0) at vm_ccs_free().

    /* inline cache: values */
    const struct rb_callable_method_entry_struct * const cme_;
    const vm_call_handler call_;

    union {
        struct {
            uintptr_t value; // Shape ID in upper bits, index in lower bits
        } attr;
        const enum method_missing_reason method_missing_reason; /* used by method_missing */
        VALUE v;
        const struct rb_builtin_function *bf;
    } aux_;
};
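
/* Note on aux_.attr.value (see vm_cc_attr_index_set() below): the destination
 * shape ID is stored in the upper bits (shifted by SHAPE_FLAG_SHIFT) and the
 * attribute index is stored as index + 1 in the lower bits, so a low part of
 * 0 means "no index cached yet". Illustrative encoding:
 *   value = ((uintptr_t)dest_shape_id << SHAPE_FLAG_SHIFT) | (index + 1);
 */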
#define VM_CALLCACHE_UNMARKABLE FL_FREEZE
#define VM_CALLCACHE_ON_STACK   FL_EXIVAR

/* VM_CALLCACHE_IVAR used for IVAR/ATTRSET/STRUCT_AREF/STRUCT_ASET methods */
#define VM_CALLCACHE_IVAR       IMEMO_FL_USER0
#define VM_CALLCACHE_BF         IMEMO_FL_USER1
#define VM_CALLCACHE_SUPER      IMEMO_FL_USER2
#define VM_CALLCACHE_REFINEMENT IMEMO_FL_USER3

enum vm_cc_type {
    cc_type_normal, // chained from ccs
    cc_type_super,
    cc_type_refinement,
};

extern const struct rb_callcache *rb_vm_empty_cc(void);
extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);

#define vm_cc_empty() rb_vm_empty_cc()

static inline void vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id);

static inline void
vm_cc_attr_index_initialize(const struct rb_callcache *cc, shape_id_t shape_id)
{
    vm_cc_attr_index_set(cc, (attr_index_t)-1, shape_id);
}
static inline const struct rb_callcache *
vm_cc_new(VALUE klass,
          const struct rb_callable_method_entry_struct *cme,
          vm_call_handler call,
          enum vm_cc_type type)
{
    struct rb_callcache *cc = IMEMO_NEW(struct rb_callcache, imemo_callcache, klass);
    *((struct rb_callable_method_entry_struct **)&cc->cme_) = (struct rb_callable_method_entry_struct *)cme;
    *((vm_call_handler *)&cc->call_) = call;

    switch (type) {
      case cc_type_normal:
        break;
      case cc_type_super:
        *(VALUE *)&cc->flags |= VM_CALLCACHE_SUPER;
        break;
      case cc_type_refinement:
        *(VALUE *)&cc->flags |= VM_CALLCACHE_REFINEMENT;
        break;
    }

    vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
    RB_DEBUG_COUNTER_INC(cc_new);
    return cc;
}
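
/* Illustrative use (hypothetical call site): cache a freshly resolved method
 * entry for `klass` with a generic dispatch handler, e.g.
 *   const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
 * where vm_call_general is one of the vm_call_handler implementations in
 * vm_insnhelper.c. */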
static inline bool
vm_cc_super_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_SUPER) != 0;
}

static inline bool
vm_cc_refinement_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_REFINEMENT) != 0;
}

#define VM_CC_ON_STACK(clazz, call, aux, cme) \
    (struct rb_callcache) {                   \
        .flags = T_IMEMO |                    \
            (imemo_callcache << FL_USHIFT) |  \
            VM_CALLCACHE_UNMARKABLE |         \
            VM_CALLCACHE_ON_STACK,            \
        .klass = clazz,                       \
        .cme_  = cme,                         \
        .call_ = call,                        \
        .aux_  = aux,                         \
    }
static inline bool
vm_cc_class_check(const struct rb_callcache *cc, VALUE klass)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->klass == 0 ||
              RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
    return cc->klass == klass;
}

static inline int
vm_cc_markable(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return FL_TEST_RAW((VALUE)cc, VM_CALLCACHE_UNMARKABLE) == 0;
}

static inline const struct rb_callable_method_entry_struct *
vm_cc_cme(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ == NULL || // not initialized yet
              !vm_cc_markable(cc) ||
              cc->cme_ != NULL);

    return cc->cme_;
}

static inline vm_call_handler
vm_cc_call(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ != NULL);
    return cc->call_;
}
static inline attr_index_t
vm_cc_attr_index(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return (attr_index_t)((cc->aux_.attr.value & SHAPE_FLAG_MASK) - 1);
}

static inline shape_id_t
vm_cc_attr_index_dest_shape_id(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));

    return cc->aux_.attr.value >> SHAPE_FLAG_SHIFT;
}

static inline void
vm_cc_atomic_shape_and_index(const struct rb_callcache *cc, shape_id_t *shape_id, attr_index_t *index)
{
    uintptr_t cache_value = cc->aux_.attr.value; // Atomically read 64 bits
    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
    return;
}

static inline void
vm_ic_atomic_shape_and_index(const struct iseq_inline_iv_cache_entry *ic, shape_id_t *shape_id, attr_index_t *index)
{
    uintptr_t cache_value = ic->value; // Atomically read 64 bits
    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
    return;
}

static inline shape_id_t
vm_ic_attr_index_dest_shape_id(const struct iseq_inline_iv_cache_entry *ic)
{
    return (shape_id_t)(ic->value >> SHAPE_FLAG_SHIFT);
}
static inline unsigned int
vm_cc_cmethod_missing_reason(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return cc->aux_.method_missing_reason;
}

static inline bool
vm_cc_invalidated_p(const struct rb_callcache *cc)
{
    if (cc->klass && !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))) {
        return false;
    }
    else {
        return true;
    }
}
// For RJIT. `cc_cme` is expected to be the already-fetched result of vm_cc_cme(cc).
static inline bool
vm_cc_valid_p(const struct rb_callcache *cc, const rb_callable_method_entry_t *cc_cme, VALUE klass)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    if (cc->klass == klass && !METHOD_ENTRY_INVALIDATED(cc_cme)) {
        return 1;
    }
    else {
        return 0;
    }
}
/* callcache: mutate */

static inline void
vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(vm_call_handler *)&cc->call_ = call;
}

static inline void
set_vm_cc_ivar(const struct rb_callcache *cc)
{
    *(VALUE *)&cc->flags |= VM_CALLCACHE_IVAR;
}
static inline void
vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id)
{
    uintptr_t *attr_value = (uintptr_t *)&cc->aux_.attr.value;
    if (!vm_cc_markable(cc)) {
        *attr_value = (uintptr_t)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT;
        return;
    }

    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());

    *attr_value = (attr_index_t)(index + 1) | ((uintptr_t)(dest_shape_id) << SHAPE_FLAG_SHIFT);
    set_vm_cc_ivar(cc);
}

static inline bool
vm_cc_ivar_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_IVAR) != 0;
}

static inline void
vm_ic_attr_index_set(const rb_iseq_t *iseq, const struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t dest_shape_id)
{
    *(uintptr_t *)&ic->value = ((uintptr_t)dest_shape_id << SHAPE_FLAG_SHIFT) | (attr_index_t)(index + 1);
}

static inline void
vm_ic_attr_index_initialize(const struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
{
    *(uintptr_t *)&ic->value = (uintptr_t)shape_id << SHAPE_FLAG_SHIFT;
}
static inline void
vm_cc_method_missing_reason_set(const struct rb_callcache *cc, enum method_missing_reason reason)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(enum method_missing_reason *)&cc->aux_.method_missing_reason = reason;
}

static inline void
vm_cc_bf_set(const struct rb_callcache *cc, const struct rb_builtin_function *bf)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(const struct rb_builtin_function **)&cc->aux_.bf = bf;
    *(VALUE *)&cc->flags |= VM_CALLCACHE_BF;
}

static inline bool
vm_cc_bf_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_BF) != 0;
}
static inline void
vm_cc_invalidate(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    VM_ASSERT(cc->klass != 0); // must not already be invalidated

    *(VALUE *)&cc->klass = 0;
    RB_DEBUG_COUNTER_INC(cc_ent_invalidate);
}
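
/* After vm_cc_invalidate(), cc->klass is 0, so vm_cc_class_check() can no
 * longer match and vm_cc_invalidated_p() reports true, letting call sites
 * detect the stale cache entry and redo method lookup. */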
/* calldata */

struct rb_call_data {
    const struct rb_callinfo *ci;
    const struct rb_callcache *cc;
};

struct rb_class_cc_entries {
#if VM_CHECK_MODE > 0
    VALUE debug_sig;
#endif
    int capa;
    int len;
    const struct rb_callable_method_entry_struct *cme;
    struct rb_class_cc_entries_entry {
        unsigned int argc;
        unsigned int flag;
        const struct rb_callcache *cc;
    } *entries;
};

#if VM_CHECK_MODE > 0

const rb_callable_method_entry_t *rb_vm_lookup_overloaded_cme(const rb_callable_method_entry_t *cme);
void rb_vm_dump_overloaded_cme_table(void);
static inline bool
vm_ccs_p(const struct rb_class_cc_entries *ccs)
{
    return ccs->debug_sig == ~(VALUE)ccs;
}

static inline bool
vm_cc_check_cme(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme)
{
    if (vm_cc_cme(cc) == cme ||
        (cme->def->iseq_overload && vm_cc_cme(cc) == rb_vm_lookup_overloaded_cme(cme))) {
        return true;
    }
    else {
#if 1
        // debug print
        fprintf(stderr, "iseq_overload:%d\n", (int)cme->def->iseq_overload);
        rp(cme);
        rp(vm_cc_cme(cc));
        rb_vm_lookup_overloaded_cme(cme);
#endif
        return false;
    }
}

#endif

// gc.c
void rb_vm_ccs_free(struct rb_class_cc_entries *ccs);

#endif /* RUBY_VM_CALLINFO_H */