// This file is a fragment of the yjit.o compilation unit. See yjit.c.
#include "internal.h"
#include "vm_sync.h"
#include "vm_callinfo.h"
#include "builtin.h"
#include "gc.h"
#include "iseq.h"
#include "internal/compile.h"
#include "internal/class.h"
#include "yjit.h"
#include "yjit_iface.h"
#include "yjit_codegen.h"
#include "yjit_core.h"
#include "darray.h"

#ifdef HAVE_LIBCAPSTONE
#include <capstone/capstone.h>
static VALUE cYjitDisasm;
static VALUE cYjitDisasmInsn;
#endif

static VALUE mYjit;
static VALUE cYjitBlock;

#if YJIT_STATS
static VALUE cYjitCodeComment;
#endif

#if YJIT_STATS
extern const int rb_vm_max_insn_name_size;
static int64_t exit_op_count[VM_INSTRUCTION_SIZE] = { 0 };
#endif

// Hash table of encoded instructions
extern st_table *rb_encoded_insn_data;

struct rb_yjit_options rb_yjit_opts;
// Size of code pages to allocate
#define CODE_PAGE_SIZE (16 * 1024)

// How many code pages to allocate at once
#define PAGES_PER_ALLOC 512
static const rb_data_type_t yjit_block_type = {
    "YJIT/Block",
    {0, 0, 0, },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};

// Get the PC for a given index in an iseq
static VALUE *
yjit_iseq_pc_at_idx(const rb_iseq_t *iseq, uint32_t insn_idx)
{
    RUBY_ASSERT(iseq != NULL);
    RUBY_ASSERT(insn_idx < iseq->body->iseq_size);
    VALUE *encoded = iseq->body->iseq_encoded;
    VALUE *pc = &encoded[insn_idx];
    return pc;
}

// For debugging. Print the disassembly of an iseq.
RBIMPL_ATTR_MAYBE_UNUSED()
static void
yjit_print_iseq(const rb_iseq_t *iseq)
{
    char *ptr;
    long len;
    VALUE disassembly = rb_iseq_disasm(iseq);
    RSTRING_GETMEM(disassembly, ptr, len);
    fprintf(stderr, "%.*s\n", (int)len, ptr);
}

static int
yjit_opcode_at_pc(const rb_iseq_t *iseq, const VALUE *pc)
{
    const VALUE at_pc = *pc;
    if (FL_TEST_RAW((VALUE)iseq, ISEQ_TRANSLATED)) {
        return rb_vm_insn_addr2opcode((const void *)at_pc);
    }
    else {
        return (int)at_pc;
    }
}
// Verify that calling with ci on receiver goes to callee
static void
check_cfunc_dispatch(VALUE receiver, struct rb_callinfo *ci, void *callee, rb_callable_method_entry_t *compile_time_cme)
{
    if (METHOD_ENTRY_INVALIDATED(compile_time_cme)) {
        rb_bug("yjit: output code uses invalidated cme %p", (void *)compile_time_cme);
    }

    bool callee_correct = false;
    const rb_callable_method_entry_t *cme = rb_callable_method_entry(CLASS_OF(receiver), vm_ci_mid(ci));
    if (cme->def->type == VM_METHOD_TYPE_CFUNC) {
        const rb_method_cfunc_t *cfunc = UNALIGNED_MEMBER_PTR(cme->def, body.cfunc);
        if ((void *)cfunc->func == callee) {
            callee_correct = true;
        }
    }
    if (!callee_correct) {
        rb_bug("yjit: output code calls wrong method");
    }
}

MJIT_FUNC_EXPORTED VALUE rb_hash_has_key(VALUE hash, VALUE key);

// GC root for interacting with the GC
struct yjit_root_struct {
    int unused; // empty structs are not legal in C99
};
// Hash table of BOP blocks
static st_table *blocks_assuming_bops;

static bool
assume_bop_not_redefined(jitstate_t *jit, int redefined_flag, enum ruby_basic_operators bop)
{
    if (BASIC_OP_UNREDEFINED_P(bop, redefined_flag)) {
        RUBY_ASSERT(blocks_assuming_bops);

        jit_ensure_block_entry_exit(jit);
        st_insert(blocks_assuming_bops, (st_data_t)jit->block, 0);
        return true;
    }
    else {
        return false;
    }
}

// Map klass => id_table[mid, set of blocks]
// While a block `b` is in the table, b->callee_cme == rb_callable_method_entry(klass, mid).
// See assume_method_lookup_stable()
static st_table *method_lookup_dependency;

// For adding to method_lookup_dependency data with st_update
struct lookup_dependency_insertion {
    block_t *block;
    ID mid;
};

// Map cme => set of blocks
// See assume_method_lookup_stable()
static st_table *cme_validity_dependency;
static int
add_cme_validity_dependency_i(st_data_t *key, st_data_t *value, st_data_t new_block, int existing)
{
    st_table *block_set;
    if (existing) {
        block_set = (st_table *)*value;
    }
    else {
        // Make the set and put it into cme_validity_dependency
        block_set = st_init_numtable();
        *value = (st_data_t)block_set;
    }

    // Put block into set
    st_insert(block_set, new_block, 1);

    return ST_CONTINUE;
}
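// Reminder on the st_update() callback protocol used here and below: the
// callback receives existing != 0 when the key was already present, may read
// or write the stored value through *value, and returning ST_CONTINUE keeps
// the (possibly updated) entry in the table.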
static int
add_lookup_dependency_i(st_data_t *key, st_data_t *value, st_data_t data, int existing)
{
    struct lookup_dependency_insertion *info = (void *)data;

    // Find or make an id table
    struct rb_id_table *id2blocks;
    if (existing) {
        id2blocks = (void *)*value;
    }
    else {
        // Make an id table and put it into the st_table
        id2blocks = rb_id_table_create(1);
        *value = (st_data_t)id2blocks;
    }

    // Find or make a block set
    st_table *block_set;
    {
        VALUE blocks;
        if (rb_id_table_lookup(id2blocks, info->mid, &blocks)) {
            // Take existing set
            block_set = (st_table *)blocks;
        }
        else {
            // Make new block set and put it into the id table
            block_set = st_init_numtable();
            rb_id_table_insert(id2blocks, info->mid, (VALUE)block_set);
        }
    }

    st_insert(block_set, (st_data_t)info->block, 1);

    return ST_CONTINUE;
}

// Remember that a block assumes that
// `rb_callable_method_entry(receiver_klass, cme->called_id) == cme` and that
// `cme` is valid.
// When either of these assumptions becomes invalid, rb_yjit_method_lookup_change() or
// rb_yjit_cme_invalidate() invalidates the block.
//
// @raise NoMemoryError
static void
assume_method_lookup_stable(VALUE receiver_klass, const rb_callable_method_entry_t *cme, jitstate_t *jit)
{
    RUBY_ASSERT(cme_validity_dependency);
    RUBY_ASSERT(method_lookup_dependency);
    RUBY_ASSERT(rb_callable_method_entry(receiver_klass, cme->called_id) == cme);
    RUBY_ASSERT_ALWAYS(RB_TYPE_P(receiver_klass, T_CLASS) || RB_TYPE_P(receiver_klass, T_ICLASS));
    RUBY_ASSERT_ALWAYS(!rb_objspace_garbage_object_p(receiver_klass));

    jit_ensure_block_entry_exit(jit);

    block_t *block = jit->block;

    cme_dependency_t cme_dep = { receiver_klass, (VALUE)cme };
    rb_darray_append(&block->cme_dependencies, cme_dep);

    st_update(cme_validity_dependency, (st_data_t)cme, add_cme_validity_dependency_i, (st_data_t)block);

    struct lookup_dependency_insertion info = { block, cme->called_id };
    st_update(method_lookup_dependency, (st_data_t)receiver_klass, add_lookup_dependency_i, (st_data_t)&info);
}
static st_table *blocks_assuming_single_ractor_mode;

// Can raise NoMemoryError.
RBIMPL_ATTR_NODISCARD()
static bool
assume_single_ractor_mode(jitstate_t *jit)
{
    if (rb_multi_ractor_p()) return false;

    jit_ensure_block_entry_exit(jit);

    st_insert(blocks_assuming_single_ractor_mode, (st_data_t)jit->block, 1);
    return true;
}

static st_table *blocks_assuming_stable_global_constant_state;

// Assume that the global constant state has not changed since the call to this function.
// Can raise NoMemoryError.
static void
assume_stable_global_constant_state(jitstate_t *jit)
{
    jit_ensure_block_entry_exit(jit);
    st_insert(blocks_assuming_stable_global_constant_state, (st_data_t)jit->block, 1);
}
static int
mark_and_pin_keys_i(st_data_t k, st_data_t v, st_data_t ignore)
{
    rb_gc_mark((VALUE)k);

    return ST_CONTINUE;
}

// GC callback during mark phase
static void
yjit_root_mark(void *ptr)
{
    if (method_lookup_dependency) {
        // TODO: This is a leak. Unused blocks linger in the table forever, preventing the
        // callee class they speculate on from being collected.
        // We could do a bespoke weak reference scheme on classes similar to
        // the interpreter's call cache. See finalizer for T_CLASS and cc_table_free().
        st_foreach(method_lookup_dependency, mark_and_pin_keys_i, 0);
    }

    if (cme_validity_dependency) {
        // Why not let the GC move the cme keys in this table?
        // Because this is basically a compare_by_identity Hash.
        // If a key moves, we would need to reinsert it into the table so it is rehashed.
        // That is tricky to do, especially as it could trigger allocation which could
        // trigger GC. Not sure if it is okay to trigger GC while the GC is updating
        // references.
        st_foreach(cme_validity_dependency, mark_and_pin_keys_i, 0);
    }
}
static void
yjit_root_free(void *ptr)
{
    // Do nothing. The root lives as long as the process.
}

static size_t
yjit_root_memsize(const void *ptr)
{
    // Count off-gc-heap allocation size of the dependency table
    return st_memsize(method_lookup_dependency); // TODO: more accurate accounting
}

// GC callback during compaction
static void
yjit_root_update_references(void *ptr)
{
}

// Custom type for interacting with the GC
// TODO: make this write barrier protected
static const rb_data_type_t yjit_root_type = {
    "yjit_root",
    {yjit_root_mark, yjit_root_free, yjit_root_memsize, yjit_root_update_references},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};

// st_table iterator for invalidating blocks that are keys to the table.
static int
block_set_invalidate_i(st_data_t key, st_data_t v, st_data_t ignore)
{
    block_t *version = (block_t *)key;

    // Thankfully, st_table supports deleting while iterating.
    invalidate_block_version(version);

    return ST_CONTINUE;
}
// Callback for when rb_callable_method_entry(klass, mid) is going to change.
// Invalidate blocks that assume stable method lookup of `mid` in `klass` when this happens.
void
rb_yjit_method_lookup_change(VALUE klass, ID mid)
{
    if (!method_lookup_dependency) return;

    RB_VM_LOCK_ENTER();

    st_data_t image;
    st_data_t key = (st_data_t)klass;
    if (st_lookup(method_lookup_dependency, key, &image)) {
        struct rb_id_table *id2blocks = (void *)image;
        VALUE blocks;

        // Invalidate all blocks in method_lookup_dependency[klass][mid]
        if (rb_id_table_lookup(id2blocks, mid, &blocks)) {
            rb_id_table_delete(id2blocks, mid);

            st_table *block_set = (st_table *)blocks;

#if YJIT_STATS
            yjit_runtime_counters.invalidate_method_lookup += block_set->num_entries;
#endif

            st_foreach(block_set, block_set_invalidate_i, 0);

            st_free_table(block_set);
        }
    }

    RB_VM_LOCK_LEAVE();
}

// Callback for when a cme becomes invalid.
// Invalidate all blocks that depend on cme being valid.
void
rb_yjit_cme_invalidate(VALUE cme)
{
    if (!cme_validity_dependency) return;

    RUBY_ASSERT(IMEMO_TYPE_P(cme, imemo_ment));

    RB_VM_LOCK_ENTER();

    // Delete the block set from the table
    st_data_t cme_as_st_data = (st_data_t)cme;
    st_data_t blocks;
    if (st_delete(cme_validity_dependency, &cme_as_st_data, &blocks)) {
        st_table *block_set = (st_table *)blocks;

#if YJIT_STATS
        yjit_runtime_counters.invalidate_method_lookup += block_set->num_entries;
#endif

        // Invalidate each block
        st_foreach(block_set, block_set_invalidate_i, 0);

        st_free_table(block_set);
    }

    RB_VM_LOCK_LEAVE();
}

// For dealing with refinements
void
rb_yjit_invalidate_all_method_lookup_assumptions(void)
{
    // It looks like Module#using actually doesn't need to invalidate all the
    // method caches, so we do nothing here for now.
}
// Remove a block from the method lookup dependency table
static void
remove_method_lookup_dependency(block_t *block, VALUE receiver_klass, const rb_callable_method_entry_t *callee_cme)
{
    RUBY_ASSERT(receiver_klass);
    RUBY_ASSERT(callee_cme); // callee_cme should be set when receiver_klass is set

    st_data_t image;
    st_data_t key = (st_data_t)receiver_klass;
    if (st_lookup(method_lookup_dependency, key, &image)) {
        struct rb_id_table *id2blocks = (void *)image;
        ID mid = callee_cme->called_id;

        // Find block set
        VALUE blocks;
        if (rb_id_table_lookup(id2blocks, mid, &blocks)) {
            st_table *block_set = (st_table *)blocks;

            // Remove block from block set
            st_data_t block_as_st_data = (st_data_t)block;
            (void)st_delete(block_set, &block_as_st_data, NULL);

            if (block_set->num_entries == 0) {
                // Block set now empty. Remove from id table.
                rb_id_table_delete(id2blocks, mid);
                st_free_table(block_set);
            }
        }
    }
}

// Remove a block from cme_validity_dependency
static void
remove_cme_validity_dependency(block_t *block, const rb_callable_method_entry_t *callee_cme)
{
    RUBY_ASSERT(callee_cme);

    st_data_t blocks;
    if (st_lookup(cme_validity_dependency, (st_data_t)callee_cme, &blocks)) {
        st_table *block_set = (st_table *)blocks;

        st_data_t block_as_st_data = (st_data_t)block;
        (void)st_delete(block_set, &block_as_st_data, NULL);
    }
}

static void
yjit_unlink_method_lookup_dependency(block_t *block)
{
    cme_dependency_t *cme_dep;
    rb_darray_foreach(block->cme_dependencies, cme_dependency_idx, cme_dep) {
        remove_method_lookup_dependency(block, cme_dep->receiver_klass, (const rb_callable_method_entry_t *)cme_dep->callee_cme);
        remove_cme_validity_dependency(block, (const rb_callable_method_entry_t *)cme_dep->callee_cme);
    }
    rb_darray_free(block->cme_dependencies);
}

static void
yjit_block_assumptions_free(block_t *block)
{
    st_data_t as_st_data = (st_data_t)block;
    if (blocks_assuming_stable_global_constant_state) {
        st_delete(blocks_assuming_stable_global_constant_state, &as_st_data, NULL);
    }

    if (blocks_assuming_single_ractor_mode) {
        st_delete(blocks_assuming_single_ractor_mode, &as_st_data, NULL);
    }

    if (blocks_assuming_bops) {
        st_delete(blocks_assuming_bops, &as_st_data, NULL);
    }
}
typedef VALUE (*yjit_func_t)(rb_execution_context_t *, rb_control_frame_t *);

bool
rb_yjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec)
{
#if (OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE) && JIT_ENABLED
    bool success = true;
    RB_VM_LOCK_ENTER();
    rb_vm_barrier();

    // Compile a block version starting at the first instruction
    uint8_t *code_ptr = gen_entry_point(iseq, 0, ec);

    if (code_ptr) {
        iseq->body->jit_func = (yjit_func_t)code_ptr;
    }
    else {
        iseq->body->jit_func = 0;
        success = false;
    }

    RB_VM_LOCK_LEAVE();
    return success;
#else
    return false;
#endif
}
struct yjit_block_itr {
    const rb_iseq_t *iseq;
    VALUE list;
};

/* Get a list of the YJIT blocks associated with `rb_iseq` */
static VALUE
yjit_blocks_for(VALUE mod, VALUE rb_iseq)
{
    if (CLASS_OF(rb_iseq) != rb_cISeq) {
        return rb_ary_new();
    }

    const rb_iseq_t *iseq = rb_iseqw_to_iseq(rb_iseq);

    VALUE all_versions = rb_ary_new();
    rb_darray_for(iseq->body->yjit_blocks, version_array_idx) {
        rb_yjit_block_array_t versions = rb_darray_get(iseq->body->yjit_blocks, version_array_idx);

        rb_darray_for(versions, block_idx) {
            block_t *block = rb_darray_get(versions, block_idx);

            // FIXME: The object created here can outlive the block itself
            VALUE rb_block = TypedData_Wrap_Struct(cYjitBlock, &yjit_block_type, block);
            rb_ary_push(all_versions, rb_block);
        }
    }

    return all_versions;
}
/* Get the address of the code associated with a YJIT::Block */
static VALUE
block_address(VALUE self)
{
    block_t *block;
    TypedData_Get_Struct(self, block_t, &yjit_block_type, block);
    return LONG2NUM((intptr_t)block->start_addr);
}

/* Get the machine code for YJIT::Block as a binary string */
static VALUE
block_code(VALUE self)
{
    block_t *block;
    TypedData_Get_Struct(self, block_t, &yjit_block_type, block);

    return (VALUE)rb_str_new(
        (const char*)block->start_addr,
        block->end_addr - block->start_addr
    );
}

/* Get the start index in the Instruction Sequence that corresponds to this
 * YJIT::Block */
static VALUE
iseq_start_index(VALUE self)
{
    block_t *block;
    TypedData_Get_Struct(self, block_t, &yjit_block_type, block);

    return INT2NUM(block->blockid.idx);
}

/* Get the end index in the Instruction Sequence that corresponds to this
 * YJIT::Block */
static VALUE
iseq_end_index(VALUE self)
{
    block_t *block;
    TypedData_Get_Struct(self, block_t, &yjit_block_type, block);

    return INT2NUM(block->end_idx);
}
/* Called when a basic operation is redefined */
void
rb_yjit_bop_redefined(VALUE klass, const rb_method_entry_t *me, enum ruby_basic_operators bop)
{
    if (blocks_assuming_bops) {
#if YJIT_STATS
        yjit_runtime_counters.invalidate_bop_redefined += blocks_assuming_bops->num_entries;
#endif

        st_foreach(blocks_assuming_bops, block_set_invalidate_i, 0);
    }
}

/* Called when the constant state changes */
void
rb_yjit_constant_state_changed(void)
{
    if (blocks_assuming_stable_global_constant_state) {
#if YJIT_STATS
        yjit_runtime_counters.constant_state_bumps++;
        yjit_runtime_counters.invalidate_constant_state_bump += blocks_assuming_stable_global_constant_state->num_entries;
#endif

        st_foreach(blocks_assuming_stable_global_constant_state, block_set_invalidate_i, 0);
    }
}
// Callback from the opt_setinlinecache instruction in the interpreter.
// Invalidate the block for the matching opt_getinlinecache so it can regenerate code
// using the new value in the constant cache.
void
rb_yjit_constant_ic_update(const rb_iseq_t *const iseq, IC ic)
{
    if (!rb_yjit_enabled_p()) return;

    // We can't generate code in these situations, so no need to invalidate.
    // See gen_opt_getinlinecache.
    if (ic->entry->ic_cref || rb_multi_ractor_p()) {
        return;
    }

    RB_VM_LOCK_ENTER();
    rb_vm_barrier(); // Stop other ractors since we are going to patch machine code.
    {
        const struct rb_iseq_constant_body *const body = iseq->body;
        VALUE *code = body->iseq_encoded;
        const unsigned get_insn_idx = ic->get_insn_idx;

        // This should come from a running iseq, so direct threading translation
        // should have been done
        RUBY_ASSERT(FL_TEST((VALUE)iseq, ISEQ_TRANSLATED));
        RUBY_ASSERT(get_insn_idx < body->iseq_size);
        RUBY_ASSERT(rb_vm_insn_addr2insn((const void *)code[get_insn_idx]) == BIN(opt_getinlinecache));

        // Find the matching opt_getinlinecache and invalidate all the blocks there
        RUBY_ASSERT(insn_op_type(BIN(opt_getinlinecache), 1) == TS_IC);
        if (ic == (IC)code[get_insn_idx + 1 + 1]) {
            rb_yjit_block_array_t getinlinecache_blocks = yjit_get_version_array(iseq, get_insn_idx);

            // Put a bound on the loop below to be defensive
            const int32_t initial_version_count = rb_darray_size(getinlinecache_blocks);
            for (int32_t iteration = 0; iteration < initial_version_count; ++iteration) {
                getinlinecache_blocks = yjit_get_version_array(iseq, get_insn_idx);

                if (rb_darray_size(getinlinecache_blocks) > 0) {
                    block_t *block = rb_darray_get(getinlinecache_blocks, 0);
                    invalidate_block_version(block);
#if YJIT_STATS
                    yjit_runtime_counters.invalidate_constant_ic_fill++;
#endif
                }
                else {
                    break;
                }
            }

            // All versions at get_insn_idx should now be gone
            RUBY_ASSERT(0 == rb_darray_size(yjit_get_version_array(iseq, get_insn_idx)));
        }
        else {
            RUBY_ASSERT(false && "ic->get_insn_idx not set properly");
        }
    }
    RB_VM_LOCK_LEAVE();
}
void
rb_yjit_before_ractor_spawn(void)
{
    if (blocks_assuming_single_ractor_mode) {
#if YJIT_STATS
        yjit_runtime_counters.invalidate_ractor_spawn += blocks_assuming_single_ractor_mode->num_entries;
#endif

        st_foreach(blocks_assuming_single_ractor_mode, block_set_invalidate_i, 0);
    }
}
#ifdef HAVE_LIBCAPSTONE
static const rb_data_type_t yjit_disasm_type = {
    "YJIT/Disasm",
    {0, (void(*)(void *))cs_close, 0, },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};

static VALUE
yjit_disasm_init(VALUE klass)
{
    csh *handle;
    VALUE disasm = TypedData_Make_Struct(klass, csh, &yjit_disasm_type, handle);
    if (cs_open(CS_ARCH_X86, CS_MODE_64, handle) != CS_ERR_OK) {
        rb_raise(rb_eRuntimeError, "failed to make Capstone handle");
    }
    return disasm;
}

static VALUE
yjit_disasm(VALUE self, VALUE code, VALUE from)
{
    size_t count;
    csh *handle;
    cs_insn *insns;

    TypedData_Get_Struct(self, csh, &yjit_disasm_type, handle);
    count = cs_disasm(*handle, (uint8_t*)StringValuePtr(code), RSTRING_LEN(code), NUM2ULL(from), 0, &insns);
    VALUE insn_list = rb_ary_new_capa(count);

    for (size_t i = 0; i < count; i++) {
        VALUE vals = rb_ary_new_from_args(3, LONG2NUM(insns[i].address),
                                          rb_str_new2(insns[i].mnemonic),
                                          rb_str_new2(insns[i].op_str));
        rb_ary_push(insn_list, rb_struct_alloc(cYjitDisasmInsn, vals));
    }
    cs_free(insns, count);
    return insn_list;
}
#endif
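// For reference, a sketch of driving this disassembler from Ruby on a build
// with libcapstone (names as registered in rb_yjit_init(); the local variable
// `iseq` is assumed to be a RubyVM::InstructionSequence):
//
//     disasm = RubyVM::YJIT::Disasm.new
//     RubyVM::YJIT.blocks_for(iseq).each do |block|
//       disasm.disasm(block.code, block.address).each do |insn|
//         printf("%x: %s %s\n", insn.address, insn.mnemonic, insn.op_str)
//       end
//     end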
// Primitive called in yjit.rb. Export all machine code comments as a Ruby array.
static VALUE
comments_for(rb_execution_context_t *ec, VALUE self, VALUE start_address, VALUE end_address)
{
    VALUE comment_array = rb_ary_new();
#if RUBY_DEBUG
    uint8_t *start = (void *)NUM2ULL(start_address);
    uint8_t *end = (void *)NUM2ULL(end_address);

    rb_darray_for(yjit_code_comments, i) {
        struct yjit_comment comment = rb_darray_get(yjit_code_comments, i);
        uint8_t *comment_pos = cb_get_ptr(cb, comment.offset);

        if (comment_pos >= end) {
            break;
        }
        if (comment_pos >= start) {
            VALUE vals = rb_ary_new_from_args(
                2,
                LL2NUM((long long) comment_pos),
                rb_str_new_cstr(comment.comment)
            );
            rb_ary_push(comment_array, rb_struct_alloc(cYjitCodeComment, vals));
        }
    }
#endif // if RUBY_DEBUG

    return comment_array;
}

static VALUE
yjit_stats_enabled_p(rb_execution_context_t *ec, VALUE self)
{
    return RBOOL(YJIT_STATS && rb_yjit_opts.gen_stats);
}
// Primitive called in yjit.rb. Export all YJIT statistics as a Ruby hash.
static VALUE
get_yjit_stats(rb_execution_context_t *ec, VALUE self)
{
    // Return Qnil if YJIT isn't enabled
    if (cb == NULL) {
        return Qnil;
    }

    VALUE hash = rb_hash_new();

    RB_VM_LOCK_ENTER();

    {
        VALUE key = ID2SYM(rb_intern("inline_code_size"));
        VALUE value = LL2NUM((long long)cb->write_pos);
        rb_hash_aset(hash, key, value);

        key = ID2SYM(rb_intern("outlined_code_size"));
        value = LL2NUM((long long)ocb->write_pos);
        rb_hash_aset(hash, key, value);
    }

#if YJIT_STATS
    if (rb_yjit_opts.gen_stats) {
        // Indicate that the complete set of stats is available
        rb_hash_aset(hash, ID2SYM(rb_intern("all_stats")), Qtrue);

        int64_t *counter_reader = (int64_t *)&yjit_runtime_counters;
        int64_t *counter_reader_end = &yjit_runtime_counters.last_member;

        // For each counter in yjit_counter_names, add that counter as
        // a key/value pair.
        //
        // Iterate through comma separated counter name list
        char *name_reader = yjit_counter_names;
        char *counter_name_end = yjit_counter_names + sizeof(yjit_counter_names);
        while (name_reader < counter_name_end && counter_reader < counter_reader_end) {
            if (*name_reader == ',' || *name_reader == ' ') {
                name_reader++;
                continue;
            }

            // Compute length of counter name
            int name_len;
            char *name_end;
            {
                name_end = strchr(name_reader, ',');
                if (name_end == NULL) break;
                name_len = (int)(name_end - name_reader);
            }

            // Put counter into hash
            VALUE key = ID2SYM(rb_intern2(name_reader, name_len));
            VALUE value = LL2NUM((long long)*counter_reader);
            rb_hash_aset(hash, key, value);

            counter_reader++;
            name_reader = name_end;
        }
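        // Note: this parsing assumes yjit_counter_names is one comma separated
        // string (the stringified counter list from the YJIT_DECLARE_COUNTERS
        // macro) whose names appear in the same order as the int64_t counters
        // in yjit_runtime_counters, so both readers advance in lockstep.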
        // For each entry in exit_op_count, add a stats entry with key "exit_INSTRUCTION_NAME"
        // whose value is the count of side exits for that instruction.

        char key_string[rb_vm_max_insn_name_size + 6]; // Leave room for "exit_" and a final NUL
        for (int i = 0; i < VM_INSTRUCTION_SIZE; i++) {
            const char *i_name = insn_name(i); // Look up Ruby's NUL-terminated insn name string
            snprintf(key_string, rb_vm_max_insn_name_size + 6, "%s%s", "exit_", i_name);

            VALUE key = ID2SYM(rb_intern(key_string));
            VALUE value = LL2NUM((long long)exit_op_count[i]);
            rb_hash_aset(hash, key, value);
        }
    }
#endif

    RB_VM_LOCK_LEAVE();

    return hash;
}
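// Illustrative shape of the hash returned by get_yjit_stats() above (values
// hypothetical; the counter entries only appear on YJIT_STATS builds running
// with --yjit-stats):
//
//     {inline_code_size: 123456, outlined_code_size: 98765, all_stats: true,
//      binding_allocations: 0, ..., exit_leave: 42}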
// Primitive called in yjit.rb. Zero out all the counters.
static VALUE
reset_stats_bang(rb_execution_context_t *ec, VALUE self)
{
#if YJIT_STATS
    memset(&exit_op_count, 0, sizeof(exit_op_count));
    memset(&yjit_runtime_counters, 0, sizeof(yjit_runtime_counters));
#endif // if YJIT_STATS
    return Qnil;
}

// Primitive for yjit.rb. For testing running out of executable memory.
static VALUE
simulate_oom_bang(rb_execution_context_t *ec, VALUE self)
{
    if (RUBY_DEBUG && cb && ocb) {
        // Only simulate in debug builds for paranoia.
        cb_set_pos(cb, cb->mem_size-1);
        cb_set_pos(ocb, ocb->mem_size-1);
    }
    return Qnil;
}

#include "yjit.rbinc"

#if YJIT_STATS
void
rb_yjit_collect_vm_usage_insn(int insn)
{
    yjit_runtime_counters.vm_insns_count++;
}

void
rb_yjit_collect_binding_alloc(void)
{
    yjit_runtime_counters.binding_allocations++;
}

void
rb_yjit_collect_binding_set(void)
{
    yjit_runtime_counters.binding_set++;
}

static const VALUE *
yjit_count_side_exit_op(const VALUE *exit_pc)
{
    int insn = rb_vm_insn_addr2opcode((const void *)*exit_pc);
    exit_op_count[insn]++;
    return exit_pc; // This function must return exit_pc!
}
#endif
void
rb_yjit_iseq_mark(const struct rb_iseq_constant_body *body)
{
    rb_darray_for(body->yjit_blocks, version_array_idx) {
        rb_yjit_block_array_t version_array = rb_darray_get(body->yjit_blocks, version_array_idx);

        rb_darray_for(version_array, block_idx) {
            block_t *block = rb_darray_get(version_array, block_idx);

            rb_gc_mark_movable((VALUE)block->blockid.iseq);

            cme_dependency_t *cme_dep;
            rb_darray_foreach(block->cme_dependencies, cme_dependency_idx, cme_dep) {
                rb_gc_mark_movable(cme_dep->receiver_klass);
                rb_gc_mark_movable(cme_dep->callee_cme);
            }

            // Mark outgoing branch entries
            rb_darray_for(block->outgoing, branch_idx) {
                branch_t *branch = rb_darray_get(block->outgoing, branch_idx);
                for (int i = 0; i < 2; ++i) {
                    rb_gc_mark_movable((VALUE)branch->targets[i].iseq);
                }
            }

            // Walk over references to objects in generated code.
            uint32_t *offset_element;
            rb_darray_foreach(block->gc_object_offsets, offset_idx, offset_element) {
                uint32_t offset_to_value = *offset_element;
                uint8_t *value_address = cb_get_ptr(cb, offset_to_value);

                VALUE object;
                memcpy(&object, value_address, SIZEOF_VALUE);
                rb_gc_mark_movable(object);
            }

            // Mark the machine code page this block lives on
            //rb_gc_mark_movable(block->code_page);
        }
    }
}

void
rb_yjit_iseq_update_references(const struct rb_iseq_constant_body *body)
{
    rb_vm_barrier();

    rb_darray_for(body->yjit_blocks, version_array_idx) {
        rb_yjit_block_array_t version_array = rb_darray_get(body->yjit_blocks, version_array_idx);

        rb_darray_for(version_array, block_idx) {
            block_t *block = rb_darray_get(version_array, block_idx);

            block->blockid.iseq = (const rb_iseq_t *)rb_gc_location((VALUE)block->blockid.iseq);

            cme_dependency_t *cme_dep;
            rb_darray_foreach(block->cme_dependencies, cme_dependency_idx, cme_dep) {
                cme_dep->receiver_klass = rb_gc_location(cme_dep->receiver_klass);
                cme_dep->callee_cme = rb_gc_location(cme_dep->callee_cme);
            }

            // Update outgoing branch entries
            rb_darray_for(block->outgoing, branch_idx) {
                branch_t *branch = rb_darray_get(block->outgoing, branch_idx);
                for (int i = 0; i < 2; ++i) {
                    branch->targets[i].iseq = (const void *)rb_gc_location((VALUE)branch->targets[i].iseq);
                }
            }

            // Walk over references to objects in generated code.
            uint32_t *offset_element;
            rb_darray_foreach(block->gc_object_offsets, offset_idx, offset_element) {
                uint32_t offset_to_value = *offset_element;
                uint8_t *value_address = cb_get_ptr(cb, offset_to_value);

                VALUE object;
                memcpy(&object, value_address, SIZEOF_VALUE);
                VALUE possibly_moved = rb_gc_location(object);
                // Only write when the VALUE moves, to be CoW friendly.
                if (possibly_moved != object) {
                    // Possibly unlock the page we need to update
                    cb_mark_position_writeable(cb, offset_to_value);

                    // Object could cross a page boundary, so unlock there as well
                    cb_mark_position_writeable(cb, offset_to_value + SIZEOF_VALUE - 1);
                    memcpy(value_address, &possibly_moved, SIZEOF_VALUE);
                }
            }

            // Update the machine code page this block lives on
            //block->code_page = rb_gc_location(block->code_page);
        }
    }

    /* If YJIT isn't initialized, then cb or ocb could be NULL. */
    if (cb) {
        cb_mark_all_executable(cb);
    }

    if (ocb) {
        cb_mark_all_executable(ocb);
    }
}

// Free the yjit resources associated with an iseq
void
rb_yjit_iseq_free(const struct rb_iseq_constant_body *body)
{
    rb_darray_for(body->yjit_blocks, version_array_idx) {
        rb_yjit_block_array_t version_array = rb_darray_get(body->yjit_blocks, version_array_idx);

        rb_darray_for(version_array, block_idx) {
            block_t *block = rb_darray_get(version_array, block_idx);
            yjit_free_block(block);
        }

        rb_darray_free(version_array);
    }

    rb_darray_free(body->yjit_blocks);
}
// Struct representing a code page
typedef struct code_page_struct
{
    // Chunk of executable memory
    uint8_t *mem_block;

    // Size of the executable memory chunk
    uint32_t page_size;

    // Inline code block
    codeblock_t cb;

    // Outlined code block
    codeblock_t ocb;

    // Next node in the free list (private)
    struct code_page_struct *_next;

} code_page_t;

// Current code page we are writing machine code into
static VALUE yjit_cur_code_page = Qfalse;

// Head of the list of free code pages
static code_page_t *code_page_freelist = NULL;

// Free a code page, add it to the free list
static void
yjit_code_page_free(void *voidp)
{
    code_page_t *code_page = (code_page_t *)voidp;
    code_page->_next = code_page_freelist;
    code_page_freelist = code_page;
}

// Custom type for interacting with the GC
static const rb_data_type_t yjit_code_page_type = {
    "yjit_code_page",
    {NULL, yjit_code_page_free, NULL, NULL},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
// Allocate a code page and wrap it into a Ruby object owned by the GC
static VALUE
rb_yjit_code_page_alloc(void)
{
    // If the free list is empty
    if (!code_page_freelist) {
        // Allocate many pages at once
        uint8_t *code_chunk = alloc_exec_mem(PAGES_PER_ALLOC * CODE_PAGE_SIZE);

        // Do this in reverse order so we allocate our pages in order
        for (int i = PAGES_PER_ALLOC - 1; i >= 0; --i) {
            code_page_t *code_page = malloc(sizeof(code_page_t));
            code_page->mem_block = code_chunk + i * CODE_PAGE_SIZE;
            assert((intptr_t)code_page->mem_block % CODE_PAGE_SIZE == 0);
            code_page->page_size = CODE_PAGE_SIZE;
            code_page->_next = code_page_freelist;
            code_page_freelist = code_page;
        }
    }

    code_page_t *code_page = code_page_freelist;
    code_page_freelist = code_page_freelist->_next;

    // Create a Ruby wrapper struct for the code page object
    VALUE wrapper = TypedData_Wrap_Struct(0, &yjit_code_page_type, code_page);

    // Write a pointer to the wrapper object on the page
    *((VALUE *)code_page->mem_block) = wrapper;

    // Initialize the code blocks
    uint8_t *page_start = code_page->mem_block + sizeof(VALUE);
    uint8_t *page_end = code_page->mem_block + CODE_PAGE_SIZE;
    uint32_t halfsize = (uint32_t)(page_end - page_start) / 2;
    cb_init(&code_page->cb, page_start, halfsize);
    cb_init(&code_page->ocb, page_start + halfsize, halfsize);

    return wrapper;
}
// Unwrap the Ruby object representing a code page
static code_page_t *
rb_yjit_code_page_unwrap(VALUE cp_obj)
{
    code_page_t *code_page;
    TypedData_Get_Struct(cp_obj, code_page_t, &yjit_code_page_type, code_page);
    return code_page;
}

// Get the code page wrapper object for a code pointer
static VALUE
rb_yjit_code_page_from_ptr(uint8_t *code_ptr)
{
    VALUE *page_start = (VALUE *)((intptr_t)code_ptr & ~(CODE_PAGE_SIZE - 1));
    VALUE wrapper = *page_start;
    return wrapper;
}
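// Example of the mask arithmetic above: CODE_PAGE_SIZE is 16 KiB (0x4000), so
// ~(CODE_PAGE_SIZE - 1) == ~0x3FFF clears the low 14 bits and rounds code_ptr
// down to its page start, where rb_yjit_code_page_alloc() stored the wrapper.
// e.g. a code_ptr of 0x7f0012345678 yields the page start 0x7f0012344000.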
// Get the inline code block corresponding to a code pointer
static void
yjit_get_cb(codeblock_t *cb, uint8_t *code_ptr)
{
    VALUE page_wrapper = rb_yjit_code_page_from_ptr(code_ptr);
    code_page_t *code_page = rb_yjit_code_page_unwrap(page_wrapper);

    // A pointer to the page wrapper object is written at the start of the code page
    uint8_t *mem_block = code_page->mem_block + sizeof(VALUE);
    uint32_t mem_size = (code_page->page_size/2) - sizeof(VALUE);
    RUBY_ASSERT(mem_block);

    // Map the code block to this memory region
    cb_init(cb, mem_block, mem_size);
}

// Get the outlined code block corresponding to a code pointer
static void
yjit_get_ocb(codeblock_t *cb, uint8_t *code_ptr)
{
    VALUE page_wrapper = rb_yjit_code_page_from_ptr(code_ptr);
    code_page_t *code_page = rb_yjit_code_page_unwrap(page_wrapper);

    // A pointer to the page wrapper object is written at the start of the code page
    uint8_t *mem_block = code_page->mem_block + (code_page->page_size/2);
    uint32_t mem_size = code_page->page_size/2;
    RUBY_ASSERT(mem_block);

    // Map the code block to this memory region
    cb_init(cb, mem_block, mem_size);
}
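// Rough layout of a code page, as set up by rb_yjit_code_page_alloc() and
// mapped back by yjit_get_cb()/yjit_get_ocb() above:
//
//   mem_block
//   +---------------+---------------------------+----------------------------+
//   | VALUE wrapper | inline code block (cb)    | outlined code block (ocb)  |
//   +---------------+---------------------------+----------------------------+
//                   ^ mem_block + sizeof(VALUE) ^ mem_block + page_size/2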
// Get the current code page or allocate a new one
static VALUE
yjit_get_code_page(uint32_t cb_bytes_needed, uint32_t ocb_bytes_needed)
{
    // If this is the first code page
    if (yjit_cur_code_page == Qfalse) {
        yjit_cur_code_page = rb_yjit_code_page_alloc();
    }

    // Get the current code page
    code_page_t *code_page = rb_yjit_code_page_unwrap(yjit_cur_code_page);

    // Compute how many bytes are left in the code blocks
    uint32_t cb_bytes_left = code_page->cb.mem_size - code_page->cb.write_pos;
    uint32_t ocb_bytes_left = code_page->ocb.mem_size - code_page->ocb.write_pos;
    RUBY_ASSERT_ALWAYS(cb_bytes_needed <= code_page->cb.mem_size);
    RUBY_ASSERT_ALWAYS(ocb_bytes_needed <= code_page->ocb.mem_size);

    // If there's enough space left in the current code page
    if (cb_bytes_needed <= cb_bytes_left && ocb_bytes_needed <= ocb_bytes_left) {
        return yjit_cur_code_page;
    }

    // Allocate a new code page
    yjit_cur_code_page = rb_yjit_code_page_alloc();
    code_page_t *new_code_page = rb_yjit_code_page_unwrap(yjit_cur_code_page);

    // Jump to the new code page
    jmp_ptr(&code_page->cb, cb_get_ptr(&new_code_page->cb, 0));

    return yjit_cur_code_page;
}

bool
rb_yjit_enabled_p(void)
{
    return rb_yjit_opts.yjit_enabled;
}

unsigned
rb_yjit_call_threshold(void)
{
    return rb_yjit_opts.call_threshold;
}

# define PTR2NUM(x) (rb_int2inum((intptr_t)(void *)(x)))
/*
 *  call-seq: block.id -> unique_id
 *
 *  Returns a unique integer ID for the block. For example:
 *
 *      blocks = blocks_for(iseq)
 *      blocks.group_by(&:id)
 */
static VALUE
block_id(VALUE self)
{
    block_t *block;
    TypedData_Get_Struct(self, block_t, &yjit_block_type, block);
    return PTR2NUM(block);
}

/*
 *  call-seq: block.outgoing_ids -> list
 *
 *  Returns a list of outgoing ids for the current block. This list can be used
 *  in conjunction with Block#id to construct a graph of block objects.
 */
static VALUE
outgoing_ids(VALUE self)
{
    block_t *block;
    TypedData_Get_Struct(self, block_t, &yjit_block_type, block);

    VALUE ids = rb_ary_new();

    rb_darray_for(block->outgoing, branch_idx) {
        branch_t *out_branch = rb_darray_get(block->outgoing, branch_idx);

        for (size_t succ_idx = 0; succ_idx < 2; succ_idx++) {
            block_t *succ = out_branch->blocks[succ_idx];

            if (succ == NULL)
                continue;

            rb_ary_push(ids, PTR2NUM(succ));
        }
    }

    return ids;
}
// Can raise RuntimeError
void
rb_yjit_init(struct rb_yjit_options *options)
{
    if (!YJIT_SUPPORTED_P || !JIT_ENABLED) {
        return;
    }

    rb_yjit_opts = *options;
    rb_yjit_opts.yjit_enabled = true;

    rb_yjit_opts.gen_stats = rb_yjit_opts.gen_stats || getenv("RUBY_YJIT_STATS");

#if !YJIT_STATS
    if (rb_yjit_opts.gen_stats) {
        rb_warning("--yjit-stats requires that Ruby is compiled with CPPFLAGS='-DYJIT_STATS=1' or CPPFLAGS='-DRUBY_DEBUG=1'");
    }
#endif

    // Normalize command-line options to default values
    if (rb_yjit_opts.exec_mem_size < 1) {
        rb_yjit_opts.exec_mem_size = 256;
    }
    if (rb_yjit_opts.call_threshold < 1) {
        rb_yjit_opts.call_threshold = YJIT_DEFAULT_CALL_THRESHOLD;
    }
    if (rb_yjit_opts.max_versions < 1) {
        rb_yjit_opts.max_versions = 4;
    }

    // If type propagation is disabled, max 1 version per block
    if (rb_yjit_opts.no_type_prop) {
        rb_yjit_opts.max_versions = 1;
    }

    blocks_assuming_stable_global_constant_state = st_init_numtable();
    blocks_assuming_single_ractor_mode = st_init_numtable();
    blocks_assuming_bops = st_init_numtable();

    yjit_init_codegen();
    yjit_init_core();

    // YJIT Ruby module
    mYjit = rb_define_module_under(rb_cRubyVM, "YJIT");
    rb_define_module_function(mYjit, "blocks_for", yjit_blocks_for, 1);

    // YJIT::Block (block version, code block)
    cYjitBlock = rb_define_class_under(mYjit, "Block", rb_cObject);
    rb_undef_alloc_func(cYjitBlock);
    rb_define_method(cYjitBlock, "address", block_address, 0);
    rb_define_method(cYjitBlock, "id", block_id, 0);
    rb_define_method(cYjitBlock, "code", block_code, 0);
    rb_define_method(cYjitBlock, "iseq_start_index", iseq_start_index, 0);
    rb_define_method(cYjitBlock, "iseq_end_index", iseq_end_index, 0);
    rb_define_method(cYjitBlock, "outgoing_ids", outgoing_ids, 0);

    // YJIT disassembler interface
#ifdef HAVE_LIBCAPSTONE
    cYjitDisasm = rb_define_class_under(mYjit, "Disasm", rb_cObject);
    rb_define_alloc_func(cYjitDisasm, yjit_disasm_init);
    rb_define_method(cYjitDisasm, "disasm", yjit_disasm, 2);
    cYjitDisasmInsn = rb_struct_define_under(cYjitDisasm, "Insn", "address", "mnemonic", "op_str", NULL);
#if RUBY_DEBUG
    cYjitCodeComment = rb_struct_define_under(cYjitDisasm, "Comment", "address", "comment", NULL);
#endif
#endif

    // Make dependency tables
    method_lookup_dependency = st_init_numtable();
    cme_validity_dependency = st_init_numtable();

    // Initialize the GC hooks
    struct yjit_root_struct *root;
    VALUE yjit_root = TypedData_Make_Struct(0, struct yjit_root_struct, &yjit_root_type, root);
    rb_gc_register_mark_object(yjit_root);

    (void)yjit_get_cb;
    (void)yjit_get_ocb;
    (void)yjit_get_code_page;
}