/**********************************************************************

  mjit.c - MRI method JIT compiler functions for Ruby's main thread

  Copyright (C) 2017 Vladimir Makarov <vmakarov@redhat.com>.

**********************************************************************/

// Functions in this file are never executed on MJIT worker thread.
// So you can safely use Ruby methods and GC in this file.

// To share variables privately, include mjit_worker.c instead of linking.
14 #include "ruby/internal/config.h" // defines USE_MJIT
21 #include "internal/class.h"
22 #include "internal/cont.h"
23 #include "internal/file.h"
24 #include "internal/hash.h"
25 #include "internal/warnings.h"
28 #include "mjit_worker.c"
30 extern int rb_thread_create_mjit_thread(void (*worker_func
)(void));
32 // Return an unique file name in /tmp with PREFIX and SUFFIX and
33 // number ID. Use getpid if ID == 0. The return file name exists
34 // until the next function call.
36 get_uniq_filename(unsigned long id
, const char *prefix
, const char *suffix
)
38 char buff
[70], *str
= buff
;
39 int size
= sprint_uniq_filename(buff
, sizeof(buff
), id
, prefix
, suffix
);
43 if (size
<= (int)sizeof(buff
)) {
44 memcpy(str
, buff
, size
);
47 sprint_uniq_filename(str
, size
, id
, prefix
, suffix
);
52 // Wait until workers don't compile any iseq. It is called at the
55 mjit_gc_start_hook(void)
59 CRITICAL_SECTION_START(4, "mjit_gc_start_hook");
61 verbose(4, "Waiting wakeup from a worker for GC");
62 rb_native_cond_wait(&mjit_client_wakeup
, &mjit_engine_mutex
);
63 verbose(4, "Getting wakeup from a worker for GC");
66 CRITICAL_SECTION_FINISH(4, "mjit_gc_start_hook");
69 // Send a signal to workers to continue iseq compilations. It is
70 // called at the end of GC.
72 mjit_gc_exit_hook(void)
76 CRITICAL_SECTION_START(4, "mjit_gc_exit_hook");
78 RUBY_ASSERT_ALWAYS(in_gc
>= 0);
80 verbose(4, "Sending wakeup signal to workers after GC");
81 rb_native_cond_broadcast(&mjit_gc_wakeup
);
83 CRITICAL_SECTION_FINISH(4, "mjit_gc_exit_hook");
86 // Prohibit calling JIT-ed code and let existing JIT-ed frames exit before the next insn.
88 mjit_cancel_all(const char *reason
)
94 if (mjit_opts
.warnings
|| mjit_opts
.verbose
) {
95 fprintf(stderr
, "JIT cancel: Disabled JIT-ed code because %s\n", reason
);
99 // Deal with ISeq movement from compactor
101 mjit_update_references(const rb_iseq_t
*iseq
)
106 CRITICAL_SECTION_START(4, "mjit_update_references");
107 if (iseq
->body
->jit_unit
) {
108 iseq
->body
->jit_unit
->iseq
= (rb_iseq_t
*)rb_gc_location((VALUE
)iseq
->body
->jit_unit
->iseq
);
109 // We need to invalidate JIT-ed code for the ISeq because it embeds pointer addresses.
110 // To efficiently do that, we use the same thing as TracePoint and thus everything is cancelled for now.
111 // See mjit.h and tool/ruby_vm/views/_mjit_compile_insn.erb for how `mjit_call_p` is used.
112 mjit_cancel_all("GC.compact is used"); // TODO: instead of cancelling all, invalidate only this one and recompile it with some threshold.
115 // Units in stale_units (list of over-speculated and invalidated code) are not referenced from
116 // `iseq->body->jit_unit` anymore (because new one replaces that). So we need to check them too.
117 // TODO: we should be able to reduce the number of units checked here.
118 struct rb_mjit_unit
*unit
= NULL
;
119 list_for_each(&stale_units
.head
, unit
, unode
) {
120 if (unit
->iseq
== iseq
) {
121 unit
->iseq
= (rb_iseq_t
*)rb_gc_location((VALUE
)unit
->iseq
);
124 CRITICAL_SECTION_FINISH(4, "mjit_update_references");
127 // Iseqs can be garbage collected. This function should call when it
128 // happens. It removes iseq from the unit.
130 mjit_free_iseq(const rb_iseq_t
*iseq
)
135 CRITICAL_SECTION_START(4, "mjit_free_iseq");
136 RUBY_ASSERT_ALWAYS(in_gc
);
137 RUBY_ASSERT_ALWAYS(!in_jit
);
138 if (iseq
->body
->jit_unit
) {
139 // jit_unit is not freed here because it may be referred by multiple
140 // lists of units. `get_from_list` and `mjit_finish` do the job.
141 iseq
->body
->jit_unit
->iseq
= NULL
;
143 // Units in stale_units (list of over-speculated and invalidated code) are not referenced from
144 // `iseq->body->jit_unit` anymore (because new one replaces that). So we need to check them too.
145 // TODO: we should be able to reduce the number of units checked here.
146 struct rb_mjit_unit
*unit
= NULL
;
147 list_for_each(&stale_units
.head
, unit
, unode
) {
148 if (unit
->iseq
== iseq
) {
152 CRITICAL_SECTION_FINISH(4, "mjit_free_iseq");
155 // Free unit list. This should be called only when worker is finished
156 // because node of unit_queue and one of active_units may have the same unit
157 // during proceeding unit.
159 free_list(struct rb_mjit_unit_list
*list
, bool close_handle_p
)
161 struct rb_mjit_unit
*unit
= 0, *next
;
163 list_for_each_safe(&list
->head
, unit
, next
, unode
) {
164 list_del(&unit
->unode
);
165 if (!close_handle_p
) unit
->handle
= NULL
; /* Skip dlclose in free_unit() */
167 if (list
== &stale_units
) { // `free_unit(unit)` crashes after GC.compact on `stale_units`
169 * TODO: REVERT THIS BRANCH
170 * Debug the crash on stale_units w/ GC.compact and just use `free_unit(unit)`!!
172 if (unit
->handle
&& dlclose(unit
->handle
)) {
173 mjit_warning("failed to close handle for u%d: %s", unit
->id
, dlerror());
175 clean_temp_files(unit
);
185 // Register a new continuation with execution context `ec`. Return MJIT info about
188 mjit_cont_new(rb_execution_context_t
*ec
)
190 struct mjit_cont
*cont
;
192 // We need to use calloc instead of something like ZALLOC to avoid triggering GC here.
193 // When this function is called from rb_thread_alloc through rb_threadptr_root_fiber_setup,
194 // the thread is still being prepared and marking it causes SEGV.
195 cont
= calloc(1, sizeof(struct mjit_cont
));
200 CRITICAL_SECTION_START(3, "in mjit_cont_new");
201 if (first_cont
== NULL
) {
202 cont
->next
= cont
->prev
= NULL
;
206 cont
->next
= first_cont
;
207 first_cont
->prev
= cont
;
210 CRITICAL_SECTION_FINISH(3, "in mjit_cont_new");
215 // Unregister continuation `cont`.
217 mjit_cont_free(struct mjit_cont
*cont
)
219 CRITICAL_SECTION_START(3, "in mjit_cont_new");
220 if (cont
== first_cont
) {
221 first_cont
= cont
->next
;
222 if (first_cont
!= NULL
)
223 first_cont
->prev
= NULL
;
226 cont
->prev
->next
= cont
->next
;
227 if (cont
->next
!= NULL
)
228 cont
->next
->prev
= cont
->prev
;
230 CRITICAL_SECTION_FINISH(3, "in mjit_cont_new");
235 // Finish work with continuation info.
239 struct mjit_cont
*cont
, *next
;
241 for (cont
= first_cont
; cont
!= NULL
; cont
= next
) {
247 // Create unit for `iseq`. This function may be called from an MJIT worker.
249 create_unit(const rb_iseq_t
*iseq
)
251 struct rb_mjit_unit
*unit
;
253 unit
= calloc(1, sizeof(struct rb_mjit_unit
));
257 unit
->id
= current_unit_num
++;
258 unit
->iseq
= (rb_iseq_t
*)iseq
;
259 iseq
->body
->jit_unit
= unit
;
262 // Return true if given ISeq body should be compiled by MJIT
264 mjit_target_iseq_p(struct rb_iseq_constant_body
*body
)
266 return (body
->type
== ISEQ_TYPE_METHOD
|| body
->type
== ISEQ_TYPE_BLOCK
)
267 && !body
->builtin_inline_p
;
270 // If recompile_p is true, the call is initiated by mjit_recompile.
271 // This assumes the caller holds CRITICAL_SECTION when recompile_p is true.
273 mjit_add_iseq_to_process(const rb_iseq_t
*iseq
, const struct rb_mjit_compile_info
*compile_info
, bool recompile_p
)
275 if (!mjit_enabled
|| pch_status
== PCH_FAILED
)
277 if (!mjit_target_iseq_p(iseq
->body
)) {
278 iseq
->body
->jit_func
= (mjit_func_t
)NOT_COMPILED_JIT_ISEQ_FUNC
; // skip mjit_wait
283 CRITICAL_SECTION_START(3, "in add_iseq_to_process");
285 // This prevents multiple Ractors from enqueueing the same ISeq twice.
286 if (rb_multi_ractor_p() && (uintptr_t)iseq
->body
->jit_func
!= NOT_ADDED_JIT_ISEQ_FUNC
) {
287 CRITICAL_SECTION_FINISH(3, "in add_iseq_to_process");
292 RB_DEBUG_COUNTER_INC(mjit_add_iseq_to_process
);
293 iseq
->body
->jit_func
= (mjit_func_t
)NOT_READY_JIT_ISEQ_FUNC
;
295 if (iseq
->body
->jit_unit
== NULL
)
296 // Failure in creating the unit.
298 if (compile_info
!= NULL
)
299 iseq
->body
->jit_unit
->compile_info
= *compile_info
;
300 add_to_list(iseq
->body
->jit_unit
, &unit_queue
);
301 if (active_units
.length
>= mjit_opts
.max_cache_size
) {
306 verbose(3, "Sending wakeup signal to workers in mjit_add_iseq_to_process");
307 rb_native_cond_broadcast(&mjit_worker_wakeup
);
308 CRITICAL_SECTION_FINISH(3, "in add_iseq_to_process");
312 // Add ISEQ to be JITed in parallel with the current thread.
313 // Unload some JIT codes if there are too many of them.
315 rb_mjit_add_iseq_to_process(const rb_iseq_t
*iseq
)
317 mjit_add_iseq_to_process(iseq
, NULL
, false);
320 // For this timeout seconds, --jit-wait will wait for JIT compilation finish.
321 #define MJIT_WAIT_TIMEOUT_SECONDS 60
324 mjit_wait(struct rb_iseq_constant_body
*body
)
330 while (body
->jit_func
== (mjit_func_t
)NOT_READY_JIT_ISEQ_FUNC
) {
332 if (tries
/ 1000 > MJIT_WAIT_TIMEOUT_SECONDS
|| pch_status
== PCH_FAILED
) {
333 CRITICAL_SECTION_START(3, "in rb_mjit_wait_call to set jit_func");
334 body
->jit_func
= (mjit_func_t
)NOT_COMPILED_JIT_ISEQ_FUNC
; // JIT worker seems dead. Give up.
335 CRITICAL_SECTION_FINISH(3, "in rb_mjit_wait_call to set jit_func");
336 mjit_warning("timed out to wait for JIT finish");
340 CRITICAL_SECTION_START(3, "in rb_mjit_wait_call for a client wakeup");
341 rb_native_cond_broadcast(&mjit_worker_wakeup
);
342 CRITICAL_SECTION_FINISH(3, "in rb_mjit_wait_call for a client wakeup");
343 rb_thread_wait_for(tv
);
347 // Wait for JIT compilation finish for --jit-wait, and call the function pointer
348 // if the compiled result is not NOT_COMPILED_JIT_ISEQ_FUNC.
350 rb_mjit_wait_call(rb_execution_context_t
*ec
, struct rb_iseq_constant_body
*body
)
356 if ((uintptr_t)body
->jit_func
<= (uintptr_t)LAST_JIT_ISEQ_FUNC
) {
359 return body
->jit_func(ec
, ec
->cfp
);
362 struct rb_mjit_compile_info
*
363 rb_mjit_iseq_compile_info(const struct rb_iseq_constant_body
*body
)
365 assert(body
->jit_unit
!= NULL
);
366 return &body
->jit_unit
->compile_info
;
370 mjit_recompile(const rb_iseq_t
*iseq
)
372 if ((uintptr_t)iseq
->body
->jit_func
<= (uintptr_t)LAST_JIT_ISEQ_FUNC
)
375 verbose(1, "JIT recompile: %s@%s:%d", RSTRING_PTR(iseq
->body
->location
.label
),
376 RSTRING_PTR(rb_iseq_path(iseq
)), FIX2INT(iseq
->body
->location
.first_lineno
));
377 assert(iseq
->body
->jit_unit
!= NULL
);
379 if (UNLIKELY(mjit_opts
.wait
)) {
380 CRITICAL_SECTION_START(3, "in rb_mjit_recompile_iseq");
381 remove_from_list(iseq
->body
->jit_unit
, &active_units
);
382 add_to_list(iseq
->body
->jit_unit
, &stale_units
);
383 mjit_add_iseq_to_process(iseq
, &iseq
->body
->jit_unit
->compile_info
, true);
384 CRITICAL_SECTION_FINISH(3, "in rb_mjit_recompile_iseq");
385 mjit_wait(iseq
->body
);
388 // Lazily move active_units to stale_units to avoid race conditions around active_units with compaction.
389 // Also, it's lazily moved to unit_queue as well because otherwise it won't be added to stale_units properly.
390 // It's good to avoid a race condition between mjit_add_iseq_to_process and mjit_compile around jit_unit as well.
391 CRITICAL_SECTION_START(3, "in rb_mjit_recompile_iseq");
392 iseq
->body
->jit_unit
->stale_p
= true;
393 iseq
->body
->jit_func
= (mjit_func_t
)NOT_READY_JIT_ISEQ_FUNC
;
394 pending_stale_p
= true;
395 CRITICAL_SECTION_FINISH(3, "in rb_mjit_recompile_iseq");
399 // Recompile iseq, disabling send optimization
401 rb_mjit_recompile_send(const rb_iseq_t
*iseq
)
403 rb_mjit_iseq_compile_info(iseq
->body
)->disable_send_cache
= true;
404 mjit_recompile(iseq
);
407 // Recompile iseq, disabling ivar optimization
409 rb_mjit_recompile_ivar(const rb_iseq_t
*iseq
)
411 rb_mjit_iseq_compile_info(iseq
->body
)->disable_ivar_cache
= true;
412 mjit_recompile(iseq
);
415 // Recompile iseq, disabling exivar optimization
417 rb_mjit_recompile_exivar(const rb_iseq_t
*iseq
)
419 rb_mjit_iseq_compile_info(iseq
->body
)->disable_exivar_cache
= true;
420 mjit_recompile(iseq
);
423 // Recompile iseq, disabling method inlining
425 rb_mjit_recompile_inlining(const rb_iseq_t
*iseq
)
427 rb_mjit_iseq_compile_info(iseq
->body
)->disable_inlining
= true;
428 mjit_recompile(iseq
);
431 // Recompile iseq, disabling getconstant inlining
433 rb_mjit_recompile_const(const rb_iseq_t
*iseq
)
435 rb_mjit_iseq_compile_info(iseq
->body
)->disable_const_cache
= true;
436 mjit_recompile(iseq
);
439 extern VALUE ruby_archlibdir_path
, ruby_prefix_path
;
441 // Initialize header_file, pch_file, libruby_pathflag. Return true on success.
443 init_header_filename(void)
447 // Root path of the running ruby process. Equal to RbConfig::TOPDIR.
450 const char *basedir
= "";
454 static const char libpathflag
[] =
461 const size_t libpathflag_len
= sizeof(libpathflag
) - 1;
465 basedir_val
= ruby_prefix_path
;
466 basedir
= StringValuePtr(basedir_val
);
467 baselen
= RSTRING_LEN(basedir_val
);
469 if (getenv("MJIT_SEARCH_BUILD_DIR")) {
470 // This path is not intended to be used on production, but using build directory's
471 // header file here because people want to run `make test-all` without running
472 // `make install`. Don't use $MJIT_SEARCH_BUILD_DIR except for test-all.
475 const char *hdr
= dlsym(RTLD_DEFAULT
, "MJIT_HEADER");
477 verbose(1, "No MJIT_HEADER");
479 else if (hdr
[0] != '/') {
480 verbose(1, "Non-absolute header file path: %s", hdr
);
482 else if (stat(hdr
, &st
) || !S_ISREG(st
.st_mode
)) {
483 verbose(1, "Non-file header file path: %s", hdr
);
485 else if ((st
.st_uid
!= getuid()) || (st
.st_mode
& 022) ||
486 !rb_path_check(hdr
)) {
487 verbose(1, "Unsafe header file: uid=%ld mode=%#o %s",
488 (long)st
.st_uid
, (unsigned)st
.st_mode
, hdr
);
492 // Do not pass PRELOADENV to child processes, on
493 // multi-arch environment
494 verbose(3, "PRELOADENV("PRELOADENV
")=%s", getenv(PRELOADENV
));
495 // assume no other PRELOADENV in test-all
496 unsetenv(PRELOADENV
);
497 verbose(3, "MJIT_HEADER: %s", hdr
);
498 header_file
= ruby_strdup(hdr
);
499 if (!header_file
) return false;
506 // A name of the header file included in any C file generated by MJIT for iseqs.
507 static const char header_name
[] = MJIT_HEADER_INSTALL_DIR
"/" MJIT_MIN_HEADER_NAME
;
508 const size_t header_name_len
= sizeof(header_name
) - 1;
510 header_file
= xmalloc(baselen
+ header_name_len
+ 1);
511 p
= append_str2(header_file
, basedir
, baselen
);
512 p
= append_str2(p
, header_name
, header_name_len
+ 1);
514 if ((fd
= rb_cloexec_open(header_file
, O_RDONLY
, 0)) < 0) {
515 verbose(1, "Cannot access header file: %s", header_file
);
523 pch_file
= get_uniq_filename(0, MJIT_TMP_PREFIX
"h", ".h.gch");
526 static const char pch_name
[] = MJIT_HEADER_INSTALL_DIR
"/" MJIT_PRECOMPILED_HEADER_NAME
;
527 const size_t pch_name_len
= sizeof(pch_name
) - 1;
529 pch_file
= xmalloc(baselen
+ pch_name_len
+ 1);
530 p
= append_str2(pch_file
, basedir
, baselen
);
531 p
= append_str2(p
, pch_name
, pch_name_len
+ 1);
532 if ((fd
= rb_cloexec_open(pch_file
, O_RDONLY
, 0)) < 0) {
533 verbose(1, "Cannot access precompiled header file: %s", pch_file
);
543 basedir_val
= ruby_archlibdir_path
;
544 basedir
= StringValuePtr(basedir_val
);
545 baselen
= RSTRING_LEN(basedir_val
);
546 libruby_pathflag
= p
= xmalloc(libpathflag_len
+ baselen
+ 1);
547 p
= append_str(p
, libpathflag
);
548 p
= append_str2(p
, basedir
, baselen
);
556 UINT
rb_w32_system_tmpdir(WCHAR
*path
, UINT len
);
560 system_default_tmpdir(void)
562 // c.f. ext/etc/etc.c:etc_systmpdir()
564 WCHAR tmppath
[_MAX_PATH
];
565 UINT len
= rb_w32_system_tmpdir(tmppath
, numberof(tmppath
));
567 int blen
= WideCharToMultiByte(CP_UTF8
, 0, tmppath
, len
, NULL
, 0, NULL
, NULL
);
568 char *tmpdir
= xmalloc(blen
+ 1);
569 WideCharToMultiByte(CP_UTF8
, 0, tmppath
, len
, tmpdir
, blen
, NULL
, NULL
);
573 #elif defined _CS_DARWIN_USER_TEMP_DIR
574 char path
[MAXPATHLEN
];
575 size_t len
= confstr(_CS_DARWIN_USER_TEMP_DIR
, path
, sizeof(path
));
577 char *tmpdir
= xmalloc(len
);
578 if (len
> sizeof(path
)) {
579 confstr(_CS_DARWIN_USER_TEMP_DIR
, tmpdir
, len
);
582 memcpy(tmpdir
, path
, len
);
591 check_tmpdir(const char *dir
)
595 if (!dir
) return FALSE
;
596 if (stat(dir
, &st
)) return FALSE
;
598 # define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
600 if (!S_ISDIR(st
.st_mode
)) return FALSE
;
605 if (st
.st_mode
& S_IWOTH
) {
607 if (!(st
.st_mode
& S_ISVTX
)) return FALSE
;
612 if (access(dir
, W_OK
)) return FALSE
;
621 # define RETURN_ENV(name) \
622 if (check_tmpdir(tmpdir = getenv(name))) return ruby_strdup(tmpdir)
623 RETURN_ENV("TMPDIR");
625 tmpdir
= system_default_tmpdir();
626 if (check_tmpdir(tmpdir
)) return tmpdir
;
627 return ruby_strdup("/tmp");
631 // Minimum value for JIT cache size.
632 #define MIN_CACHE_SIZE 10
633 // Default permitted number of units with a JIT code kept in memory.
634 #define DEFAULT_MAX_CACHE_SIZE 10000
635 // A default threshold used to add iseq to JIT.
636 #define DEFAULT_MIN_CALLS_TO_ADD 10000
638 // Start MJIT worker. Return TRUE if worker is successfully started.
642 stop_worker_p
= false;
643 worker_stopped
= false;
645 if (!rb_thread_create_mjit_thread(mjit_worker
)) {
646 mjit_enabled
= false;
647 rb_native_mutex_destroy(&mjit_engine_mutex
);
648 rb_native_cond_destroy(&mjit_pch_wakeup
);
649 rb_native_cond_destroy(&mjit_client_wakeup
);
650 rb_native_cond_destroy(&mjit_worker_wakeup
);
651 rb_native_cond_destroy(&mjit_gc_wakeup
);
652 verbose(1, "Failure in MJIT thread initialization\n");
658 // There's no strndup on Windows
660 ruby_strndup(const char *str
, size_t n
)
662 char *ret
= xmalloc(n
+ 1);
668 // Convert "foo bar" to {"foo", "bar", NULL} array. Caller is responsible for
669 // freeing a returned buffer and its elements.
671 split_flags(const char *flags
)
673 char *buf
[MAXPATHLEN
];
676 for (; flags
!= NULL
; flags
= next
) {
677 next
= strchr(flags
, ' ');
679 if (strlen(flags
) > 0)
680 buf
[i
++] = strdup(flags
);
684 buf
[i
++] = ruby_strndup(flags
, next
- flags
);
685 next
++; // skip space
689 char **ret
= xmalloc(sizeof(char *) * (i
+ 1));
690 memcpy(ret
, buf
, sizeof(char *) * i
);
695 // Initialize MJIT. Start a thread creating the precompiled header and
696 // processing ISeqs. The function should be called first for using MJIT.
697 // If everything is successful, MJIT_INIT_P will be TRUE.
699 mjit_init(const struct mjit_options
*opts
)
706 if (mjit_opts
.min_calls
== 0)
707 mjit_opts
.min_calls
= DEFAULT_MIN_CALLS_TO_ADD
;
708 if (mjit_opts
.max_cache_size
<= 0)
709 mjit_opts
.max_cache_size
= DEFAULT_MAX_CACHE_SIZE
;
710 if (mjit_opts
.max_cache_size
< MIN_CACHE_SIZE
)
711 mjit_opts
.max_cache_size
= MIN_CACHE_SIZE
;
713 // Initialize variables for compilation
715 pch_status
= PCH_SUCCESS
; // has prebuilt precompiled header
717 pch_status
= PCH_NOT_READY
;
719 cc_path
= CC_COMMON_ARGS
[0];
720 verbose(2, "MJIT: CC defaults to %s", cc_path
);
721 cc_common_args
= xmalloc(sizeof(CC_COMMON_ARGS
));
722 memcpy((void *)cc_common_args
, CC_COMMON_ARGS
, sizeof(CC_COMMON_ARGS
));
723 cc_added_args
= split_flags(opts
->debug_flags
);
724 xfree(opts
->debug_flags
);
726 // eliminate a flag incompatible with `-pipe`
727 for (size_t i
= 0, j
= 0; i
< sizeof(CC_COMMON_ARGS
) / sizeof(char *); i
++) {
728 if (CC_COMMON_ARGS
[i
] && strncmp("-save-temps", CC_COMMON_ARGS
[i
], strlen("-save-temps")) == 0)
729 continue; // skip -save-temps flag
730 cc_common_args
[j
] = CC_COMMON_ARGS
[i
];
735 tmp_dir
= system_tmpdir();
736 verbose(2, "MJIT: tmp_dir is %s", tmp_dir
);
738 if (!init_header_filename()) {
739 mjit_enabled
= false;
740 verbose(1, "Failure in MJIT header file name initialization\n");
743 pch_owner_pid
= getpid();
746 rb_native_mutex_initialize(&mjit_engine_mutex
);
747 rb_native_cond_initialize(&mjit_pch_wakeup
);
748 rb_native_cond_initialize(&mjit_client_wakeup
);
749 rb_native_cond_initialize(&mjit_worker_wakeup
);
750 rb_native_cond_initialize(&mjit_gc_wakeup
);
752 // Make sure the saved_ec of the initial thread's root_fiber is scanned by mark_ec_units.
754 // rb_threadptr_root_fiber_setup for the initial thread is called before mjit_init,
755 // meaning mjit_cont_new is skipped for the root_fiber. Therefore we need to call
756 // rb_fiber_init_mjit_cont again with mjit_enabled=true to set the root_fiber's mjit_cont.
757 rb_fiber_init_mjit_cont(GET_EC()->fiber_ptr
);
759 // Initialize worker thread
766 rb_execution_context_t
*ec
= GET_EC();
768 while (!worker_stopped
) {
769 verbose(3, "Sending cancel signal to worker");
770 CRITICAL_SECTION_START(3, "in stop_worker");
771 stop_worker_p
= true; // Setting this inside loop because RUBY_VM_CHECK_INTS may make this false.
772 rb_native_cond_broadcast(&mjit_worker_wakeup
);
773 CRITICAL_SECTION_FINISH(3, "in stop_worker");
774 RUBY_VM_CHECK_INTS(ec
);
778 // Stop JIT-compiling methods but compiled code is kept available.
780 mjit_pause(bool wait_p
)
783 rb_raise(rb_eRuntimeError
, "MJIT is not enabled");
785 if (worker_stopped
) {
789 // Flush all queued units with no option or `wait: true`
795 while (unit_queue
.length
> 0 && active_units
.length
< mjit_opts
.max_cache_size
) { // inverse of condition that waits for mjit_worker_wakeup
796 CRITICAL_SECTION_START(3, "in mjit_pause for a worker wakeup");
797 rb_native_cond_broadcast(&mjit_worker_wakeup
);
798 CRITICAL_SECTION_FINISH(3, "in mjit_pause for a worker wakeup");
799 rb_thread_wait_for(tv
);
807 // Restart JIT-compiling methods after mjit_pause.
812 rb_raise(rb_eRuntimeError
, "MJIT is not enabled");
814 if (!worker_stopped
) {
818 if (!start_worker()) {
819 rb_raise(rb_eRuntimeError
, "Failed to resume MJIT worker");
824 // Skip calling `clean_temp_files` for units which currently exist in the list.
826 skip_cleaning_object_files(struct rb_mjit_unit_list
*list
)
828 struct rb_mjit_unit
*unit
= NULL
, *next
;
830 // No mutex for list, assuming MJIT worker does not exist yet since it's immediately after fork.
831 list_for_each_safe(&list
->head
, unit
, next
, unode
) {
832 #if defined(_WIN32) // mswin doesn't reach here either. This is for MinGW.
833 if (unit
->so_file
) unit
->so_file
= NULL
;
838 // This is called after fork initiated by Ruby's method to launch MJIT worker thread
839 // for child Ruby process.
841 // In multi-process Ruby applications, child Ruby processes do most of the jobs.
842 // Thus we want child Ruby processes to enqueue ISeqs to MJIT worker's queue and
843 // call the JIT-ed code.
845 // But unfortunately current MJIT-generated code is process-specific. After the fork,
846 // JIT-ed code created by parent Ruby process cannot be used in child Ruby process
847 // because the code could rely on inline cache values (ivar's IC, send's CC) which
848 // may vary between processes after fork or embed some process-specific addresses.
850 // So child Ruby process can't request parent process to JIT an ISeq and use the code.
851 // Instead of that, MJIT worker thread is created for all child Ruby processes, even
852 // while child processes would end up with compiling the same ISeqs.
854 mjit_child_after_fork(void)
859 /* Let parent process delete the already-compiled object files.
860 This must be done before starting MJIT worker on child process. */
861 skip_cleaning_object_files(&active_units
);
863 /* MJIT worker thread is not inherited on fork. Start it for this child process. */
867 // Edit 0 to 1 to enable this feature for investigating hot methods
868 #define MJIT_COUNTER 0
871 mjit_dump_total_calls(void)
873 struct rb_mjit_unit
*unit
;
874 fprintf(stderr
, "[MJIT_COUNTER] total_calls of active_units:\n");
875 list_for_each(&active_units
.head
, unit
, unode
) {
876 const rb_iseq_t
*iseq
= unit
->iseq
;
877 fprintf(stderr
, "%8ld: %s@%s:%d\n", iseq
->body
->total_calls
, RSTRING_PTR(iseq
->body
->location
.label
),
878 RSTRING_PTR(rb_iseq_path(iseq
)), FIX2INT(iseq
->body
->location
.first_lineno
));
883 // Finish the threads processing units and creating PCH, finalize
884 // and free MJIT data. It should be called last during MJIT
887 // If close_handle_p is true, it calls dlclose() for JIT-ed code. So it should be false
888 // if the code can still be on stack. ...But it means to leak JIT-ed handle forever (FIXME).
890 mjit_finish(bool close_handle_p
)
895 // Wait for pch finish
896 verbose(2, "Stopping worker thread");
897 CRITICAL_SECTION_START(3, "in mjit_finish to wakeup from pch");
898 // As our threads are detached, we could just cancel them. But it
899 // is a bad idea because OS processes (C compiler) started by
900 // threads can produce temp files. And even if the temp files are
901 // removed, the used C compiler still complaint about their
902 // absence. So wait for a clean finish of the threads.
903 while (pch_status
== PCH_NOT_READY
) {
904 verbose(3, "Waiting wakeup from make_pch");
905 rb_native_cond_wait(&mjit_pch_wakeup
, &mjit_engine_mutex
);
907 CRITICAL_SECTION_FINISH(3, "in mjit_finish to wakeup from pch");
912 rb_native_mutex_destroy(&mjit_engine_mutex
);
913 rb_native_cond_destroy(&mjit_pch_wakeup
);
914 rb_native_cond_destroy(&mjit_client_wakeup
);
915 rb_native_cond_destroy(&mjit_worker_wakeup
);
916 rb_native_cond_destroy(&mjit_gc_wakeup
);
919 mjit_dump_total_calls();
922 #ifndef _MSC_VER // mswin has prebuilt precompiled header
923 if (!mjit_opts
.save_temps
&& getpid() == pch_owner_pid
)
924 remove_file(pch_file
);
926 xfree(header_file
); header_file
= NULL
;
928 xfree((void *)cc_common_args
); cc_common_args
= NULL
;
929 for (char **flag
= cc_added_args
; *flag
!= NULL
; flag
++)
931 xfree((void *)cc_added_args
); cc_added_args
= NULL
;
932 xfree(tmp_dir
); tmp_dir
= NULL
;
933 xfree(pch_file
); pch_file
= NULL
;
936 free_list(&unit_queue
, close_handle_p
);
937 free_list(&active_units
, close_handle_p
);
938 free_list(&compact_units
, close_handle_p
);
939 free_list(&stale_units
, close_handle_p
);
942 mjit_enabled
= false;
943 verbose(1, "Successful MJIT finish");
946 // Called by rb_vm_mark().
948 // Mark an ISeq being compiled to prevent its CCs from being GC-ed, which
949 // an MJIT worker may concurrently see.
951 // Also mark active_units so that we do not GC ISeq which may still be
952 // referred to by mjit_recompile() or compact_all_jit_code().
958 RUBY_MARK_ENTER("mjit");
960 // We need to release a lock when calling rb_gc_mark to avoid doubly acquiring
961 // a lock by by mjit_gc_start_hook inside rb_gc_mark.
963 // Because an MJIT worker may modify active_units anytime, we need to convert
964 // the linked list to an array to safely loop its ISeqs without keeping a lock.
965 CRITICAL_SECTION_START(4, "mjit_mark");
967 if (compiling_iseqs
!= NULL
) {
968 while (compiling_iseqs
[length
]) length
++;
970 length
+= active_units
.length
;
971 const rb_iseq_t
**iseqs
= ALLOCA_N(const rb_iseq_t
*, length
);
973 struct rb_mjit_unit
*unit
= NULL
;
975 if (compiling_iseqs
!= NULL
) {
976 while (compiling_iseqs
[i
]) {
977 iseqs
[i
] = compiling_iseqs
[i
];
981 list_for_each(&active_units
.head
, unit
, unode
) {
982 iseqs
[i
] = unit
->iseq
;
986 CRITICAL_SECTION_FINISH(4, "mjit_mark");
988 for (i
= 0; i
< length
; i
++) {
989 if (iseqs
[i
] == NULL
) // ISeq is GC-ed
991 rb_gc_mark((VALUE
)iseqs
[i
]);
994 RUBY_MARK_LEAVE("mjit");
997 // Called by rb_iseq_mark() to mark cc_entries captured for MJIT
999 mjit_mark_cc_entries(const struct rb_iseq_constant_body
*const body
)
1001 const struct rb_callcache
**cc_entries
;
1002 if (body
->jit_unit
&& (cc_entries
= body
->jit_unit
->cc_entries
) != NULL
) {
1003 // It must be `body->jit_unit->cc_entries_size` instead of `body->ci_size` to mark children's cc_entries
1004 for (unsigned int i
= 0; i
< body
->jit_unit
->cc_entries_size
; i
++) {
1005 const struct rb_callcache
*cc
= cc_entries
[i
];
1006 if (cc
!= NULL
&& vm_cc_markable(cc
)) {
1007 // Pin `cc` and `cc->cme` against GC.compact as their addresses may be written in JIT-ed code.
1008 rb_gc_mark((VALUE
)cc
);
1009 rb_gc_mark((VALUE
)vm_cc_cme(cc
));