/**
 * \file
 * exception support for AMD64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Johan Lorensson (lateralusx.github@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include <config.h>

// Secret password to unlock wcscat_s on mxe, must happen before string.h is included
#ifdef __MINGW32__
#define MINGW_HAS_SECURE_API 1
#endif

#include <glib.h>
#include <string.h>

#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#ifdef HAVE_UCONTEXT_H
#include <ucontext.h>
#endif

#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini.h"
#include "mini-amd64.h"
#include "mini-runtime.h"
#include "aot-runtime.h"
#include "tasklets.h"
#ifdef TARGET_WIN32
static void (*restore_stack) (void);
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
void *mono_win_vectored_exception_handle;

#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler(er->ExceptionCode, &info, ctx)
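/*
 * For example, W32_SEH_HANDLE_EX(segv) expands to:
 *
 *   if (segv_handler) segv_handler(er->ExceptionCode, &info, ctx)
 */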
static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
{
#ifndef MONO_CROSS_COMPILE
	if (mono_old_win_toplevel_exception_filter) {
		return (*mono_old_win_toplevel_exception_filter)(ep);
	}
#endif

	mono_handle_native_crash ("SIGSEGV", NULL, NULL);

	return EXCEPTION_CONTINUE_SEARCH;
}
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
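/*
 * get_win32_restore_stack:
 *
 * Generates a small thunk that runs after a stack overflow has been
 * unwound: it calls _resetstkoflw () to re-arm the stack guard page, then
 * fetches the MonoJitTlsData for the current thread and hands its saved
 * stack_restore_ctx to mono_restore_context (), which never returns.
 */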
static gpointer
get_win32_restore_stack (void)
{
	static guint8 *start = NULL;
	guint8 *code;

	if (start)
		return start;

	const int size = 128;

	/* restore_stack (void) */
	start = code = mono_global_codeman_reserve (size);

	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* push 32 bytes of stack space for Win64 calling convention */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);

	/* restore guard page */
	amd64_mov_reg_imm (code, AMD64_R11, _resetstkoflw);
	amd64_call_reg (code, AMD64_R11);

	/* get jit_tls with context to restore */
	amd64_mov_reg_imm (code, AMD64_R11, mono_tls_get_jit_tls_extern);
	amd64_call_reg (code, AMD64_R11);

	/* move jit_tls from return reg to arg reg */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);

	/* retrieve pointer to saved context */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoJitTlsData, stack_restore_ctx));

	/* this call does not return */
	amd64_mov_reg_imm (code, AMD64_R11, mono_restore_context);
	amd64_call_reg (code, AMD64_R11);

	g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	return start;
}
#else
static gpointer
get_win32_restore_stack (void)
{
	// _resetstkoflw is unsupported on non-desktop Windows platforms.
	return NULL;
}
#endif /* G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) */
/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
 */
static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	LONG res;
	MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
	MonoDomain* domain = mono_domain_get ();
	MonoWindowsSigHandlerInfo info = { TRUE, ep };

	/* If the thread is not managed by the runtime, return early */
	if (!jit_tls)
		return EXCEPTION_CONTINUE_SEARCH;

	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;

	switch (er->ExceptionCode) {
	case EXCEPTION_STACK_OVERFLOW:
		if (!mono_aot_only && restore_stack) {
			if (mono_arch_handle_exception (ctx, domain->stack_overflow_ex)) {
				/* need to restore stack protection once stack is unwound
				 * restore_stack will restore stack protection and then
				 * resume control to the saved stack_restore_ctx */
				mono_sigctx_to_monoctx (ctx, &jit_tls->stack_restore_ctx);
				ctx->Rip = (guint64)restore_stack;
			}
		} else {
			info.handled = FALSE;
		}
		break;
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		info.handled = FALSE;
		break;
	}

	if (!info.handled) {
		/* Don't copy the context back if we chained the exception,
		 * as the handler may have modified the EXCEPTION_POINTERS
		 * directly. We don't pass sigcontext to chained handlers.
		 * Return continue search so the UnhandledExceptionFilter
		 * can correctly chain the exception.
		 */
		res = EXCEPTION_CONTINUE_SEARCH;
	}

	return res;
}
void win32_seh_init()
{
	if (!mono_aot_only)
		restore_stack = (void (*) (void))get_win32_restore_stack ();

	mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
	mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
}

void win32_seh_cleanup()
{
	guint32 ret = 0;

	if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);

	ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
	g_assert (ret);
}

void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* TARGET_WIN32 */
#ifndef DISABLE_JIT

/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, gregs_offset;

	/* restore_context (MonoContext *ctx) */

	const int size = 256;

	start = code = (guint8 *)mono_global_codeman_reserve (size);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip, %rsp and the volatile scratch registers %r8-%r11 */
	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
			amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
	}

	/*
	 * The context resides on the stack, in the stack frame of the
	 * caller of this function. The stack pointer that we need to
	 * restore is potentially many stack frames higher up, so the
	 * distance between them can easily be more than the red zone
	 * size. Hence the stack pointer can be restored only after
	 * we have finished loading everything from the context.
	 */
	amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	if (info)
		*info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);

	return start;
}
/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start;
	int i, gregs_offset;
	guint8 *code;
	guint32 pos;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const int kMaxCodeSize = 128;

	start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* call_filter (MonoContext *ctx, unsigned long eip) */
	code = start;

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	pos = 0;
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
			pos += 8;
		}

	/* Save EBP */
	pos += 8;
	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* set new EBP */
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
	/* load callee saved regs */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
			amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
	}
	/* load exc register */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	/* restore RBP */
	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

#if TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
#else
	amd64_leave (code);
#endif
	amd64_ret (code);

	g_assertf ((code - start) <= kMaxCodeSize, "%d %d", (int)(code - start), kMaxCodeSize);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	if (info)
		*info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);

	return start;
}
#endif /* !DISABLE_JIT */
/*
 * The first few arguments are dummy, to force the other arguments to be passed on
 * the stack; this avoids overwriting the argument registers in the throw trampoline.
 */
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
							guint64 dummy5, guint64 dummy6,
							MonoContext *mctx, MonoObject *exc, gboolean rethrow, gboolean preserve_ips)
{
	ERROR_DECL (error);
	MonoContext ctx;

	/* mctx is on the caller's stack */
	memcpy (&ctx, mctx, sizeof (MonoContext));

	if (mono_object_isinst_checked (exc, mono_defaults.exception_class, error)) {
		MonoException *mono_ex = (MonoException*)exc;
		if (!rethrow) {
			mono_ex->stack_trace = NULL;
			mono_ex->trace_ips = NULL;
		} else if (preserve_ips) {
			mono_ex->caught_in_unmanaged = TRUE;
		}
	}
	mono_error_assert_ok (error);

	/* adjust eip so that it points into the call instruction */
	ctx.gregs [AMD64_RIP] --;

	mono_handle_exception (&ctx, exc);
	mono_restore_context (&ctx);
	g_assert_not_reached ();
}
void
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
								   guint64 dummy5, guint64 dummy6,
								   MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
{
	guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
	MonoException *ex;

	ex = mono_exception_from_token (m_class_get_image (mono_defaults.exception_class), ex_token);

	mctx->gregs [AMD64_RIP] -= pc_offset;

	/* Negate the ip adjustment done in mono_amd64_throw_exception () */
	mctx->gregs [AMD64_RIP] += 1;

	mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE, FALSE);
}

void
mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
						  guint64 dummy5, guint64 dummy6,
						  MonoContext *mctx, guint32 dummy7, gint64 dummy8)
{
	/* Only the register parameters are valid */
	MonoContext ctx;

	/* mctx is on the caller's stack */
	memcpy (&ctx, mctx, sizeof (MonoContext));

	mono_resume_unwind (&ctx);
}
#ifndef DISABLE_JIT
/*
 * get_throw_trampoline:
 *
 * Generate a call to mono_amd64_throw_exception/
 * mono_amd64_throw_corlib_exception.
 */
static gpointer
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot, gboolean preserve_ips)
{
	guint8* start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, stack_size, arg_offsets [16], ctx_offset, regs_offset;
	const int kMaxCodeSize = 256;

#ifdef TARGET_WIN32
	const int dummy_stack_space = 6 * sizeof (target_mgreg_t);	/* Windows expects stack space allocated for all 6 dummy args. */
#else
	const int dummy_stack_space = 0;
#endif

	if (info)
		start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
	else
		start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* The stack is unaligned on entry */
	stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;

	code = start;

	if (info)
		unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
	if (info) {
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
		mono_add_unwind_op_sp_alloc (unwind_ops, code, start, stack_size);
	}

	/*
	 * To hide linux/windows calling convention differences, we pass all arguments on
	 * the stack by passing 6 dummy values in registers.
	 */

	arg_offsets [0] = dummy_stack_space + 0;
	arg_offsets [1] = dummy_stack_space + sizeof (target_mgreg_t);
	arg_offsets [2] = dummy_stack_space + sizeof (target_mgreg_t) * 2;
	arg_offsets [3] = dummy_stack_space + sizeof (target_mgreg_t) * 3;
	ctx_offset = dummy_stack_space + sizeof (target_mgreg_t) * 4;
	regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
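	/*
	 * Frame layout after the allocation above (offsets relative to the new RSP):
	 *
	 *   [0 .. dummy_stack_space)   home space for the 6 dummy register args (Windows only)
	 *   arg_offsets [0..3]         stack slots for the real arguments
	 *   ctx_offset ...             the MonoContext saved below
	 */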
	/* Save registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (i != AMD64_RSP)
			amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof (target_mgreg_t)), i, sizeof (target_mgreg_t));
	/* Save RSP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	/* Save IP */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof (target_mgreg_t)), AMD64_RAX, sizeof (target_mgreg_t));
	/* Set arg1 == ctx */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof (target_mgreg_t));
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof (target_mgreg_t));
	else
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof (target_mgreg_t));
	/* Set arg3 == rethrow/pc offset */
	if (resume_unwind) {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof (target_mgreg_t));
	} else if (corlib) {
		if (llvm_abs)
			/*
			 * The caller doesn't pass in a pc/pc offset, instead we simply use the
			 * caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
			 */
			amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof (target_mgreg_t));
		else
			amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof (target_mgreg_t));
	} else {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof (target_mgreg_t));
	}
	/* Set arg4 == preserve_ips */
	amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], preserve_ips, sizeof (target_mgreg_t));

	if (aot) {
		MonoJitICallId icall_id;

		if (resume_unwind)
			icall_id = MONO_JIT_ICALL_mono_amd64_resume_unwind;
		else if (corlib)
			icall_id = MONO_JIT_ICALL_mono_amd64_throw_corlib_exception;
		else
			icall_id = MONO_JIT_ICALL_mono_amd64_throw_exception;
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (icall_id));
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assertf ((code - start) <= kMaxCodeSize, "%d %d", (int)(code - start), kMaxCodeSize);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	if (info)
		*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}
/**
 * mono_arch_get_throw_exception:
 * \returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 */
gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot, FALSE);
}
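/*
 * Illustrative sketch (not part of this file): a caller would raise an
 * exception through the generated trampoline roughly like this:
 *
 *   void (*throw_ex) (MonoException *) = (void (*) (MonoException *))mono_arch_get_throw_exception (NULL, FALSE);
 *   throw_ex (exc);  // unwinds to the matching handler, never returns
 */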
gpointer
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot, FALSE);
}

gpointer
mono_arch_get_rethrow_preserve_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_preserve_exception", aot, TRUE);
}

/**
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot, FALSE);
}
#endif /* !DISABLE_JIT */
/*
 * mono_arch_unwind_frame:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 */
gboolean
mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls,
						MonoJitInfo *ji, MonoContext *ctx,
						MonoContext *new_ctx, MonoLMF **lmf,
						host_mgreg_t **save_locations,
						StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);
	int i;

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		host_mgreg_t regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;
		guint8 *epilog = NULL;

		if (ji->is_trampoline)
			frame->type = FRAME_TYPE_TRAMPOLINE;
		else
			frame->type = FRAME_TYPE_MANAGED;

		unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);

		frame->unwind_info = unwind_info;
		frame->unwind_info_len = unwind_info_len;

		/*
		printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
		mono_print_unwind_info (unwind_info, unwind_info_len);
		*/

		/* LLVM compiled code doesn't have this info */
		if (ji->has_arch_eh_info)
			epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);

		for (i = 0; i < AMD64_NREG; ++i)
			regs [i] = new_ctx->gregs [i];

		gboolean success = mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   (guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
						   save_locations, MONO_MAX_IREGS, &cfa);

		if (!success)
			return FALSE;

		for (i = 0; i < AMD64_NREG; ++i)
			new_ctx->gregs [i] = regs [i];

		/* The CFA becomes the new SP value */
		new_ctx->gregs [AMD64_RSP] = (host_mgreg_t)(gsize)cfa;

		/* Adjust IP */
		new_ctx->gregs [AMD64_RIP] --;

		return TRUE;
	} else if (*lmf) {
		guint64 rip;
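		/*
		 * The low bits of previous_lmf tag the kind of LMF entry: the bit
		 * checked with & 4 below marks a MonoLMFTramp carrying a full saved
		 * context. All tag bits are masked off with ~7 before the chain is
		 * followed.
		 */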
		g_assert ((((guint64)(*lmf)->previous_lmf) & 2) == 0);

		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return FALSE;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof(host_mgreg_t));
		}

		ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
		/*
		 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
		 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
		 * return address.
		 */
		//g_assert (ji);
		if (!ji)
			return FALSE;

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			/* Trampoline frame */
			for (i = 0; i < AMD64_NREG; ++i)
				new_ctx->gregs [i] = ext->ctx->gregs [i];
			/* Adjust IP */
			new_ctx->gregs [AMD64_RIP] --;
		} else {
			/*
			 * The registers saved in the LMF will be restored using the normal unwind info,
			 * when the wrapper frame is processed.
			 */
			/* Adjust IP */
			rip --;
			new_ctx->gregs [AMD64_RIP] = rip;
			new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
			new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
			for (i = 0; i < AMD64_NREG; ++i) {
				if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
					new_ctx->gregs [i] = 0;
			}
		}

		*lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);

		return TRUE;
	}

	return FALSE;
}
/*
 * handle_signal_exception:
 *
 * Called by resuming from a signal handler.
 */
static void
handle_signal_exception (gpointer obj)
{
	MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
	MonoContext ctx;

	memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));

	mono_handle_exception (&ctx, (MonoObject *)obj);

	mono_restore_context (&ctx);
}
void
mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
{
	guint64 sp = ctx->gregs [AMD64_RSP];

	ctx->gregs [AMD64_RDI] = (gsize)user_data;

	/* Allocate a stack frame below the red zone */
	sp -= 128;
	/* The stack should be unaligned */
	if ((sp % 16) == 0)
		sp -= 8;
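	/*
	 * Keeping RSP % 16 == 8 here mimics the state right after a call
	 * instruction, which is what the SysV AMD64 ABI guarantees at function
	 * entry; async_cb is entered by restoring this context rather than by
	 * an actual call.
	 */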
#ifdef __linux__
	/* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
	*(guint64*)sp = ctx->gregs [AMD64_RIP];
#endif
	ctx->gregs [AMD64_RSP] = sp;
	ctx->gregs [AMD64_RIP] = (gsize)async_cb;
}
/**
 * mono_arch_handle_exception:
 * \param ctx saved processor state
 * \param obj the exception object
 */
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoContext mctx;

	/*
	 * Handling the exception in the signal handler is problematic, since the original
	 * signal is disabled, and we could run arbitrary code through the debugger. So
	 * resume onto the normal stack and do most of the work there if possible.
	 */
	MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();

	/* Pass the ctx parameter in TLS */
	mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);

	mctx = jit_tls->ex_ctx;
	mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#else
	MonoContext mctx;

	mono_sigctx_to_monoctx (sigctx, &mctx);

	mono_handle_exception (&mctx, obj);

	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#endif
}
gpointer
mono_arch_ip_from_context (void *sigctx)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	return (gpointer)UCONTEXT_REG_RIP (ctx);
#elif defined(HOST_WIN32)
	return (gpointer)(((CONTEXT*)sigctx)->Rip);
#else
	MonoContext *ctx = (MonoContext*)sigctx;
	return (gpointer)ctx->gregs [AMD64_RIP];
#endif
}
static MonoObject*
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();

	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);

	if (jit_tls->stack_ovf_pending) {
		MonoDomain *domain = mono_domain_get ();
		jit_tls->stack_ovf_pending = 0;
		return (MonoObject *) domain->stack_overflow_ex;
	}

	return NULL;
}
/*
 * This function modifies mctx so that when it is restored, it
 * won't start executing at mctx.eip, but in a function that
 * will restore the protection on the soft-guard pages and then
 * return to continue at mctx.eip.
 */
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;
	sp = (gpointer *)(mctx->gregs [AMD64_RSP]);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
	mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
	mctx->gregs [AMD64_RSP] = (guint64)sp;
}
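/*
 * Note: restore_soft_guard_pages () ends with a normal return; the slot
 * written at sp [0] above acts as its return address, so execution resumes
 * at the interrupted mctx.eip once the guard pages are re-protected.
 */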
static void
altstack_handle_and_restore (MonoContext *ctx, MonoObject *obj, gboolean stack_ovf)
{
	MonoContext mctx;
	MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), MONO_CONTEXT_GET_IP (ctx), NULL);

	if (!ji)
		mono_handle_native_crash ("SIGSEGV", ctx, NULL);

	mctx = *ctx;

	mono_handle_exception (&mctx, obj);
	if (stack_ovf) {
		MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
		jit_tls->stack_ovf_pending = 1;
		prepare_for_guard_pages (&mctx);
	}
	mono_restore_context (&mctx);
}
void
mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoException *exc = NULL;
	gpointer *sp;
	int frame_size;
	MonoContext *copied_ctx;

	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;

	/*
	 * setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like:
	 *   ucontext struct
	 *   ...
	 *   return ip
	 * 128 is the size of the red zone
	 */
	frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
	frame_size += 15;
	frame_size &= ~15;
	sp = (gpointer *)(UCONTEXT_REG_RSP (sigctx) & ~15);
	sp = (gpointer *)((char*)sp - frame_size);
	copied_ctx = (MonoContext*)(sp + 4);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
	mono_sigctx_to_monoctx (sigctx, copied_ctx);
	/* on return from the signal handler, execution starts in altstack_handle_and_restore () */
	UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
	UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
	UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
	UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
	UCONTEXT_REG_RDX (sigctx) = stack_ovf;
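	/*
	 * RDI, RSI and RDX are the first three integer argument registers in the
	 * SysV AMD64 calling convention, so the writes above become the
	 * (ctx, obj, stack_ovf) arguments of altstack_handle_and_restore ().
	 */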
#endif
}
#ifndef DISABLE_JIT
GSList*
mono_amd64_get_exception_trampolines (gboolean aot)
{
	MonoTrampInfo *info;
	GSList *tramps = NULL;

	// FIXME Macro to make one line per trampoline.

	/* LLVM needs different throw trampolines */
	get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot, FALSE);
	info->jit_icall_info = &mono_get_jit_icall_info ()->mono_llvm_throw_corlib_exception_trampoline;
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot, FALSE);
	info->jit_icall_info = &mono_get_jit_icall_info ()->mono_llvm_throw_corlib_exception_abs_trampoline;
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", aot, FALSE);
	info->jit_icall_info = &mono_get_jit_icall_info ()->mono_llvm_resume_unwind_trampoline;
	tramps = g_slist_prepend (tramps, info);

	return tramps;
}

#else

GSList*
mono_amd64_get_exception_trampolines (gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

#endif /* !DISABLE_JIT */
void
mono_arch_exceptions_init (void)
{
	GSList *tramps, *l;
	gpointer tramp;

	if (mono_ee_features.use_aot_trampolines) {

		// FIXME Macro can make one line per trampoline here.
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
		mono_register_jit_icall_info (&mono_get_jit_icall_info ()->mono_llvm_throw_corlib_exception_trampoline, tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE, NULL);

		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
		mono_register_jit_icall_info (&mono_get_jit_icall_info ()->mono_llvm_throw_corlib_exception_abs_trampoline, tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE, NULL);

		tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
		mono_register_jit_icall_info (&mono_get_jit_icall_info ()->mono_llvm_resume_unwind_trampoline, tramp, "llvm_resume_unwind_trampoline", NULL, TRUE, NULL);

	} else if (!mono_llvm_only) {
		/* Call this to avoid initialization races */
		tramps = mono_amd64_get_exception_trampolines (FALSE);
		for (l = tramps; l; l = l->next) {
			MonoTrampInfo *info = (MonoTrampInfo *)l->data;

			mono_register_jit_icall_info (info->jit_icall_info, info->code, g_strdup (info->name), NULL, TRUE, NULL);
			mono_tramp_info_register (info, NULL);
		}
		g_slist_free (tramps);
	}
}
// Implies defined(TARGET_WIN32)
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE

static void
mono_arch_unwindinfo_create (gpointer* monoui)
{
	PUNWIND_INFO newunwindinfo;
	*monoui = newunwindinfo = g_new0 (UNWIND_INFO, 1);
	newunwindinfo->Version = 1;
}
void
mono_arch_unwindinfo_add_push_nonvol (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
{
	PUNWIND_CODE unwindcode;
	guchar codeindex;

	g_assert (unwindinfo != NULL);

	if (unwindinfo->CountOfCodes >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
	unwindcode = &unwindinfo->UnwindCode [codeindex];
	unwindcode->UnwindOp = UWOP_PUSH_NONVOL;
	unwindcode->CodeOffset = (guchar)unwind_op->when;
	unwindcode->OpInfo = unwind_op->reg;

	if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
}
void
mono_arch_unwindinfo_add_set_fpreg (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
{
	PUNWIND_CODE unwindcode;
	guchar codeindex;

	g_assert (unwindinfo != NULL);

	if (unwindinfo->CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
	unwindcode = &unwindinfo->UnwindCode [codeindex];
	unwindcode->UnwindOp = UWOP_SET_FPREG;
	unwindcode->CodeOffset = (guchar)unwind_op->when;

	g_assert (unwind_op->val % 16 == 0);
	unwindinfo->FrameRegister = unwind_op->reg;
	unwindinfo->FrameOffset = unwind_op->val / 16;

	if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
}
void
mono_arch_unwindinfo_add_alloc_stack (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
{
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	guchar codesneeded;
	guint size;

	g_assert (unwindinfo != NULL);

	size = unwind_op->val;

	if (size < 0x8)
		g_error ("Stack allocation must be equal to or greater than 0x8.");

	if (size <= 0x80)
		codesneeded = 1;
	else if (size <= 0x7FFF8)
		codesneeded = 2;
	else
		codesneeded = 3;
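	/*
	 * Windows x64 unwind code encodings used below:
	 *   UWOP_ALLOC_SMALL            1 slot,  sizes 8-128 bytes, stored as (OpInfo * 8) + 8
	 *   UWOP_ALLOC_LARGE, OpInfo=0  2 slots, size / 8 stored in the next slot (up to 512K - 8)
	 *   UWOP_ALLOC_LARGE, OpInfo=1  3 slots, unscaled size stored in the next two slots
	 */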
	if (unwindinfo->CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->CountOfCodes += codesneeded);
	unwindcode = &unwindinfo->UnwindCode [codeindex];

	unwindcode->CodeOffset = (guchar)unwind_op->when;

	if (codesneeded == 1) {
		/* The size of the allocation is
		   (the number in the OpInfo member) times 8 plus 8. */
		unwindcode->UnwindOp = UWOP_ALLOC_SMALL;
		unwindcode->OpInfo = (size - 8)/8;
	}
	else {
		if (codesneeded == 3) {
			/* The unscaled size of the allocation is recorded
			   in the next two slots in little-endian format.
			   NOTE: unwind codes are allocated from the end to the beginning
			   of the list, so the unwind codes end up in the right execution
			   order. The list is sorted on CodeOffset in descending order. */
			unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
			unwindcode->OpInfo = 1;
			*((unsigned int*)(&(unwindcode + 1)->FrameOffset)) = size;
		}
		else {
			/* The size of the allocation divided by 8
			   is recorded in the next slot.
			   NOTE: unwind codes are allocated from the end to the beginning
			   of the list, so the unwind codes end up in the right execution
			   order. The list is sorted on CodeOffset in descending order. */
			unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
			unwindcode->OpInfo = 0;
			(unwindcode + 1)->FrameOffset = (gushort)(size/8);
		}
	}

	if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
}
static gboolean g_dyn_func_table_inited;

// Dynamic function table used when registering unwind info for OS unwind support.
static GList *g_dynamic_function_table_begin;
static GList *g_dynamic_function_table_end;

// SRW lock (lightweight reader/writer lock) protecting the dynamic function table.
static SRWLOCK g_dynamic_function_table_lock = SRWLOCK_INIT;

static RtlInstallFunctionTableCallbackPtr g_rtl_install_function_table_callback;
static RtlDeleteFunctionTablePtr g_rtl_delete_function_table;

// On Win8 or Win2012Server or later, use growable function tables instead
// of callbacks. The callback solution is still the fallback on older systems.
static RtlAddGrowableFunctionTablePtr g_rtl_add_growable_function_table;
static RtlGrowFunctionTablePtr g_rtl_grow_function_table;
static RtlDeleteGrowableFunctionTablePtr g_rtl_delete_growable_function_table;

// When using the function table callback solution, an out-of-proc module is needed by
// debuggers in order to read unwind info from the debug target.
#ifdef _MSC_VER
#define MONO_DAC_MODULE L"mono-2.0-dac-sgen.dll"
#else
#define MONO_DAC_MODULE L"mono-2.0-sgen.dll"
#endif

#define MONO_DAC_MODULE_MAX_PATH 1024
static void
init_table_no_lock (void)
{
	if (g_dyn_func_table_inited == FALSE) {
		g_assert_checked (g_dynamic_function_table_begin == NULL);
		g_assert_checked (g_dynamic_function_table_end == NULL);
		g_assert_checked (g_rtl_install_function_table_callback == NULL);
		g_assert_checked (g_rtl_delete_function_table == NULL);
		g_assert_checked (g_rtl_add_growable_function_table == NULL);
		g_assert_checked (g_rtl_grow_function_table == NULL);
		g_assert_checked (g_rtl_delete_growable_function_table == NULL);

		// Load functions available on Win8/Win2012Server or later. If running on earlier
		// systems the below GetProcAddress calls will fail; this is expected behavior.
		HMODULE ntdll;
		if (GetModuleHandleEx (0, L"ntdll.dll", &ntdll)) {
			g_rtl_add_growable_function_table = (RtlAddGrowableFunctionTablePtr)GetProcAddress (ntdll, "RtlAddGrowableFunctionTable");
			g_rtl_grow_function_table = (RtlGrowFunctionTablePtr)GetProcAddress (ntdll, "RtlGrowFunctionTable");
			g_rtl_delete_growable_function_table = (RtlDeleteGrowableFunctionTablePtr)GetProcAddress (ntdll, "RtlDeleteGrowableFunctionTable");
		}

		// Fall back on systems not having RtlAddGrowableFunctionTable.
		if (g_rtl_add_growable_function_table == NULL) {
			HMODULE kernel32dll;
			if (GetModuleHandleEx (0, L"kernel32.dll", &kernel32dll)) {
				g_rtl_install_function_table_callback = (RtlInstallFunctionTableCallbackPtr)GetProcAddress (kernel32dll, "RtlInstallFunctionTableCallback");
				g_rtl_delete_function_table = (RtlDeleteFunctionTablePtr)GetProcAddress (kernel32dll, "RtlDeleteFunctionTable");
			}
		}

		g_dyn_func_table_inited = TRUE;
	}
}
void
mono_arch_unwindinfo_init_table (void)
{
	if (g_dyn_func_table_inited == FALSE) {

		AcquireSRWLockExclusive (&g_dynamic_function_table_lock);

		init_table_no_lock ();

		ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
	}
}

static void
terminate_table_no_lock (void)
{
	if (g_dyn_func_table_inited == TRUE) {
		if (g_dynamic_function_table_begin != NULL) {
			// Free all list elements.
			for (GList *l = g_dynamic_function_table_begin; l; l = l->next) {
				if (l->data) {
					g_free (l->data);
					l->data = NULL;
				}
			}

			// Free the list.
			g_list_free (g_dynamic_function_table_begin);
			g_dynamic_function_table_begin = NULL;
			g_dynamic_function_table_end = NULL;
		}

		g_rtl_delete_growable_function_table = NULL;
		g_rtl_grow_function_table = NULL;
		g_rtl_add_growable_function_table = NULL;

		g_rtl_delete_function_table = NULL;
		g_rtl_install_function_table_callback = NULL;

		g_dyn_func_table_inited = FALSE;
	}
}

void
mono_arch_unwindinfo_terminate_table (void)
{
	if (g_dyn_func_table_inited == TRUE) {

		AcquireSRWLockExclusive (&g_dynamic_function_table_lock);

		terminate_table_no_lock ();

		ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
	}
}
static GList *
fast_find_range_in_table_no_lock_ex (gsize begin_range, gsize end_range, gboolean *continue_search)
{
	GList *found_entry = NULL;

	// Fast path, look at boundaries.
	if (g_dynamic_function_table_begin != NULL) {
		DynamicFunctionTableEntry *first_entry = (DynamicFunctionTableEntry*)g_dynamic_function_table_begin->data;
		DynamicFunctionTableEntry *last_entry = (g_dynamic_function_table_end != NULL) ? (DynamicFunctionTableEntry*)g_dynamic_function_table_end->data : first_entry;

		// Sorted in descending order based on begin_range; check the first item, which is the entry with the highest range.
		if (first_entry != NULL && first_entry->begin_range <= begin_range && first_entry->end_range >= end_range) {
			// Entry belongs to first entry in list.
			found_entry = g_dynamic_function_table_begin;
			*continue_search = FALSE;
		} else {
			if (first_entry != NULL && first_entry->begin_range >= begin_range) {
				if (last_entry != NULL && last_entry->begin_range <= begin_range) {
					// Entry has a range that could exist in table, continue search.
					*continue_search = TRUE;
				}
			}
		}
	}

	return found_entry;
}
static DynamicFunctionTableEntry *
fast_find_range_in_table_no_lock (gsize begin_range, gsize end_range, gboolean *continue_search)
{
	GList *found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, continue_search);
	return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
}

static GList *
find_range_in_table_no_lock_ex (const gpointer code_block, gsize block_size)
{
	GList *found_entry = NULL;
	gboolean continue_search = FALSE;

	gsize begin_range = (gsize)code_block;
	gsize end_range = begin_range + block_size;

	// Fast path, check table boundaries.
	found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, &continue_search);
	if (found_entry || continue_search == FALSE)
		return found_entry;

	// Scan table for an entry including range.
	for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
		DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
		g_assert_checked (current_entry != NULL);

		// Do we have a match?
		if (current_entry->begin_range == begin_range && current_entry->end_range == end_range) {
			found_entry = node;
			break;
		}
	}

	return found_entry;
}
1308 static GList *
1309 find_pc_in_table_no_lock_ex (const gpointer pc)
1311 GList *found_entry = NULL;
1312 gboolean continue_search = FALSE;
1314 gsize begin_range = (gsize)pc;
1315 gsize end_range = begin_range;
1317 // Fast path, check table boundaries.
1318 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, begin_range, &continue_search);
1319 if (found_entry || continue_search == FALSE)
1320 return found_entry;
1322 // Scan table for a entry including range.
1323 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1324 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1325 g_assert_checked (current_entry != NULL);
1327 // Do we have a match?
1328 if (current_entry->begin_range <= begin_range && current_entry->end_range >= end_range) {
1329 found_entry = node;
1330 break;
1334 return found_entry;
1337 static DynamicFunctionTableEntry *
1338 find_pc_in_table_no_lock (const gpointer pc)
1340 GList *found_entry = find_pc_in_table_no_lock_ex (pc);
1341 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
static void
validate_table_no_lock (void)
{
	// Validation method checking that the table is sorted as expected and doesn't include overlapping regions.
	// The method will assert on failure to explicitly indicate which check failed.
	if (g_dynamic_function_table_begin != NULL) {
		g_assert_checked (g_dynamic_function_table_end != NULL);

		DynamicFunctionTableEntry *previous_entry = NULL;
		DynamicFunctionTableEntry *current_entry = NULL;
		for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
			current_entry = (DynamicFunctionTableEntry *)node->data;

			g_assert_checked (current_entry != NULL);
			g_assert_checked (current_entry->end_range > current_entry->begin_range);

			if (previous_entry != NULL) {
				// List should be sorted in descending order on begin_range.
				g_assert_checked (previous_entry->begin_range > current_entry->begin_range);

				// Check for overlapping regions.
				g_assert_checked (previous_entry->begin_range >= current_entry->end_range);
			}

			previous_entry = current_entry;
		}
	}
}

#else

static void
validate_table_no_lock (void)
{
}
#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// Forward declare.
static PRUNTIME_FUNCTION MONO_GET_RUNTIME_FUNCTION_CALLBACK (DWORD64 ControlPc, IN PVOID Context);

DynamicFunctionTableEntry *
mono_arch_unwindinfo_insert_range_in_table (const gpointer code_block, gsize block_size)
{
	DynamicFunctionTableEntry *new_entry = NULL;

	gsize begin_range = (gsize)code_block;
	gsize end_range = begin_range + block_size;

	AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
	init_table_no_lock ();
	new_entry = find_range_in_table_no_lock (code_block, block_size);
	if (new_entry == NULL) {
		// Allocate new entry.
		new_entry = g_new0 (DynamicFunctionTableEntry, 1);
		if (new_entry != NULL) {

			// Pre-allocate RUNTIME_FUNCTION array, assume average method size of
			// MONO_UNWIND_INFO_RT_FUNC_SIZE bytes.
			InitializeSRWLock (&new_entry->lock);
			new_entry->handle = NULL;
			new_entry->begin_range = begin_range;
			new_entry->end_range = end_range;
			new_entry->rt_funcs_max_count = (block_size / MONO_UNWIND_INFO_RT_FUNC_SIZE) + 1;
			new_entry->rt_funcs_current_count = 0;
			new_entry->rt_funcs = g_new0 (RUNTIME_FUNCTION, new_entry->rt_funcs_max_count);

			if (new_entry->rt_funcs != NULL) {
				// Check insert on boundaries. List is sorted descending on begin_range.
				if (g_dynamic_function_table_begin == NULL) {
					g_dynamic_function_table_begin = g_list_append (g_dynamic_function_table_begin, new_entry);
					g_dynamic_function_table_end = g_dynamic_function_table_begin;
				} else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_begin->data))->begin_range < begin_range) {
					// Insert at the head.
					g_dynamic_function_table_begin = g_list_prepend (g_dynamic_function_table_begin, new_entry);
				} else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_end->data))->begin_range > begin_range) {
					// Insert at tail.
					g_list_append (g_dynamic_function_table_end, new_entry);
					g_dynamic_function_table_end = g_dynamic_function_table_end->next;
				} else {
					// Search and insert at correct position.
					for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
						DynamicFunctionTableEntry * current_entry = (DynamicFunctionTableEntry *)node->data;
						g_assert_checked (current_entry != NULL);

						if (current_entry->begin_range < new_entry->begin_range) {
							g_dynamic_function_table_begin = g_list_insert_before (g_dynamic_function_table_begin, node, new_entry);
							break;
						}
					}
				}

				// Register dynamic function table entry with OS.
				if (g_rtl_add_growable_function_table != NULL) {
					// Allocate new growable handle table for entry.
					g_assert_checked (new_entry->handle == NULL);
					DWORD result = g_rtl_add_growable_function_table (&new_entry->handle,
								new_entry->rt_funcs, new_entry->rt_funcs_current_count,
								new_entry->rt_funcs_max_count, new_entry->begin_range, new_entry->end_range);
					g_assert (!result);
				} else if (g_rtl_install_function_table_callback != NULL) {
					WCHAR buffer [MONO_DAC_MODULE_MAX_PATH] = { 0 };
					WCHAR *path = buffer;

					// DAC module should be in the same directory as the
					// main executable.
					GetModuleFileNameW (NULL, buffer, G_N_ELEMENTS(buffer));
					path = wcsrchr (buffer, TEXT('\\'));
					if (path != NULL) {
						path++;
						*path = TEXT('\0');
					}

					wcscat_s (buffer, G_N_ELEMENTS(buffer), MONO_DAC_MODULE);
					path = buffer;

					// Register function table callback + out of proc module.
					new_entry->handle = (PVOID)((DWORD64)(new_entry->begin_range) | 3);
					BOOLEAN result = g_rtl_install_function_table_callback ((DWORD64)(new_entry->handle),
								(DWORD64)(new_entry->begin_range), (DWORD)(new_entry->end_range - new_entry->begin_range),
								MONO_GET_RUNTIME_FUNCTION_CALLBACK, new_entry, path);
					g_assert(result);
				} else {
					g_assert_not_reached ();
				}

				// Only included in checked builds. Validates the structure of table after insert.
				validate_table_no_lock ();

			} else {
				g_free (new_entry);
				new_entry = NULL;
			}
		}
	}

	ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);

	return new_entry;
}
static void
remove_range_in_table_no_lock (GList *entry)
{
	if (entry != NULL) {
		if (entry == g_dynamic_function_table_end)
			g_dynamic_function_table_end = entry->prev;

		g_dynamic_function_table_begin = g_list_remove_link (g_dynamic_function_table_begin, entry);
		DynamicFunctionTableEntry *removed_entry = (DynamicFunctionTableEntry *)entry->data;

		g_assert_checked (removed_entry != NULL);
		g_assert_checked (removed_entry->rt_funcs != NULL);

		// Remove function table from OS.
		if (removed_entry->handle != NULL) {
			if (g_rtl_delete_growable_function_table != NULL) {
				g_rtl_delete_growable_function_table (removed_entry->handle);
			} else if (g_rtl_delete_function_table != NULL) {
				g_rtl_delete_function_table ((PRUNTIME_FUNCTION)removed_entry->handle);
			} else {
				g_assert_not_reached ();
			}
		}

		g_free (removed_entry->rt_funcs);
		g_free (removed_entry);

		g_list_free_1 (entry);
	}

	// Only included in checked builds. Validates the structure of table after remove.
	validate_table_no_lock ();
}
void
mono_arch_unwindinfo_remove_pc_range_in_table (const gpointer code)
{
	AcquireSRWLockExclusive (&g_dynamic_function_table_lock);

	GList *found_entry = find_pc_in_table_no_lock_ex (code);

	g_assert_checked (found_entry == NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code);
	remove_range_in_table_no_lock (found_entry);

	ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
}

void
mono_arch_unwindinfo_remove_range_in_table (const gpointer code_block, gsize block_size)
{
	AcquireSRWLockExclusive (&g_dynamic_function_table_lock);

	GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);

	g_assert_checked (found_entry == NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code_block);
	remove_range_in_table_no_lock (found_entry);

	ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
}
PRUNTIME_FUNCTION
mono_arch_unwindinfo_find_rt_func_in_table (const gpointer code, gsize code_size)
{
	PRUNTIME_FUNCTION found_rt_func = NULL;

	gsize begin_range = (gsize)code;
	gsize end_range = begin_range + code_size;

	AcquireSRWLockShared (&g_dynamic_function_table_lock);

	DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);

	if (found_entry != NULL) {

		AcquireSRWLockShared (&found_entry->lock);

		g_assert_checked (found_entry->begin_range <= begin_range);
		g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
		g_assert_checked (found_entry->rt_funcs != NULL);

		for (int i = 0; i < found_entry->rt_funcs_current_count; ++i) {
			PRUNTIME_FUNCTION current_rt_func = (PRUNTIME_FUNCTION)(&found_entry->rt_funcs [i]);

			// Is this our RT function entry?
			if (found_entry->begin_range + current_rt_func->BeginAddress <= begin_range &&
				found_entry->begin_range + current_rt_func->EndAddress >= end_range) {
				found_rt_func = current_rt_func;
				break;
			}
		}

		ReleaseSRWLockShared (&found_entry->lock);
	}

	ReleaseSRWLockShared (&g_dynamic_function_table_lock);

	return found_rt_func;
}

static PRUNTIME_FUNCTION
mono_arch_unwindinfo_find_pc_rt_func_in_table (const gpointer pc)
{
	return mono_arch_unwindinfo_find_rt_func_in_table (pc, 0);
}
#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
static void
validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
{
	// Validation method checking that the runtime function table is sorted as expected and doesn't include overlapping regions.
	// The method will assert on failure to explicitly indicate which check failed.
	g_assert_checked (entry != NULL);
	g_assert_checked (entry->rt_funcs_max_count >= entry->rt_funcs_current_count);
	g_assert_checked (entry->rt_funcs != NULL);

	PRUNTIME_FUNCTION current_rt_func = NULL;
	PRUNTIME_FUNCTION previous_rt_func = NULL;
	for (int i = 0; i < entry->rt_funcs_current_count; ++i) {
		current_rt_func = &(entry->rt_funcs [i]);

		g_assert_checked (current_rt_func->BeginAddress < current_rt_func->EndAddress);
		g_assert_checked (current_rt_func->EndAddress <= current_rt_func->UnwindData);

		if (previous_rt_func != NULL) {
			// List should be sorted in ascending order based on BeginAddress.
			g_assert_checked (previous_rt_func->BeginAddress < current_rt_func->BeginAddress);

			// Check for overlapping regions.
			g_assert_checked (previous_rt_func->EndAddress <= current_rt_func->BeginAddress);
		}

		previous_rt_func = current_rt_func;
	}
}

#else

static void
validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
{
}
#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
1627 PRUNTIME_FUNCTION
1628 mono_arch_unwindinfo_insert_rt_func_in_table (const gpointer code, gsize code_size)
1630 PRUNTIME_FUNCTION new_rt_func = NULL;
1632 gsize begin_range = (gsize)code;
1633 gsize end_range = begin_range + code_size;
1635 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1637 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1639 if (found_entry != NULL) {
1641 AcquireSRWLockExclusive (&found_entry->lock);
1643 g_assert_checked (found_entry->begin_range <= begin_range);
1644 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1645 g_assert_checked (found_entry->rt_funcs != NULL);
1646 g_assert_checked ((guchar*)code - found_entry->begin_range >= 0);
1648 gsize code_offset = (gsize)code - found_entry->begin_range;
1649 gsize entry_count = found_entry->rt_funcs_current_count;
1650 gsize max_entry_count = found_entry->rt_funcs_max_count;
1651 PRUNTIME_FUNCTION current_rt_funcs = found_entry->rt_funcs;
1653 RUNTIME_FUNCTION new_rt_func_data;
1654 new_rt_func_data.BeginAddress = code_offset;
1655 new_rt_func_data.EndAddress = code_offset + code_size;
1657 gsize aligned_unwind_data = ALIGN_TO(end_range, sizeof(host_mgreg_t));
1658 new_rt_func_data.UnwindData = aligned_unwind_data - found_entry->begin_range;
1660 g_assert_checked (new_rt_func_data.UnwindData == ALIGN_TO(new_rt_func_data.EndAddress, sizeof (host_mgreg_t)));
1662 PRUNTIME_FUNCTION new_rt_funcs = NULL;
1664 // List needs to be sorted in ascending order based on BeginAddress (Windows requirement if list
1665 // going to be directly reused in OS func tables. Check if we can append to end of existing table without realloc.
1666 if (entry_count == 0 || (entry_count < max_entry_count) && (current_rt_funcs [entry_count - 1].BeginAddress) < code_offset) {
1667 new_rt_func = &(current_rt_funcs [entry_count]);
1668 *new_rt_func = new_rt_func_data;
1669 entry_count++;
1670 } else {
1671 // No easy way out, need to realloc, grow to double size (or current max, if to small).
1672 max_entry_count = entry_count * 2 > max_entry_count ? entry_count * 2 : max_entry_count;
1673 new_rt_funcs = g_new0 (RUNTIME_FUNCTION, max_entry_count);
1675 if (new_rt_funcs != NULL) {
1676 gsize from_index = 0;
1677 gsize to_index = 0;
1679 // Copy from old table into new table. Make sure new rt func gets inserted
1680 // into correct location based on sort order.
1681 for (; from_index < entry_count; ++from_index) {
1682 if (new_rt_func == NULL && current_rt_funcs [from_index].BeginAddress > new_rt_func_data.BeginAddress) {
1683 new_rt_func = &(new_rt_funcs [to_index++]);
1684 *new_rt_func = new_rt_func_data;
1687 if (current_rt_funcs [from_index].UnwindData != 0)
1688 new_rt_funcs [to_index++] = current_rt_funcs [from_index];
1691 // If we didn't insert by now, put it last in the list.
1692 if (new_rt_func == NULL) {
1693 new_rt_func = &(new_rt_funcs [to_index]);
1694 *new_rt_func = new_rt_func_data;
1698 entry_count++;
		// Update the stats for the current entry.
		found_entry->rt_funcs_current_count = entry_count;
		found_entry->rt_funcs_max_count = max_entry_count;

		if (new_rt_funcs == NULL && g_rtl_grow_function_table != NULL) {
			// No new table, just report the increase in use.
			g_assert_checked (found_entry->handle != NULL);
			g_rtl_grow_function_table (found_entry->handle, found_entry->rt_funcs_current_count);
		} else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table != NULL) {
			// New table; delete the old table and rt funcs, and register a new one.
			g_assert_checked (g_rtl_delete_growable_function_table != NULL);
			g_rtl_delete_growable_function_table (found_entry->handle);
			found_entry->handle = NULL;
			g_free (found_entry->rt_funcs);
			found_entry->rt_funcs = new_rt_funcs;
			DWORD result = g_rtl_add_growable_function_table (&found_entry->handle,
						found_entry->rt_funcs, found_entry->rt_funcs_current_count,
						found_entry->rt_funcs_max_count, found_entry->begin_range, found_entry->end_range);
			g_assert (!result);
		} else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table == NULL) {
			// No table registered with the OS, the callback solution is in use. Switch tables.
			g_free (found_entry->rt_funcs);
			found_entry->rt_funcs = new_rt_funcs;
		} else if (new_rt_funcs == NULL && g_rtl_grow_function_table == NULL) {
			// No table registered with the OS, the callback solution is in use; nothing to do.
		} else {
			g_assert_not_reached ();
		}

		// Only included in checked builds. Validates the structure of the table after insert.
		validate_rt_funcs_in_table_no_lock (found_entry);

		ReleaseSRWLockExclusive (&found_entry->lock);
	}

	ReleaseSRWLockShared (&g_dynamic_function_table_lock);

	return new_rt_func;
}
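/*
 * Added commentary (an inference from the names above, not from the original
 * sources): the g_rtl_* pointers presumably refer to the growable function
 * table APIs (RtlAddGrowableFunctionTable and friends), resolved dynamically
 * because they only exist on newer versions of Windows. When they are
 * unavailable, unwind info is served on demand through the callback below
 * instead of via pre-registered tables.
 */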
static PRUNTIME_FUNCTION
MONO_GET_RUNTIME_FUNCTION_CALLBACK (DWORD64 ControlPc, IN PVOID Context)
{
	return mono_arch_unwindinfo_find_pc_rt_func_in_table ((gpointer)ControlPc);
}
static void
initialize_unwind_info_internal_ex (GSList *unwind_ops, PUNWIND_INFO unwindinfo)
{
	if (unwind_ops != NULL && unwindinfo != NULL) {
		MonoUnwindOp *unwind_op_data;
		gboolean sp_alloced = FALSE;
		gboolean fp_alloced = FALSE;

		// Replay the collected unwind info and set it up in the Windows format.
		for (GSList *l = unwind_ops; l; l = l->next) {
			unwind_op_data = (MonoUnwindOp *)l->data;
			switch (unwind_op_data->op) {
			case DW_CFA_offset: {
				// Pushes should go before SP/FP allocation to be compliant with the Windows x64 ABI.
				// TODO: DW_CFA_offset can also be used to move saved regs into the frame.
				if (unwind_op_data->reg != AMD64_RIP && sp_alloced == FALSE && fp_alloced == FALSE)
					mono_arch_unwindinfo_add_push_nonvol (unwindinfo, unwind_op_data);
				break;
			}
			case DW_CFA_mono_sp_alloc_info_win64: {
				mono_arch_unwindinfo_add_alloc_stack (unwindinfo, unwind_op_data);
				sp_alloced = TRUE;
				break;
			}
			case DW_CFA_mono_fp_alloc_info_win64: {
				mono_arch_unwindinfo_add_set_fpreg (unwindinfo, unwind_op_data);
				fp_alloced = TRUE;
				break;
			}
			default:
				break;
			}
		}
	}
}
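/*
 * Added illustration (not from the original sources): for a typical prolog
 *
 *   push rbp          -> UWOP_PUSH_NONVOL       (via DW_CFA_offset)
 *   sub  rsp, 0x20    -> UWOP_ALLOC_SMALL/LARGE (via DW_CFA_mono_sp_alloc_info_win64)
 *   lea  rbp, [rsp]   -> UWOP_SET_FPREG         (via DW_CFA_mono_fp_alloc_info_win64)
 *
 * the replay above appears to translate each collected DWARF-style op into
 * its Windows x64 unwind-code counterpart.
 */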
static PUNWIND_INFO
initialize_unwind_info_internal (GSList *unwind_ops)
{
	PUNWIND_INFO unwindinfo;

	mono_arch_unwindinfo_create ((gpointer*)&unwindinfo);
	initialize_unwind_info_internal_ex (unwind_ops, unwindinfo);

	return unwindinfo;
}
guchar
mono_arch_unwindinfo_get_code_count (GSList *unwind_ops)
{
	UNWIND_INFO unwindinfo = {0};
	initialize_unwind_info_internal_ex (unwind_ops, &unwindinfo);
	return unwindinfo.CountOfCodes;
}
PUNWIND_INFO
mono_arch_unwindinfo_alloc_unwind_info (GSList *unwind_ops)
{
	if (!unwind_ops)
		return NULL;

	return initialize_unwind_info_internal (unwind_ops);
}
void
mono_arch_unwindinfo_free_unwind_info (PUNWIND_INFO unwind_info)
{
	g_free (unwind_info);
}
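/*
 * Usage sketch (added illustration; a hypothetical caller, mirroring
 * mono_arch_unwindinfo_install_tramp_unwind_info further below):
 *
 *   PUNWIND_INFO ui = mono_arch_unwindinfo_alloc_unwind_info (unwind_ops);
 *   // ... emit code into `code` ...
 *   mono_arch_unwindinfo_install_method_unwind_info (&ui, code, code_size); // frees ui
 *
 * For JIT-compiled methods the same lifecycle runs through
 * mono_arch_unwindinfo_init_method_unwind_info and
 * mono_arch_unwindinfo_install_method_unwind_info below.
 */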
guint
mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg)
{
	MonoCompile *current_cfg = (MonoCompile *)cfg;
	g_assert (current_cfg->arch.unwindinfo == NULL);
	current_cfg->arch.unwindinfo = initialize_unwind_info_internal (current_cfg->unwind_ops);
	return mono_arch_unwindinfo_get_size (((PUNWIND_INFO)(current_cfg->arch.unwindinfo))->CountOfCodes);
}
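/*
 * Added commentary (inferred from the return value above, not from the
 * original sources): the returned size presumably lets the JIT reserve extra
 * space after the method body, which install_method_unwind_info below then
 * fills with the finished, aligned UNWIND_INFO.
 */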
void
mono_arch_unwindinfo_install_method_unwind_info (PUNWIND_INFO *monoui, gpointer code, guint code_size)
{
	PUNWIND_INFO unwindinfo, targetinfo;
	guchar codecount;
	guint64 targetlocation;
	if (!*monoui)
		return;

	unwindinfo = *monoui;
	targetlocation = (guint64)&(((guchar*)code) [code_size]);
	targetinfo = (PUNWIND_INFO) ALIGN_TO (targetlocation, sizeof (host_mgreg_t));

	memcpy (targetinfo, unwindinfo, sizeof (UNWIND_INFO) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));

	codecount = unwindinfo->CountOfCodes;
	if (codecount) {
		memcpy (&targetinfo->UnwindCode [0], &unwindinfo->UnwindCode [MONO_MAX_UNWIND_CODES - codecount],
			sizeof (UNWIND_CODE) * codecount);
	}

#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
	if (codecount) {
		// Validate the order of unwind op codes in checked builds. Offsets should be in descending order.
		// In the first iteration previous == current; this is intended, to handle UWOP_ALLOC_LARGE as the first item.
		int previous = 0;
		for (int current = 0; current < codecount; current++) {
			g_assert_checked (targetinfo->UnwindCode [previous].CodeOffset >= targetinfo->UnwindCode [current].CodeOffset);
			previous = current;
			if (targetinfo->UnwindCode [current].UnwindOp == UWOP_ALLOC_LARGE) {
				if (targetinfo->UnwindCode [current].OpInfo == 0) {
					current++;
				} else {
					current += 2;
				}
			}
		}
	}
#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */

	mono_arch_unwindinfo_free_unwind_info (unwindinfo);
	*monoui = NULL;

	// Register unwind info in table.
	mono_arch_unwindinfo_insert_rt_func_in_table (code, code_size);
}
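/*
 * Resulting memory layout (added illustration, assuming an 8-byte
 * host_mgreg_t):
 *
 *   code                      code + code_size       ALIGN_TO (., 8)
 *   |----------- method body -----------|-- padding --|-- UNWIND_INFO --|
 *
 * The aligned address is exactly what the UnwindData offset computed in
 * mono_arch_unwindinfo_insert_rt_func_in_table points at.
 */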
void
mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size)
{
	PUNWIND_INFO unwindinfo = initialize_unwind_info_internal (unwind_ops);
	if (unwindinfo != NULL) {
		mono_arch_unwindinfo_install_method_unwind_info (&unwindinfo, code, code_size);
	}
}
void
mono_arch_code_chunk_new (void *chunk, int size)
{
	mono_arch_unwindinfo_insert_range_in_table (chunk, size);
}

void
mono_arch_code_chunk_destroy (void *chunk)
{
	mono_arch_unwindinfo_remove_pc_range_in_table (chunk);
}
#endif /* MONO_ARCH_HAVE_UNWIND_TABLE */
#if MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT)
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both calling conventions */
	const int kMaxCodeSize = 64;

	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy.
	 * state is moved to $rax so it's set up as the return value and we can overwrite $rsi.
	 */
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* setup the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	x86_cld (code);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);
	amd64_movsl (code);

	/* now restore the registers from the LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);

#ifdef WIN32
	amd64_mov_reg_reg (code, AMD64_R14, AMD64_ARG_REG3, 8);
#else
	amd64_mov_reg_reg (code, AMD64_R12, AMD64_ARG_REG3, 8);
#endif

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assertf ((code - start) <= kMaxCodeSize, "%d %d", (int)(code - start), kMaxCodeSize);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	saved = start;
	return (MonoContinuationRestore)saved;
}
#endif /* MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT) */
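/*
 * Added commentary (an inference, not from the original sources): lmf_addr is
 * parked in a callee-saved register (R14 on Windows, R12 elsewhere) just
 * before the jump, presumably so the code at the continuation's return_ip can
 * still find it after this thunk has clobbered all the argument registers.
 */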
/*
 * mono_arch_setup_resume_sighandler_ctx:
 *
 *   Setup CTX so execution continues at FUNC.
 */
void
mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
{
	/*
	 * When resuming from a signal handler, the stack should be misaligned, just like right after
	 * a call.
	 */
	if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
		MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
	MONO_CONTEXT_SET_IP (ctx, func);
}
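/*
 * Added illustration (not from the original sources): on entry to an x86-64
 * function RSP % 16 == 8, because the call instruction pushed an 8-byte
 * return address onto a previously 16-byte-aligned stack. The adjustment
 * above makes the resumed FUNC observe the same invariant, so any
 * alignment-sensitive code in it (e.g. SSE spills) behaves as after a real
 * call.
 */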
#if !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT)
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	g_assert_not_reached ();
	return NULL;
}
#endif /* !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT) */
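/*
 * Added commentary (an inference from the +-1 below, not from the original
 * sources): a return address points at the instruction after a call, so
 * stack walkers conventionally step the IP back by one byte to land inside
 * the calling instruction; these two helpers apply and revert that
 * adjustment on the saved RIP.
 */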
void
mono_arch_undo_ip_adjustment (MonoContext *ctx)
{
	ctx->gregs [AMD64_RIP]++;
}

void
mono_arch_do_ip_adjustment (MonoContext *ctx)
{
	ctx->gregs [AMD64_RIP]--;
}