1 /**
2 * \file
3 * exception support for AMD64
5 * Authors:
6 * Dietmar Maurer (dietmar@ximian.com)
7 * Johan Lorensson (lateralusx.github@gmail.com)
9 * (C) 2001 Ximian, Inc.
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
14 #include <config.h>
16 // Secret password to unlock wcscat_s on mxe, must happen before string.h is included
17 #ifdef __MINGW32__
18 #define MINGW_HAS_SECURE_API 1
19 #endif
21 #include <glib.h>
22 #include <string.h>
23 #include <signal.h>
24 #ifdef HAVE_UCONTEXT_H
25 #include <ucontext.h>
26 #endif
28 #include <mono/arch/amd64/amd64-codegen.h>
29 #include <mono/metadata/abi-details.h>
30 #include <mono/metadata/appdomain.h>
31 #include <mono/metadata/tabledefs.h>
32 #include <mono/metadata/threads.h>
33 #include <mono/metadata/threads-types.h>
34 #include <mono/metadata/debug-helpers.h>
35 #include <mono/metadata/exception.h>
36 #include <mono/metadata/gc-internals.h>
37 #include <mono/metadata/mono-debug.h>
38 #include <mono/utils/mono-mmap.h>
39 #include <mono/utils/mono-state.h>
41 #include "mini.h"
42 #include "mini-amd64.h"
43 #include "mini-runtime.h"
44 #include "aot-runtime.h"
45 #include "tasklets.h"
46 #include "mono/utils/mono-tls-inline.h"
48 #ifdef TARGET_WIN32
49 static void (*restore_stack) (void);
50 static MonoW32ExceptionHandler fpe_handler;
51 static MonoW32ExceptionHandler ill_handler;
52 static MonoW32ExceptionHandler segv_handler;
54 LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
55 void *mono_win_vectored_exception_handle;
57 #define W32_SEH_HANDLE_EX(_ex) \
58 if (_ex##_handler) _ex##_handler(er->ExceptionCode, &info, ctx)
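/* For example, W32_SEH_HANDLE_EX(segv) expands to:
 *   if (segv_handler) segv_handler(er->ExceptionCode, &info, ctx);
 * i.e. it forwards the exception to the registered handler, if any. */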
60 static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
62 #ifndef MONO_CROSS_COMPILE
63 if (mono_old_win_toplevel_exception_filter) {
64 return (*mono_old_win_toplevel_exception_filter)(ep);
66 #endif
68 if (mono_dump_start ())
69 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
71 return EXCEPTION_CONTINUE_SEARCH;
74 #if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
75 static gpointer
76 get_win32_restore_stack (void)
78 static guint8 *start = NULL;
79 guint8 *code;
81 if (start)
82 return start;
84 const int size = 128;
86 /* restore_stack (void) */
87 start = code = mono_global_codeman_reserve (size);
89 amd64_push_reg (code, AMD64_RBP);
90 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
92 /* push 32 bytes of stack space for Win64 calling convention */
93 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
95 /* restore guard page */
96 amd64_mov_reg_imm (code, AMD64_R11, _resetstkoflw);
97 amd64_call_reg (code, AMD64_R11);
99 /* get jit_tls with context to restore */
100 amd64_mov_reg_imm (code, AMD64_R11, mono_tls_get_jit_tls_extern);
101 amd64_call_reg (code, AMD64_R11);
103 /* move jit_tls from return reg to arg reg */
104 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
106 /* retrieve pointer to saved context */
107 amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoJitTlsData, stack_restore_ctx));
109 /* this call does not return */
110 amd64_mov_reg_imm (code, AMD64_R11, mono_restore_context);
111 amd64_call_reg (code, AMD64_R11);
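/* A rough C sketch of the code emitted above (ignoring the Win64 shadow-space setup):
 *   static void restore_stack (void) {
 *       _resetstkoflw ();                                         // re-arm the stack guard page
 *       MonoJitTlsData *jit_tls = mono_tls_get_jit_tls_extern (); // result moved into the arg register
 *       mono_restore_context (&jit_tls->stack_restore_ctx);       // does not return
 *   }
 */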
113 g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size);
115 mono_arch_flush_icache (start, code - start);
116 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
118 return start;
120 #else
121 static gpointer
122 get_win32_restore_stack (void)
124 // _resetstkoflw unsupported on non-desktop Windows platforms.
125 return NULL;
127 #endif /* G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) */
130 * Unhandled Exception Filter
131 * Top-level per-process exception handler.
133 static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
135 EXCEPTION_RECORD* er;
136 CONTEXT* ctx;
137 LONG res;
138 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
139 MonoDomain* domain = mono_domain_get ();
140 MonoWindowsSigHandlerInfo info = { TRUE, ep };
142 /* If the thread is not managed by the runtime return early */
143 if (!jit_tls)
144 return EXCEPTION_CONTINUE_SEARCH;
146 res = EXCEPTION_CONTINUE_EXECUTION;
148 er = ep->ExceptionRecord;
149 ctx = ep->ContextRecord;
151 switch (er->ExceptionCode) {
152 case EXCEPTION_STACK_OVERFLOW:
153 if (!mono_aot_only && restore_stack) {
154 if (mono_arch_handle_exception (ctx, domain->stack_overflow_ex)) {
155 /* need to restore stack protection once stack is unwound
156 * restore_stack will restore stack protection and then
157 * resume control to the saved stack_restore_ctx */
158 mono_sigctx_to_monoctx (ctx, &jit_tls->stack_restore_ctx);
159 ctx->Rip = (guint64)restore_stack;
161 } else {
162 info.handled = FALSE;
164 break;
165 case EXCEPTION_ACCESS_VIOLATION:
166 W32_SEH_HANDLE_EX(segv);
167 break;
168 case EXCEPTION_ILLEGAL_INSTRUCTION:
169 W32_SEH_HANDLE_EX(ill);
170 break;
171 case EXCEPTION_INT_DIVIDE_BY_ZERO:
172 case EXCEPTION_INT_OVERFLOW:
173 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
174 case EXCEPTION_FLT_OVERFLOW:
175 case EXCEPTION_FLT_UNDERFLOW:
176 case EXCEPTION_FLT_INEXACT_RESULT:
177 W32_SEH_HANDLE_EX(fpe);
178 break;
179 default:
180 info.handled = FALSE;
181 break;
184 if (!info.handled) {
185 /* Don't copy context back if we chained the exception
186 * as the handler may have modified the EXCEPTION_POINTERS
187 * directly. We don't pass sigcontext to chained handlers.
188 * Return continue search so the UnhandledExceptionFilter
189 * can correctly chain the exception.
191 res = EXCEPTION_CONTINUE_SEARCH;
194 return res;
197 void win32_seh_init()
199 if (!mono_aot_only)
200 restore_stack = (void (*) (void))get_win32_restore_stack ();
202 mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
203 mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
206 void win32_seh_cleanup()
208 guint32 ret = 0;
210 if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);
212 ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
213 g_assert (ret);
216 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
218 switch (type) {
219 case SIGFPE:
220 fpe_handler = handler;
221 break;
222 case SIGILL:
223 ill_handler = handler;
224 break;
225 case SIGSEGV:
226 segv_handler = handler;
227 break;
228 default:
229 break;
233 #endif /* TARGET_WIN32 */
235 #ifndef DISABLE_JIT
237 * mono_arch_get_restore_context:
239 * Returns a pointer to a method which restores a previously saved sigcontext.
241 gpointer
242 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
244 guint8 *start = NULL;
245 guint8 *code;
246 MonoJumpInfo *ji = NULL;
247 GSList *unwind_ops = NULL;
248 int i, gregs_offset;
250 /* restore_context (MonoContext *ctx) */
252 const int size = 256;
254 start = code = (guint8 *)mono_global_codeman_reserve (size);
256 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
258 /* Restore all registers except %rip and %r11 */
259 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
260 for (i = 0; i < AMD64_NREG; ++i) {
261 if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
262 amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
266 * The context resides on the stack, in the stack frame of the
267 * caller of this function. The stack pointer that we need to
268 * restore is potentially many stack frames higher up, so the
269 * distance between them can easily be more than the red zone
270 * size. Hence the stack pointer can be restored only after
271 * we have finished loading everything from the context.
273 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
274 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
275 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
277 /* jump to the saved IP */
278 amd64_jump_reg (code, AMD64_R11);
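/* In effect the emitted restore_context does (sketch):
 *   load every general register except RIP, RSP and R8-R11 from ctx->gregs,
 *   then set RSP = ctx->gregs [AMD64_RSP] and jump to ctx->gregs [AMD64_RIP]
 * (R8-R11 are used as scratch and are not restored). */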
280 g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size);
282 mono_arch_flush_icache (start, code - start);
283 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
285 if (info)
286 *info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);
288 return start;
292 * mono_arch_get_call_filter:
294 * Returns a pointer to a method which calls an exception filter. We
295 * also use this function to call finally handlers (we pass NULL as
296 * @exc object in this case).
298 gpointer
299 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
301 guint8 *start;
302 int i, gregs_offset;
303 guint8 *code;
304 guint32 pos;
305 MonoJumpInfo *ji = NULL;
306 GSList *unwind_ops = NULL;
307 const int kMaxCodeSize = 128;
309 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
311 /* call_filter (MonoContext *ctx, unsigned long eip) */
312 code = start;
314 /* Alloc new frame */
315 amd64_push_reg (code, AMD64_RBP);
316 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
318 /* Save callee saved regs */
319 pos = 0;
320 for (i = 0; i < AMD64_NREG; ++i)
321 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
322 amd64_push_reg (code, i);
323 pos += 8;
326 /* Save EBP */
327 pos += 8;
328 amd64_push_reg (code, AMD64_RBP);
330 /* Make stack misaligned, the call will make it aligned again */
331 if (! (pos & 8))
332 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
334 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
336 /* set new EBP */
337 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
338 /* load callee saved regs */
339 for (i = 0; i < AMD64_NREG; ++i) {
340 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
341 amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
343 /* load exc register */
344 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);
346 /* call the handler */
347 amd64_call_reg (code, AMD64_ARG_REG2);
349 if (! (pos & 8))
350 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
352 /* restore RBP */
353 amd64_pop_reg (code, AMD64_RBP);
355 /* Restore callee saved regs */
356 for (i = AMD64_NREG; i >= 0; --i)
357 if (AMD64_IS_CALLEE_SAVED_REG (i))
358 amd64_pop_reg (code, i);
360 #if TARGET_WIN32
361 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
362 amd64_pop_reg (code, AMD64_RBP);
363 #else
364 amd64_leave (code);
365 #endif
366 amd64_ret (code);
368 g_assertf ((code - start) <= kMaxCodeSize, "%d %d", (int)(code - start), kMaxCodeSize);
370 mono_arch_flush_icache (start, code - start);
371 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
373 if (info)
374 *info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);
376 return start;
378 #endif /* !DISABLE_JIT */
381 * The first few arguments are dummy, to force the other arguments to be passed on
382 * the stack; this avoids overwriting the argument registers in the throw trampoline.
384 void
385 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
386 guint64 dummy5, guint64 dummy6,
387 MonoContext *mctx, MonoObject *exc, gboolean rethrow, gboolean preserve_ips)
389 ERROR_DECL (error);
390 MonoContext ctx;
392 /* mctx is on the caller's stack */
393 memcpy (&ctx, mctx, sizeof (MonoContext));
395 if (mono_object_isinst_checked (exc, mono_defaults.exception_class, error)) {
396 MonoException *mono_ex = (MonoException*)exc;
397 if (!rethrow && !mono_ex->caught_in_unmanaged) {
398 mono_ex->stack_trace = NULL;
399 mono_ex->trace_ips = NULL;
400 } else if (preserve_ips) {
401 mono_ex->caught_in_unmanaged = TRUE;
404 mono_error_assert_ok (error);
406 /* adjust eip so that it points into the call instruction */
407 ctx.gregs [AMD64_RIP] --;
409 mono_handle_exception (&ctx, exc);
410 mono_restore_context (&ctx);
411 g_assert_not_reached ();
414 void
415 mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
416 guint64 dummy5, guint64 dummy6,
417 MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
419 guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
420 MonoException *ex;
422 ex = mono_exception_from_token (m_class_get_image (mono_defaults.exception_class), ex_token);
424 mctx->gregs [AMD64_RIP] -= pc_offset;
426 /* Negate the ip adjustment done in mono_amd64_throw_exception () */
427 mctx->gregs [AMD64_RIP] += 1;
429 mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE, FALSE);
432 void
433 mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
434 guint64 dummy5, guint64 dummy6,
435 MonoContext *mctx, guint32 dummy7, gint64 dummy8)
437 /* Only the register parameters are valid */
438 MonoContext ctx;
440 /* mctx is on the caller's stack */
441 memcpy (&ctx, mctx, sizeof (MonoContext));
443 mono_resume_unwind (&ctx);
446 #ifndef DISABLE_JIT
448 * get_throw_trampoline:
450 * Generate a call to mono_amd64_throw_exception/
451 * mono_amd64_throw_corlib_exception.
453 static gpointer
454 get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot, gboolean preserve_ips)
456 guint8* start;
457 guint8 *code;
458 MonoJumpInfo *ji = NULL;
459 GSList *unwind_ops = NULL;
460 int i, stack_size, arg_offsets [16], ctx_offset, regs_offset;
461 const int kMaxCodeSize = 256;
463 #ifdef TARGET_WIN32
464 const int dummy_stack_space = 6 * sizeof (target_mgreg_t); /* Windows expects stack space allocated for all 6 dummy args. */
465 #else
466 const int dummy_stack_space = 0;
467 #endif
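/* On Win64 the four register-passed dummies are backed by the caller's 32-byte shadow space
 * and dummy5/dummy6 are passed on the stack, so the callee's stack-passed arguments start
 * 6 * sizeof (target_mgreg_t) = 48 bytes above the stack pointer at the call site. */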
469 if (info)
470 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
471 else
472 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
474 /* The stack is unaligned on entry */
475 stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;
477 code = start;
479 if (info)
480 unwind_ops = mono_arch_get_cie_program ();
482 /* Alloc frame */
483 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
484 if (info) {
485 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
486 mono_add_unwind_op_sp_alloc (unwind_ops, code, start, stack_size);
490 * To hide linux/windows calling convention differences, we pass all arguments on
491 * the stack by passing 6 dummy values in registers.
494 arg_offsets [0] = dummy_stack_space + 0;
495 arg_offsets [1] = dummy_stack_space + sizeof (target_mgreg_t);
496 arg_offsets [2] = dummy_stack_space + sizeof (target_mgreg_t) * 2;
497 arg_offsets [3] = dummy_stack_space + sizeof (target_mgreg_t) * 3;
498 ctx_offset = dummy_stack_space + sizeof (target_mgreg_t) * 4;
499 regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
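/* Illustrative frame layout, as offsets from RSP after the frame allocation, for the
 * non-Windows case where dummy_stack_space == 0 (on Windows everything shifts up by 48):
 *   [ 0] arg1: pointer to the MonoContext stored in this frame
 *   [ 8] arg2: exc object / ex_token_index
 *   [16] arg3: rethrow flag / pc offset
 *   [24] arg4: preserve_ips flag
 *   [32] MonoContext (ctx_offset); its gregs array starts at regs_offset
 */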
501 /* Save registers */
502 for (i = 0; i < AMD64_NREG; ++i)
503 if (i != AMD64_RSP)
504 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof (target_mgreg_t)), i, sizeof (target_mgreg_t));
505 /* Save RSP */
506 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof (target_mgreg_t));
507 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
508 /* Save IP */
509 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof (target_mgreg_t));
510 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof (target_mgreg_t)), AMD64_RAX, sizeof (target_mgreg_t));
511 /* Set arg1 == ctx */
512 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
513 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof (target_mgreg_t));
514 /* Set arg2 == exc/ex_token_index */
515 if (resume_unwind)
516 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof (target_mgreg_t));
517 else
518 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof (target_mgreg_t));
519 /* Set arg3 == rethrow/pc offset */
520 if (resume_unwind) {
521 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof (target_mgreg_t));
522 } else if (corlib) {
523 if (llvm_abs)
525 * The caller doesn't pass in a pc/pc offset; instead we simply use the
526 * caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
528 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof (target_mgreg_t));
529 else
530 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof (target_mgreg_t));
531 } else {
532 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof (target_mgreg_t));
534 /* Set arg4 == preserve_ips */
535 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], preserve_ips, sizeof (target_mgreg_t));
538 if (aot) {
539 MonoJitICallId icall_id;
541 if (resume_unwind)
542 icall_id = MONO_JIT_ICALL_mono_amd64_resume_unwind;
543 else if (corlib)
544 icall_id = MONO_JIT_ICALL_mono_amd64_throw_corlib_exception;
545 else
546 icall_id = MONO_JIT_ICALL_mono_amd64_throw_exception;
547 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (icall_id));
548 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
549 } else {
550 amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
552 amd64_call_reg (code, AMD64_R11);
553 amd64_breakpoint (code);
555 mono_arch_flush_icache (start, code - start);
557 g_assertf ((code - start) <= kMaxCodeSize, "%d %d", (int)(code - start), kMaxCodeSize);
558 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
560 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
562 if (info)
563 *info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);
565 return start;
569 * mono_arch_get_throw_exception:
570 * \returns a function pointer which can be used to raise
571 * exceptions. The returned function has the following
572 * signature: void (*func) (MonoException *exc);
574 gpointer
575 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
577 return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot, FALSE);
580 gpointer
581 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
583 return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot, FALSE);
586 gpointer
587 mono_arch_get_rethrow_preserve_exception (MonoTrampInfo **info, gboolean aot)
589 return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_preserve_exception", aot, TRUE);
593 * mono_arch_get_throw_corlib_exception:
595 * Returns a function pointer which can be used to raise
596 * corlib exceptions. The returned function has the following
597 * signature: void (*func) (guint32 ex_token, guint32 offset);
598 * Here, offset is the offset which needs to be subtracted from the caller IP
599 * to get the IP of the throw. Passing the offset has the advantage that it
600 * needs no relocations in the caller.
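* Example with made-up addresses: if the call to this trampoline returns to 0x1050 and the
* point that should appear to throw is 0x1040, the JIT passes offset = 0x10 and
* mono_amd64_throw_corlib_exception () rewinds the saved RIP by that amount.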
602 gpointer
603 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
605 return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot, FALSE);
607 #endif /* !DISABLE_JIT */
610 * mono_arch_unwind_frame:
612 * This function is used to gather information from @ctx, and store it in @frame_info.
613 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
614 * is modified if needed.
615 * Returns TRUE on success, FALSE otherwise.
617 gboolean
618 mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls,
619 MonoJitInfo *ji, MonoContext *ctx,
620 MonoContext *new_ctx, MonoLMF **lmf,
621 host_mgreg_t **save_locations,
622 StackFrameInfo *frame)
624 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
625 int i;
627 memset (frame, 0, sizeof (StackFrameInfo));
628 frame->ji = ji;
630 *new_ctx = *ctx;
632 if (ji != NULL) {
633 host_mgreg_t regs [MONO_MAX_IREGS + 1];
634 guint8 *cfa;
635 guint32 unwind_info_len;
636 guint8 *unwind_info;
637 guint8 *epilog = NULL;
639 if (ji->is_trampoline)
640 frame->type = FRAME_TYPE_TRAMPOLINE;
641 else
642 frame->type = FRAME_TYPE_MANAGED;
644 unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);
646 frame->unwind_info = unwind_info;
647 frame->unwind_info_len = unwind_info_len;
650 printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
651 mono_print_unwind_info (unwind_info, unwind_info_len);
653 /* LLVM compiled code doesn't have this info */
654 if (ji->has_arch_eh_info)
655 epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);
657 for (i = 0; i < AMD64_NREG; ++i)
658 regs [i] = new_ctx->gregs [i];
660 gboolean success = mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
661 (guint8*)ji->code_start + ji->code_size,
662 (guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
663 save_locations, MONO_MAX_IREGS, &cfa);
665 if (!success)
666 return FALSE;
668 for (i = 0; i < AMD64_NREG; ++i)
669 new_ctx->gregs [i] = regs [i];
671 /* The CFA becomes the new SP value */
672 new_ctx->gregs [AMD64_RSP] = (host_mgreg_t)(gsize)cfa;
674 /* Adjust IP */
675 new_ctx->gregs [AMD64_RIP] --;
677 return TRUE;
678 } else if (*lmf) {
679 guint64 rip;
681 g_assert ((((guint64)(*lmf)->previous_lmf) & 2) == 0);
683 if (((guint64)(*lmf)->previous_lmf) & 4) {
684 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
686 rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
687 } else if ((*lmf)->rsp == 0) {
688 /* Top LMF entry */
689 return FALSE;
690 } else {
692 * The rsp field is set just before the call which transitioned to native
693 * code. Obtain the rip from the stack.
695 rip = *(guint64*)((*lmf)->rsp - sizeof(host_mgreg_t));
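/* i.e. the return address pushed by that call sits in the slot just below the saved rsp. */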
698 ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
700 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
701 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
702 * return address.
704 //g_assert (ji);
705 if (!ji)
706 return FALSE;
708 frame->ji = ji;
709 frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
711 if (((guint64)(*lmf)->previous_lmf) & 4) {
712 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
714 /* Trampoline frame */
715 for (i = 0; i < AMD64_NREG; ++i)
716 new_ctx->gregs [i] = ext->ctx->gregs [i];
717 /* Adjust IP */
718 new_ctx->gregs [AMD64_RIP] --;
719 } else {
721 * The registers saved in the LMF will be restored using the normal unwind info,
722 * when the wrapper frame is processed.
724 /* Adjust IP */
725 rip --;
726 new_ctx->gregs [AMD64_RIP] = rip;
727 new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
728 new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
729 for (i = 0; i < AMD64_NREG; ++i) {
730 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
731 new_ctx->gregs [i] = 0;
735 *lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
737 return TRUE;
740 return FALSE;
744 * handle_exception:
746 * Called by resuming from a signal handler.
748 static void
749 handle_signal_exception (gpointer obj)
751 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
752 MonoContext ctx;
754 memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));
756 mono_handle_exception (&ctx, (MonoObject *)obj);
758 mono_restore_context (&ctx);
761 void
762 mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
764 guint64 sp = ctx->gregs [AMD64_RSP];
766 ctx->gregs [AMD64_RDI] = (gsize)user_data;
768 /* Allocate a stack frame below the red zone */
769 sp -= 128;
770 /* The stack should be unaligned */
771 if ((sp % 16) == 0)
772 sp -= 8;
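/* async_cb will be entered as if it had just been called, so RSP must be 8 modulo 16,
 * matching what a function sees after its caller's CALL has pushed a return address. */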
773 #ifdef __linux__
774 /* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
775 *(guint64*)sp = ctx->gregs [AMD64_RIP];
776 #endif
777 ctx->gregs [AMD64_RSP] = sp;
778 ctx->gregs [AMD64_RIP] = (gsize)async_cb;
782 * mono_arch_handle_exception:
783 * \param ctx saved processor state
784 * \param obj the exception object
786 gboolean
787 mono_arch_handle_exception (void *sigctx, gpointer obj)
789 #if defined(MONO_ARCH_USE_SIGACTION)
790 MonoContext mctx;
793 * Handling the exception in the signal handler is problematic, since the original
794 * signal is disabled, and we could run arbitrary code through the debugger. So
795 * resume into the normal stack and do most work there if possible.
797 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
799 /* Pass the ctx parameter in TLS */
800 mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);
802 mctx = jit_tls->ex_ctx;
803 mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
804 mono_monoctx_to_sigctx (&mctx, sigctx);
806 return TRUE;
807 #else
808 MonoContext mctx;
810 mono_sigctx_to_monoctx (sigctx, &mctx);
812 mono_handle_exception (&mctx, obj);
814 mono_monoctx_to_sigctx (&mctx, sigctx);
816 return TRUE;
817 #endif
820 gpointer
821 mono_arch_ip_from_context (void *sigctx)
823 #if defined(MONO_ARCH_USE_SIGACTION)
824 ucontext_t *ctx = (ucontext_t*)sigctx;
826 return (gpointer)UCONTEXT_REG_RIP (ctx);
827 #elif defined(HOST_WIN32)
828 return (gpointer)(((CONTEXT*)sigctx)->Rip);
829 #else
830 MonoContext *ctx = (MonoContext*)sigctx;
831 return (gpointer)ctx->gregs [AMD64_RIP];
832 #endif
835 static MonoObject*
836 restore_soft_guard_pages (void)
838 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
839 if (jit_tls->stack_ovf_guard_base)
840 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
842 if (jit_tls->stack_ovf_pending) {
843 MonoDomain *domain = mono_domain_get ();
844 jit_tls->stack_ovf_pending = 0;
845 return (MonoObject *) domain->stack_overflow_ex;
848 return NULL;
852 * this function modifies mctx so that when it is restored, it
853 * won't execute starting at mctx.eip, but in a function that
854 * will restore the protection on the soft-guard pages and return to
855 * continue at mctx.eip.
857 static void
858 prepare_for_guard_pages (MonoContext *mctx)
860 gpointer *sp;
861 sp = (gpointer *)(mctx->gregs [AMD64_RSP]);
862 sp -= 1;
863 /* the return addr */
864 sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
865 mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
866 mctx->gregs [AMD64_RSP] = (guint64)sp;
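/* Sketch of the synthesized state: the context now looks as if restore_soft_guard_pages ()
 * had just been called from the original location:
 *   RSP -> [ original RIP ]      (serves as the return address)
 *   RIP  = restore_soft_guard_pages
 * so when restore_soft_guard_pages () returns, execution continues at the original IP. */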
869 static void
870 altstack_handle_and_restore (MonoContext *ctx, MonoObject *obj, guint32 flags)
872 MonoContext mctx;
873 MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), MONO_CONTEXT_GET_IP (ctx), NULL);
874 gboolean stack_ovf = (flags & 1) != 0;
875 gboolean nullref = (flags & 2) != 0;
877 if (!ji || (!stack_ovf && !nullref)) {
878 if (mono_dump_start ())
879 mono_handle_native_crash ("SIGSEGV", ctx, NULL);
880 // if couldn't dump or if mono_handle_native_crash returns, abort
881 abort ();
884 mctx = *ctx;
886 mono_handle_exception (&mctx, obj);
887 if (stack_ovf) {
888 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
889 jit_tls->stack_ovf_pending = 1;
890 prepare_for_guard_pages (&mctx);
892 mono_restore_context (&mctx);
895 void
896 mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
898 #if defined(MONO_ARCH_USE_SIGACTION)
899 MonoException *exc = NULL;
900 gpointer *sp;
901 MonoJitTlsData *jit_tls = NULL;
902 MonoContext *copied_ctx = NULL;
903 gboolean nullref = TRUE;
905 jit_tls = mono_tls_get_jit_tls ();
906 g_assert (jit_tls);
908 /* use TLS as temporary storage as we want to avoid
909 * (1) stack allocation on the application stack
910 * (2) calling malloc, because it is not async-safe
911 * (3) using global storage, because this function is not reentrant
913 * tls->orig_ex_ctx is used by the stack walker, which shouldn't be running at this point.
915 copied_ctx = &jit_tls->orig_ex_ctx;
917 if (!mono_is_addr_implicit_null_check (fault_addr))
918 nullref = FALSE;
920 if (stack_ovf)
921 exc = mono_domain_get ()->stack_overflow_ex;
923 /* setup the call frame on the application stack so that control is
924 * returned there and exception handling can continue. We want the call
925 * frame to be as minimal as possible, for example no argument passing that
926 * requires allocation on the stack, as this wouldn't be encoded in unwind
927 * information for the caller frame.
929 sp = (gpointer *) ALIGN_DOWN_TO (UCONTEXT_REG_RSP (sigctx), 16);
930 sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
931 mono_sigctx_to_monoctx (sigctx, copied_ctx);
932 /* at the return from the signal handler execution starts in altstack_handle_and_restore() */
933 UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
934 UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
935 UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
936 UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
937 UCONTEXT_REG_RDX (sigctx) = (stack_ovf ? 1 : 0) | (nullref ? 2 : 0);
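/* With the SysV AMD64 calling convention the first three integer arguments travel in
 * RDI, RSI and RDX, so altstack_handle_and_restore () receives ctx = copied_ctx,
 * obj = exc and flags = (stack_ovf ? 1 : 0) | (nullref ? 2 : 0). */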
938 #endif
941 #ifndef DISABLE_JIT
942 GSList*
943 mono_amd64_get_exception_trampolines (gboolean aot)
945 MonoTrampInfo *info;
946 GSList *tramps = NULL;
948 // FIXME Macro to make one line per trampoline.
950 /* LLVM needs different throw trampolines */
951 get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot, FALSE);
952 info->jit_icall_info = &mono_get_jit_icall_info ()->mono_llvm_throw_corlib_exception_trampoline;
953 tramps = g_slist_prepend (tramps, info);
955 get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot, FALSE);
956 info->jit_icall_info = &mono_get_jit_icall_info ()->mono_llvm_throw_corlib_exception_abs_trampoline;
957 tramps = g_slist_prepend (tramps, info);
959 get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", aot, FALSE);
960 info->jit_icall_info = &mono_get_jit_icall_info ()->mono_llvm_resume_unwind_trampoline;
961 tramps = g_slist_prepend (tramps, info);
963 return tramps;
966 #else
968 GSList*
969 mono_amd64_get_exception_trampolines (gboolean aot)
971 g_assert_not_reached ();
972 return NULL;
975 #endif /* !DISABLE_JIT */
977 void
978 mono_arch_exceptions_init (void)
980 GSList *tramps, *l;
981 gpointer tramp;
983 if (mono_ee_features.use_aot_trampolines) {
985 // FIXME Macro can make one line per trampoline here.
986 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
987 mono_register_jit_icall_info (&mono_get_jit_icall_info ()->mono_llvm_throw_corlib_exception_trampoline, tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE, NULL);
989 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
990 mono_register_jit_icall_info (&mono_get_jit_icall_info ()->mono_llvm_throw_corlib_exception_abs_trampoline, tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE, NULL);
992 tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
993 mono_register_jit_icall_info (&mono_get_jit_icall_info ()->mono_llvm_resume_unwind_trampoline, tramp, "llvm_resume_unwind_trampoline", NULL, TRUE, NULL);
995 } else if (!mono_llvm_only) {
996 /* Call this to avoid initialization races */
997 tramps = mono_amd64_get_exception_trampolines (FALSE);
998 for (l = tramps; l; l = l->next) {
999 MonoTrampInfo *info = (MonoTrampInfo *)l->data;
1001 mono_register_jit_icall_info (info->jit_icall_info, info->code, g_strdup (info->name), NULL, TRUE, NULL);
1002 mono_tramp_info_register (info, NULL);
1004 g_slist_free (tramps);
1008 // Implies defined(TARGET_WIN32)
1009 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
1011 static void
1012 mono_arch_unwindinfo_create (gpointer* monoui)
1014 PUNWIND_INFO newunwindinfo;
1015 *monoui = newunwindinfo = g_new0 (UNWIND_INFO, 1);
1016 newunwindinfo->Version = 1;
1019 void
1020 mono_arch_unwindinfo_add_push_nonvol (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
1022 PUNWIND_CODE unwindcode;
1023 guchar codeindex;
1025 g_assert (unwindinfo != NULL);
1027 if (unwindinfo->CountOfCodes >= MONO_MAX_UNWIND_CODES)
1028 g_error ("Larger allocation needed for the unwind information.");
1030 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
1031 unwindcode = &unwindinfo->UnwindCode [codeindex];
1032 unwindcode->UnwindOp = UWOP_PUSH_NONVOL;
1033 unwindcode->CodeOffset = (guchar)unwind_op->when;
1034 unwindcode->OpInfo = unwind_op->reg;
1036 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
1037 g_error ("Adding unwind info in wrong order.");
1039 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
1042 void
1043 mono_arch_unwindinfo_add_set_fpreg (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
1045 PUNWIND_CODE unwindcode;
1046 guchar codeindex;
1048 g_assert (unwindinfo != NULL);
1050 if (unwindinfo->CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1051 g_error ("Larger allocation needed for the unwind information.");
1053 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
1054 unwindcode = &unwindinfo->UnwindCode [codeindex];
1055 unwindcode->UnwindOp = UWOP_SET_FPREG;
1056 unwindcode->CodeOffset = (guchar)unwind_op->when;
1058 g_assert (unwind_op->val % 16 == 0);
1059 unwindinfo->FrameRegister = unwind_op->reg;
1060 unwindinfo->FrameOffset = unwind_op->val / 16;
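/* Example: establishing the frame pointer as RSP + 0x20 arrives here with
 * unwind_op->val == 0x20 and is encoded as FrameOffset = 0x20 / 16 = 2
 * (the offset is scaled by 16 in the Windows x64 unwind format). */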
1062 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
1063 g_error ("Adding unwind info in wrong order.");
1065 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
1068 void
1069 mono_arch_unwindinfo_add_alloc_stack (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
1071 PUNWIND_CODE unwindcode;
1072 guchar codeindex;
1073 guchar codesneeded;
1074 guint size;
1076 g_assert (unwindinfo != NULL);
1078 size = unwind_op->val;
1080 if (size < 0x8)
1081 g_error ("Stack allocation must be equal to or greater than 0x8.");
1083 if (size <= 0x80)
1084 codesneeded = 1;
1085 else if (size <= 0x7FFF8)
1086 codesneeded = 2;
1087 else
1088 codesneeded = 3;
1090 if (unwindinfo->CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1091 g_error ("Larger allocation needed for the unwind information.");
1093 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->CountOfCodes += codesneeded);
1094 unwindcode = &unwindinfo->UnwindCode [codeindex];
1096 unwindcode->CodeOffset = (guchar)unwind_op->when;
1098 if (codesneeded == 1) {
1099 /*The size of the allocation is
1100 (the number in the OpInfo member) times 8 plus 8*/
1101 unwindcode->UnwindOp = UWOP_ALLOC_SMALL;
1102 unwindcode->OpInfo = (size - 8)/8;
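/* Example: a 0x28-byte allocation is encoded as UWOP_ALLOC_SMALL with
 * OpInfo = (0x28 - 8) / 8 = 4. */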
1104 else {
1105 if (codesneeded == 3) {
1106 /*the unscaled size of the allocation is recorded
1107 in the next two slots in little-endian format.
1108 NOTE: unwind codes are allocated from the end to the beginning of the list, so
1109 the unwind codes will have the right execution order. The list is sorted on CodeOffset
1110 using descending sort order.*/
1111 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1112 unwindcode->OpInfo = 1;
1113 *((unsigned int*)(&(unwindcode + 1)->FrameOffset)) = size;
1115 else {
1116 /*the size of the allocation divided by 8
1117 is recorded in the next slot.
1118 NOTE: unwind codes are allocated from the end to the beginning of the list, so
1119 the unwind codes will have the right execution order. The list is sorted on CodeOffset
1120 using descending sort order.*/
1121 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1122 unwindcode->OpInfo = 0;
1123 (unwindcode + 1)->FrameOffset = (gushort)(size/8);
1127 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
1128 g_error ("Adding unwind info in wrong order.");
1130 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
1133 static gboolean g_dyn_func_table_inited;
1135 // Dynamic function table used when registering unwind info for OS unwind support.
1136 static GList *g_dynamic_function_table_begin;
1137 static GList *g_dynamic_function_table_end;
1139 // SRW lock (lightweight read/writer lock) protecting dynamic function table.
1140 static SRWLOCK g_dynamic_function_table_lock = SRWLOCK_INIT;
1142 static RtlInstallFunctionTableCallbackPtr g_rtl_install_function_table_callback;
1143 static RtlDeleteFunctionTablePtr g_rtl_delete_function_table;
1145 // If Win8 or Win2012Server or later, use growable function tables instead
1146 // of callbacks. The callback solution is still the fallback on older systems.
1147 static RtlAddGrowableFunctionTablePtr g_rtl_add_growable_function_table;
1148 static RtlGrowFunctionTablePtr g_rtl_grow_function_table;
1149 static RtlDeleteGrowableFunctionTablePtr g_rtl_delete_growable_function_table;
1151 // When using the function table callback solution, an out-of-process module is needed by
1152 // debuggers in order to read unwind info from the debug target.
1153 #ifdef _MSC_VER
1154 #define MONO_DAC_MODULE L"mono-2.0-dac-sgen.dll"
1155 #else
1156 #define MONO_DAC_MODULE L"mono-2.0-sgen.dll"
1157 #endif
1159 #define MONO_DAC_MODULE_MAX_PATH 1024
1161 static void
1162 init_table_no_lock (void)
1164 if (g_dyn_func_table_inited == FALSE) {
1165 g_assert_checked (g_dynamic_function_table_begin == NULL);
1166 g_assert_checked (g_dynamic_function_table_end == NULL);
1167 g_assert_checked (g_rtl_install_function_table_callback == NULL);
1168 g_assert_checked (g_rtl_delete_function_table == NULL);
1169 g_assert_checked (g_rtl_add_growable_function_table == NULL);
1170 g_assert_checked (g_rtl_grow_function_table == NULL);
1171 g_assert_checked (g_rtl_delete_growable_function_table == NULL);
1173 // Load functions available on Win8/Win2012Server or later. If running on earlier
1174 // systems the GetProcAddress calls below will fail; this is expected behavior.
1175 HMODULE ntdll;
1176 if (GetModuleHandleEx (0, L"ntdll.dll", &ntdll)) {
1177 g_rtl_add_growable_function_table = (RtlAddGrowableFunctionTablePtr)GetProcAddress (ntdll, "RtlAddGrowableFunctionTable");
1178 g_rtl_grow_function_table = (RtlGrowFunctionTablePtr)GetProcAddress (ntdll, "RtlGrowFunctionTable");
1179 g_rtl_delete_growable_function_table = (RtlDeleteGrowableFunctionTablePtr)GetProcAddress (ntdll, "RtlDeleteGrowableFunctionTable");
1182 // Fallback on systems not having RtlAddGrowableFunctionTable.
1183 if (g_rtl_add_growable_function_table == NULL) {
1184 HMODULE kernel32dll;
1185 if (GetModuleHandleEx (0, L"kernel32.dll", &kernel32dll)) {
1186 g_rtl_install_function_table_callback = (RtlInstallFunctionTableCallbackPtr)GetProcAddress (kernel32dll, "RtlInstallFunctionTableCallback");
1187 g_rtl_delete_function_table = (RtlDeleteFunctionTablePtr)GetProcAddress (kernel32dll, "RtlDeleteFunctionTable");
1191 g_dyn_func_table_inited = TRUE;
1195 void
1196 mono_arch_unwindinfo_init_table (void)
1198 if (g_dyn_func_table_inited == FALSE) {
1200 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1202 init_table_no_lock ();
1204 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1208 static void
1209 terminate_table_no_lock (void)
1211 if (g_dyn_func_table_inited == TRUE) {
1212 if (g_dynamic_function_table_begin != NULL) {
1213 // Free all list elements.
1214 for (GList *l = g_dynamic_function_table_begin; l; l = l->next) {
1215 if (l->data) {
1216 g_free (l->data);
1217 l->data = NULL;
1221 // Free the list.
1222 g_list_free (g_dynamic_function_table_begin);
1223 g_dynamic_function_table_begin = NULL;
1224 g_dynamic_function_table_end = NULL;
1227 g_rtl_delete_growable_function_table = NULL;
1228 g_rtl_grow_function_table = NULL;
1229 g_rtl_add_growable_function_table = NULL;
1231 g_rtl_delete_function_table = NULL;
1232 g_rtl_install_function_table_callback = NULL;
1234 g_dyn_func_table_inited = FALSE;
1238 void
1239 mono_arch_unwindinfo_terminate_table (void)
1241 if (g_dyn_func_table_inited == TRUE) {
1243 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1245 terminate_table_no_lock ();
1247 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1251 static GList *
1252 fast_find_range_in_table_no_lock_ex (gsize begin_range, gsize end_range, gboolean *continue_search)
1254 GList *found_entry = NULL;
1256 // Fast path, look at boundaries.
1257 if (g_dynamic_function_table_begin != NULL) {
1258 DynamicFunctionTableEntry *first_entry = (DynamicFunctionTableEntry*)g_dynamic_function_table_begin->data;
1259 DynamicFunctionTableEntry *last_entry = (g_dynamic_function_table_end != NULL ) ? (DynamicFunctionTableEntry*)g_dynamic_function_table_end->data : first_entry;
1261 // Sorted in descending order based on begin_range; check the first item, i.e. the entry with the highest range.
1262 if (first_entry != NULL && first_entry->begin_range <= begin_range && first_entry->end_range >= end_range) {
1263 // Entry belongs to first entry in list.
1264 found_entry = g_dynamic_function_table_begin;
1265 *continue_search = FALSE;
1266 } else {
1267 if (first_entry != NULL && first_entry->begin_range >= begin_range) {
1268 if (last_entry != NULL && last_entry->begin_range <= begin_range) {
1269 // Entry has a range that could exist in table, continue search.
1270 *continue_search = TRUE;
1276 return found_entry;
1279 static DynamicFunctionTableEntry *
1280 fast_find_range_in_table_no_lock (gsize begin_range, gsize end_range, gboolean *continue_search)
1282 GList *found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, continue_search);
1283 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
1286 static GList *
1287 find_range_in_table_no_lock_ex (const gpointer code_block, gsize block_size)
1289 GList *found_entry = NULL;
1290 gboolean continue_search = FALSE;
1292 gsize begin_range = (gsize)code_block;
1293 gsize end_range = begin_range + block_size;
1295 // Fast path, check table boundaries.
1296 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, &continue_search);
1297 if (found_entry || continue_search == FALSE)
1298 return found_entry;
1300 // Scan table for an entry including range.
1301 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1302 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1303 g_assert_checked (current_entry != NULL);
1305 // Do we have a match?
1306 if (current_entry->begin_range == begin_range && current_entry->end_range == end_range) {
1307 found_entry = node;
1308 break;
1312 return found_entry;
1315 static DynamicFunctionTableEntry *
1316 find_range_in_table_no_lock (const gpointer code_block, gsize block_size)
1318 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1319 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
1322 static GList *
1323 find_pc_in_table_no_lock_ex (const gpointer pc)
1325 GList *found_entry = NULL;
1326 gboolean continue_search = FALSE;
1328 gsize begin_range = (gsize)pc;
1329 gsize end_range = begin_range;
1331 // Fast path, check table boundaries.
1332 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, begin_range, &continue_search);
1333 if (found_entry || continue_search == FALSE)
1334 return found_entry;
1336 // Scan table for an entry including range.
1337 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1338 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1339 g_assert_checked (current_entry != NULL);
1341 // Do we have a match?
1342 if (current_entry->begin_range <= begin_range && current_entry->end_range >= end_range) {
1343 found_entry = node;
1344 break;
1348 return found_entry;
1351 static DynamicFunctionTableEntry *
1352 find_pc_in_table_no_lock (const gpointer pc)
1354 GList *found_entry = find_pc_in_table_no_lock_ex (pc);
1355 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
1358 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
1359 static void
1360 validate_table_no_lock (void)
1362 // Validation method checking that the table is sorted as expected and does not include overlapping regions.
1363 // Method will assert on failure to explicitly indicate what check failed.
1364 if (g_dynamic_function_table_begin != NULL) {
1365 g_assert_checked (g_dynamic_function_table_end != NULL);
1367 DynamicFunctionTableEntry *previous_entry = NULL;
1368 DynamicFunctionTableEntry *current_entry = NULL;
1369 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1370 current_entry = (DynamicFunctionTableEntry *)node->data;
1372 g_assert_checked (current_entry != NULL);
1373 g_assert_checked (current_entry->end_range > current_entry->begin_range);
1375 if (previous_entry != NULL) {
1376 // List should be sorted in descending order on begin_range.
1377 g_assert_checked (previous_entry->begin_range > current_entry->begin_range);
1379 // Check for overlapping regions.
1380 g_assert_checked (previous_entry->begin_range >= current_entry->end_range);
1383 previous_entry = current_entry;
1388 #else
1390 static void
1391 validate_table_no_lock (void)
1394 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
1396 // Forward declare.
1397 static PRUNTIME_FUNCTION MONO_GET_RUNTIME_FUNCTION_CALLBACK (DWORD64 ControlPc, IN PVOID Context);
1399 DynamicFunctionTableEntry *
1400 mono_arch_unwindinfo_insert_range_in_table (const gpointer code_block, gsize block_size)
1402 DynamicFunctionTableEntry *new_entry = NULL;
1404 gsize begin_range = (gsize)code_block;
1405 gsize end_range = begin_range + block_size;
1407 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1408 init_table_no_lock ();
1409 new_entry = find_range_in_table_no_lock (code_block, block_size);
1410 if (new_entry == NULL) {
1411 // Allocate new entry.
1412 new_entry = g_new0 (DynamicFunctionTableEntry, 1);
1413 if (new_entry != NULL) {
1415 // Pre-allocate RUNTIME_FUNCTION array, assume average method size of
1416 // MONO_UNWIND_INFO_RT_FUNC_SIZE bytes.
1417 InitializeSRWLock (&new_entry->lock);
1418 new_entry->handle = NULL;
1419 new_entry->begin_range = begin_range;
1420 new_entry->end_range = end_range;
1421 new_entry->rt_funcs_max_count = (block_size / MONO_UNWIND_INFO_RT_FUNC_SIZE) + 1;
1422 new_entry->rt_funcs_current_count = 0;
1423 new_entry->rt_funcs = g_new0 (RUNTIME_FUNCTION, new_entry->rt_funcs_max_count);
1425 if (new_entry->rt_funcs != NULL) {
1426 // Check insert on boundaries. List is sorted descending on begin_range.
1427 if (g_dynamic_function_table_begin == NULL) {
1428 g_dynamic_function_table_begin = g_list_append (g_dynamic_function_table_begin, new_entry);
1429 g_dynamic_function_table_end = g_dynamic_function_table_begin;
1430 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_begin->data))->begin_range < begin_range) {
1431 // Insert at the head.
1432 g_dynamic_function_table_begin = g_list_prepend (g_dynamic_function_table_begin, new_entry);
1433 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_end->data))->begin_range > begin_range) {
1434 // Insert at tail.
1435 g_list_append (g_dynamic_function_table_end, new_entry);
1436 g_dynamic_function_table_end = g_dynamic_function_table_end->next;
1437 } else {
1438 // Search and insert at the correct position.
1439 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1440 DynamicFunctionTableEntry * current_entry = (DynamicFunctionTableEntry *)node->data;
1441 g_assert_checked (current_entry != NULL);
1443 if (current_entry->begin_range < new_entry->begin_range) {
1444 g_dynamic_function_table_begin = g_list_insert_before (g_dynamic_function_table_begin, node, new_entry);
1445 break;
1450 // Register dynamic function table entry with OS.
1451 if (g_rtl_add_growable_function_table != NULL) {
1452 // Allocate new growable handle table for entry.
1453 g_assert_checked (new_entry->handle == NULL);
1454 DWORD result = g_rtl_add_growable_function_table (&new_entry->handle,
1455 new_entry->rt_funcs, new_entry->rt_funcs_current_count,
1456 new_entry->rt_funcs_max_count, new_entry->begin_range, new_entry->end_range);
1457 g_assert (!result);
1458 } else if (g_rtl_install_function_table_callback != NULL) {
1459 WCHAR buffer [MONO_DAC_MODULE_MAX_PATH] = { 0 };
1460 WCHAR *path = buffer;
1462 // DAC module should be in the same directory as the
1463 // main executable.
1464 GetModuleFileNameW (NULL, buffer, G_N_ELEMENTS(buffer));
1465 path = wcsrchr (buffer, TEXT('\\'));
1466 if (path != NULL) {
1467 path++;
1468 *path = TEXT('\0');
1471 wcscat_s (buffer, G_N_ELEMENTS(buffer), MONO_DAC_MODULE);
1472 path = buffer;
1474 // Register function table callback + out of proc module.
1475 new_entry->handle = (PVOID)((DWORD64)(new_entry->begin_range) | 3);
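/* RtlInstallFunctionTableCallback expects a table identifier with its two low-order
 * bits set, hence begin_range | 3 (per the documented Win32 contract for
 * callback-based dynamic function tables). */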
1476 BOOLEAN result = g_rtl_install_function_table_callback ((DWORD64)(new_entry->handle),
1477 (DWORD64)(new_entry->begin_range), (DWORD)(new_entry->end_range - new_entry->begin_range),
1478 MONO_GET_RUNTIME_FUNCTION_CALLBACK, new_entry, path);
1479 g_assert(result);
1480 } else {
1481 g_assert_not_reached ();
1484 // Only included in checked builds. Validates the structure of table after insert.
1485 validate_table_no_lock ();
1487 } else {
1488 g_free (new_entry);
1489 new_entry = NULL;
1493 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1495 return new_entry;
1498 static void
1499 remove_range_in_table_no_lock (GList *entry)
1501 if (entry != NULL) {
1502 if (entry == g_dynamic_function_table_end)
1503 g_dynamic_function_table_end = entry->prev;
1505 g_dynamic_function_table_begin = g_list_remove_link (g_dynamic_function_table_begin, entry);
1506 DynamicFunctionTableEntry *removed_entry = (DynamicFunctionTableEntry *)entry->data;
1508 g_assert_checked (removed_entry != NULL);
1509 g_assert_checked (removed_entry->rt_funcs != NULL);
1511 // Remove function table from OS.
1512 if (removed_entry->handle != NULL) {
1513 if (g_rtl_delete_growable_function_table != NULL) {
1514 g_rtl_delete_growable_function_table (removed_entry->handle);
1515 } else if (g_rtl_delete_function_table != NULL) {
1516 g_rtl_delete_function_table ((PRUNTIME_FUNCTION)removed_entry->handle);
1517 } else {
1518 g_assert_not_reached ();
1522 g_free (removed_entry->rt_funcs);
1523 g_free (removed_entry);
1525 g_list_free_1 (entry);
1528 // Only included in checked builds. Validates the structure of table after remove.
1529 validate_table_no_lock ();
1532 void
1533 mono_arch_unwindinfo_remove_pc_range_in_table (const gpointer code)
1535 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1537 GList *found_entry = find_pc_in_table_no_lock_ex (code);
1539 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code);
1540 remove_range_in_table_no_lock (found_entry);
1542 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1545 void
1546 mono_arch_unwindinfo_remove_range_in_table (const gpointer code_block, gsize block_size)
1548 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1550 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1552 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code_block);
1553 remove_range_in_table_no_lock (found_entry);
1555 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1558 PRUNTIME_FUNCTION
1559 mono_arch_unwindinfo_find_rt_func_in_table (const gpointer code, gsize code_size)
1561 PRUNTIME_FUNCTION found_rt_func = NULL;
1563 gsize begin_range = (gsize)code;
1564 gsize end_range = begin_range + code_size;
1566 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1568 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1570 if (found_entry != NULL) {
1572 AcquireSRWLockShared (&found_entry->lock);
1574 g_assert_checked (found_entry->begin_range <= begin_range);
1575 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1576 g_assert_checked (found_entry->rt_funcs != NULL);
1578 for (int i = 0; i < found_entry->rt_funcs_current_count; ++i) {
1579 PRUNTIME_FUNCTION current_rt_func = (PRUNTIME_FUNCTION)(&found_entry->rt_funcs [i]);
1581 // Is this our RT function entry?
1582 if (found_entry->begin_range + current_rt_func->BeginAddress <= begin_range &&
1583 found_entry->begin_range + current_rt_func->EndAddress >= end_range) {
1584 found_rt_func = current_rt_func;
1585 break;
1589 ReleaseSRWLockShared (&found_entry->lock);
1592 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
1594 return found_rt_func;
1597 static PRUNTIME_FUNCTION
1598 mono_arch_unwindinfo_find_pc_rt_func_in_table (const gpointer pc)
1600 return mono_arch_unwindinfo_find_rt_func_in_table (pc, 0);
1603 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
1604 static void
1605 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1607 // Validation method checking that the runtime function table is sorted as expected and does not include overlapping regions.
1608 // Method will assert on failure to explicitly indicate what check failed.
1609 g_assert_checked (entry != NULL);
1610 g_assert_checked (entry->rt_funcs_max_count >= entry->rt_funcs_current_count);
1611 g_assert_checked (entry->rt_funcs != NULL);
1613 PRUNTIME_FUNCTION current_rt_func = NULL;
1614 PRUNTIME_FUNCTION previous_rt_func = NULL;
1615 for (int i = 0; i < entry->rt_funcs_current_count; ++i) {
1616 current_rt_func = &(entry->rt_funcs [i]);
1618 g_assert_checked (current_rt_func->BeginAddress < current_rt_func->EndAddress);
1619 g_assert_checked (current_rt_func->EndAddress <= current_rt_func->UnwindData);
1621 if (previous_rt_func != NULL) {
1622 // List should be sorted in ascending order based on BeginAddress.
1623 g_assert_checked (previous_rt_func->BeginAddress < current_rt_func->BeginAddress);
1625 // Check for overlapped regions.
1626 g_assert_checked (previous_rt_func->EndAddress <= current_rt_func->BeginAddress);
1629 previous_rt_func = current_rt_func;
1633 #else
1635 static void
1636 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1639 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
1641 PRUNTIME_FUNCTION
1642 mono_arch_unwindinfo_insert_rt_func_in_table (const gpointer code, gsize code_size)
1644 PRUNTIME_FUNCTION new_rt_func = NULL;
1646 gsize begin_range = (gsize)code;
1647 gsize end_range = begin_range + code_size;
1649 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1651 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1653 if (found_entry != NULL) {
1655 AcquireSRWLockExclusive (&found_entry->lock);
1657 g_assert_checked (found_entry->begin_range <= begin_range);
1658 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1659 g_assert_checked (found_entry->rt_funcs != NULL);
1660 g_assert_checked ((guchar*)code - found_entry->begin_range >= 0);
1662 gsize code_offset = (gsize)code - found_entry->begin_range;
1663 gsize entry_count = found_entry->rt_funcs_current_count;
1664 gsize max_entry_count = found_entry->rt_funcs_max_count;
1665 PRUNTIME_FUNCTION current_rt_funcs = found_entry->rt_funcs;
1667 RUNTIME_FUNCTION new_rt_func_data;
1668 new_rt_func_data.BeginAddress = code_offset;
1669 new_rt_func_data.EndAddress = code_offset + code_size;
1671 gsize aligned_unwind_data = ALIGN_TO(end_range, sizeof(host_mgreg_t));
1672 new_rt_func_data.UnwindData = aligned_unwind_data - found_entry->begin_range;
1674 g_assert_checked (new_rt_func_data.UnwindData == ALIGN_TO(new_rt_func_data.EndAddress, sizeof (host_mgreg_t)));
1676 PRUNTIME_FUNCTION new_rt_funcs = NULL;
1678 // List needs to be sorted in ascending order based on BeginAddress (Windows requirement if the list is
1679 // going to be directly reused in OS function tables). Check if we can append to the end of the existing table without realloc.
1680 if (entry_count == 0 || (entry_count < max_entry_count) && (current_rt_funcs [entry_count - 1].BeginAddress) < code_offset) {
1681 new_rt_func = &(current_rt_funcs [entry_count]);
1682 *new_rt_func = new_rt_func_data;
1683 entry_count++;
1684 } else {
1685 // No easy way out, need to realloc, grow to double size (or current max, if too small).
1686 max_entry_count = entry_count * 2 > max_entry_count ? entry_count * 2 : max_entry_count;
1687 new_rt_funcs = g_new0 (RUNTIME_FUNCTION, max_entry_count);
1689 if (new_rt_funcs != NULL) {
1690 gsize from_index = 0;
1691 gsize to_index = 0;
1693 // Copy from old table into new table. Make sure new rt func gets inserted
1694 // into correct location based on sort order.
1695 for (; from_index < entry_count; ++from_index) {
1696 if (new_rt_func == NULL && current_rt_funcs [from_index].BeginAddress > new_rt_func_data.BeginAddress) {
1697 new_rt_func = &(new_rt_funcs [to_index++]);
1698 *new_rt_func = new_rt_func_data;
1701 if (current_rt_funcs [from_index].UnwindData != 0)
1702 new_rt_funcs [to_index++] = current_rt_funcs [from_index];
1705 // If we didn't insert by now, put it last in the list.
1706 if (new_rt_func == NULL) {
1707 new_rt_func = &(new_rt_funcs [to_index]);
1708 *new_rt_func = new_rt_func_data;
1712 entry_count++;
1715 // Update the stats for current entry.
1716 found_entry->rt_funcs_current_count = entry_count;
1717 found_entry->rt_funcs_max_count = max_entry_count;
1719 if (new_rt_funcs == NULL && g_rtl_grow_function_table != NULL) {
1720 // No new table just report increase in use.
1721 g_assert_checked (found_entry->handle != NULL);
1722 g_rtl_grow_function_table (found_entry->handle, found_entry->rt_funcs_current_count);
1723 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table != NULL) {
1724 // New table, delete old table and rt funcs, and register a new one.
1725 g_assert_checked (g_rtl_delete_growable_function_table != NULL);
1726 g_rtl_delete_growable_function_table (found_entry->handle);
1727 found_entry->handle = NULL;
1728 g_free (found_entry->rt_funcs);
1729 found_entry->rt_funcs = new_rt_funcs;
1730 DWORD result = g_rtl_add_growable_function_table (&found_entry->handle,
1731 found_entry->rt_funcs, found_entry->rt_funcs_current_count,
1732 found_entry->rt_funcs_max_count, found_entry->begin_range, found_entry->end_range);
1733 g_assert (!result);
1734 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table == NULL) {
1735 // No table registered with OS, callback solution in use. Switch tables.
1736 g_free (found_entry->rt_funcs);
1737 found_entry->rt_funcs = new_rt_funcs;
1738 } else if (new_rt_funcs == NULL && g_rtl_grow_function_table == NULL) {
1739 // No table registered with OS, callback solution in use, nothing to do.
1740 } else {
1741 g_assert_not_reached ();
1744 // Only included in checked builds. Validates the structure of table after insert.
1745 validate_rt_funcs_in_table_no_lock (found_entry);
1747 ReleaseSRWLockExclusive (&found_entry->lock);
1750 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
1752 return new_rt_func;
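/*
 * MONO_GET_RUNTIME_FUNCTION_CALLBACK:
 *
 *   Callback used by the OS unwinder to look up the RUNTIME_FUNCTION covering
 * ControlPc in the dynamic function tables, when code ranges are registered
 * through the callback-based OS API rather than growable function tables.
 */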
static PRUNTIME_FUNCTION
MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
{
	return mono_arch_unwindinfo_find_pc_rt_func_in_table ((gpointer)ControlPc);
}
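/*
 * initialize_unwind_info_internal_ex:
 *
 *   Replay the DWARF-style unwind ops collected by the JIT and translate them into
 * the Windows x64 UNWIND_INFO format (non-volatile register pushes, stack allocation
 * and frame pointer setup).
 */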
static void
initialize_unwind_info_internal_ex (GSList *unwind_ops, PUNWIND_INFO unwindinfo)
{
	if (unwind_ops != NULL && unwindinfo != NULL) {
		MonoUnwindOp *unwind_op_data;
		gboolean sp_alloced = FALSE;
		gboolean fp_alloced = FALSE;

		// Replay collected unwind info and setup Windows format.
		for (GSList *l = unwind_ops; l; l = l->next) {
			unwind_op_data = (MonoUnwindOp *)l->data;
			switch (unwind_op_data->op) {
			case DW_CFA_offset : {
				// Pushes should go before SP/FP allocation to be compliant with Windows x64 ABI.
				// TODO: DW_CFA_offset can also be used to move saved regs into frame.
				if (unwind_op_data->reg != AMD64_RIP && sp_alloced == FALSE && fp_alloced == FALSE)
					mono_arch_unwindinfo_add_push_nonvol (unwindinfo, unwind_op_data);
				break;
			}
			case DW_CFA_mono_sp_alloc_info_win64 : {
				mono_arch_unwindinfo_add_alloc_stack (unwindinfo, unwind_op_data);
				sp_alloced = TRUE;
				break;
			}
			case DW_CFA_mono_fp_alloc_info_win64 : {
				mono_arch_unwindinfo_add_set_fpreg (unwindinfo, unwind_op_data);
				fp_alloced = TRUE;
				break;
			}
			default :
				break;
			}
		}
	}
}
static PUNWIND_INFO
initialize_unwind_info_internal (GSList *unwind_ops)
{
	PUNWIND_INFO unwindinfo;

	mono_arch_unwindinfo_create ((gpointer*)&unwindinfo);
	initialize_unwind_info_internal_ex (unwind_ops, unwindinfo);

	return unwindinfo;
}
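/*
 * mono_arch_unwindinfo_get_code_count:
 *
 *   Return the number of Windows unwind codes that UNWIND_OPS translate into, by
 * replaying the ops into a temporary UNWIND_INFO on the stack.
 */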
guchar
mono_arch_unwindinfo_get_code_count (GSList *unwind_ops)
{
	UNWIND_INFO unwindinfo = {0};
	initialize_unwind_info_internal_ex (unwind_ops, &unwindinfo);
	return unwindinfo.CountOfCodes;
}
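/*
 * mono_arch_unwindinfo_alloc_unwind_info / mono_arch_unwindinfo_free_unwind_info:
 *
 *   Allocate a heap copy of the Windows unwind info built from UNWIND_OPS
 * (NULL if there are no ops), and free it again once it has been installed.
 */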
PUNWIND_INFO
mono_arch_unwindinfo_alloc_unwind_info (GSList *unwind_ops)
{
	if (!unwind_ops)
		return NULL;

	return initialize_unwind_info_internal (unwind_ops);
}
void
mono_arch_unwindinfo_free_unwind_info (PUNWIND_INFO unwind_info)
{
	g_free (unwind_info);
}
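/*
 * mono_arch_unwindinfo_init_method_unwind_info:
 *
 *   Build the UNWIND_INFO for the method being compiled from cfg->unwind_ops and
 * return its size in bytes, so the caller can reserve space for it after the method body.
 */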
guint
mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg)
{
	MonoCompile * current_cfg = (MonoCompile *)cfg;
	g_assert (current_cfg->arch.unwindinfo == NULL);
	current_cfg->arch.unwindinfo = initialize_unwind_info_internal (current_cfg->unwind_ops);
	return mono_arch_unwindinfo_get_size (((PUNWIND_INFO)(current_cfg->arch.unwindinfo))->CountOfCodes);
}
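/*
 * mono_arch_unwindinfo_install_method_unwind_info:
 *
 *   Copy the previously built UNWIND_INFO to the aligned location right after the
 * method body, free the temporary copy, and register a RUNTIME_FUNCTION for the
 * code range in the dynamic function table.
 */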
void
mono_arch_unwindinfo_install_method_unwind_info (PUNWIND_INFO *monoui, gpointer code, guint code_size)
{
	PUNWIND_INFO unwindinfo, targetinfo;
	guchar codecount;
	guint64 targetlocation;
	if (!*monoui)
		return;

	unwindinfo = *monoui;
	targetlocation = (guint64)&(((guchar*)code)[code_size]);
	targetinfo = (PUNWIND_INFO) ALIGN_TO(targetlocation, sizeof (host_mgreg_t));

	memcpy (targetinfo, unwindinfo, sizeof (UNWIND_INFO) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));

	codecount = unwindinfo->CountOfCodes;
	if (codecount) {
		memcpy (&targetinfo->UnwindCode [0], &unwindinfo->UnwindCode [MONO_MAX_UNWIND_CODES - codecount],
			sizeof (UNWIND_CODE) * codecount);
	}

#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
	if (codecount) {
		// Validate the order of unwind op codes in checked builds. Offset should be in descending order.
		// In first iteration previous == current, this is intended to handle UWOP_ALLOC_LARGE as first item.
		int previous = 0;
		for (int current = 0; current < codecount; current++) {
			g_assert_checked (targetinfo->UnwindCode [previous].CodeOffset >= targetinfo->UnwindCode [current].CodeOffset);
			previous = current;
			if (targetinfo->UnwindCode [current].UnwindOp == UWOP_ALLOC_LARGE) {
				if (targetinfo->UnwindCode [current].OpInfo == 0) {
					current++;
				} else {
					current += 2;
				}
			}
		}
	}
#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */

	mono_arch_unwindinfo_free_unwind_info (unwindinfo);
	*monoui = 0;

	// Register unwind info in table.
	mono_arch_unwindinfo_insert_rt_func_in_table (code, code_size);
}
void
mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size)
{
	PUNWIND_INFO unwindinfo = initialize_unwind_info_internal (unwind_ops);
	if (unwindinfo != NULL) {
		mono_arch_unwindinfo_install_method_unwind_info (&unwindinfo, code, code_size);
	}
}
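/*
 * Code manager callbacks: register/unregister the address range of a newly
 * allocated/destroyed code chunk in the dynamic function table, so unwind info
 * for code placed in the chunk can later be inserted and found by the OS unwinder.
 */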
void
mono_arch_code_chunk_new (void *chunk, int size)
{
	mono_arch_unwindinfo_insert_range_in_table (chunk, size);
}

void mono_arch_code_chunk_destroy (void *chunk)
{
	mono_arch_unwindinfo_remove_pc_range_in_table (chunk);
}

#endif /* MONO_ARCH_HAVE_UNWIND_TABLE */

#if MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT)
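/*
 * mono_tasklets_arch_restore:
 *
 *   Generate (once) and return a small trampoline with signature
 * restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) that copies the
 * continuation's saved stack back into place, restores rbp/rsp from the saved LMF
 * and jumps to the saved return address, with STATE left in rax as the return value.
 */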
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both call conventions */
	const int kMaxCodeSize = 64;

	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy
	 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
	 */
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* setup the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	x86_cld (code);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);
	amd64_movsl (code);

	/* now restore the registers from the LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);

#ifdef WIN32
	amd64_mov_reg_reg (code, AMD64_R14, AMD64_ARG_REG3, 8);
#else
	amd64_mov_reg_reg (code, AMD64_R12, AMD64_ARG_REG3, 8);
#endif

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assertf ((code - start) <= kMaxCodeSize, "%d %d", (int)(code - start), kMaxCodeSize);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	saved = start;
	return (MonoContinuationRestore)saved;
}
#endif /* MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT) */
/*
 * mono_arch_setup_resume_sighandler_ctx:
 *
 *   Setup CTX so execution continues at FUNC.
 */
void
mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
{
	/*
	 * When resuming from a signal handler, the stack should be misaligned, just like right after
	 * a call.
	 */
	if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
		MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
	MONO_CONTEXT_SET_IP (ctx, func);
}
#if !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT)
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	g_assert_not_reached ();
	return NULL;
}
#endif /* !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT) */
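/*
 * IP adjustment helpers: move the instruction pointer in CTX by one byte.
 * do_ip_adjustment decrements RIP (so an address derived from a return address
 * falls inside the calling instruction during stack walking); undo_ip_adjustment
 * reverses that adjustment.
 */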
void
mono_arch_undo_ip_adjustment (MonoContext *ctx)
{
	ctx->gregs [AMD64_RIP]++;
}

void
mono_arch_do_ip_adjustment (MonoContext *ctx)
{
	ctx->gregs [AMD64_RIP]--;
}