1 /**
2 * \file
3 * exception support for AMD64
5 * Authors:
6 * Dietmar Maurer (dietmar@ximian.com)
7 * Johan Lorensson (lateralusx.github@gmail.com)
9 * (C) 2001 Ximian, Inc.
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
14 #include <config.h>
16 // Secret password to unlock wcscat_s on mxe, must happen before string.h is included
17 #ifdef __MINGW32__
18 #define MINGW_HAS_SECURE_API 1
19 #endif
21 #include <glib.h>
22 #include <string.h>
24 #ifdef HAVE_SIGNAL_H
25 #include <signal.h>
26 #endif
27 #ifdef HAVE_UCONTEXT_H
28 #include <ucontext.h>
29 #endif
31 #include <mono/arch/amd64/amd64-codegen.h>
32 #include <mono/metadata/abi-details.h>
33 #include <mono/metadata/appdomain.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/threads.h>
36 #include <mono/metadata/threads-types.h>
37 #include <mono/metadata/debug-helpers.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/gc-internals.h>
40 #include <mono/metadata/mono-debug.h>
41 #include <mono/utils/mono-mmap.h>
43 #include "mini.h"
44 #include "mini-amd64.h"
45 #include "mini-runtime.h"
46 #include "aot-runtime.h"
47 #include "tasklets.h"
49 #ifdef TARGET_WIN32
50 static void (*restore_stack) (void);
51 static MonoW32ExceptionHandler fpe_handler;
52 static MonoW32ExceptionHandler ill_handler;
53 static MonoW32ExceptionHandler segv_handler;
55 LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
56 void *mono_win_vectored_exception_handle;
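/* W32_SEH_HANDLE_EX dispatches to the Win32 exception handler registered for
 * the given signal type (fpe/ill/segv) via win32_seh_set_handler (), if any. */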
58 #define W32_SEH_HANDLE_EX(_ex) \
59 if (_ex##_handler) _ex##_handler(0, ep, ctx)
61 static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
63 #ifndef MONO_CROSS_COMPILE
64 if (mono_old_win_toplevel_exception_filter) {
65 return (*mono_old_win_toplevel_exception_filter)(ep);
67 #endif
69 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
71 return EXCEPTION_CONTINUE_SEARCH;
74 #if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
75 static gpointer
76 get_win32_restore_stack (void)
78 static guint8 *start = NULL;
79 guint8 *code;
81 if (start)
82 return start;
84 /* restore_stack (void) */
85 start = code = mono_global_codeman_reserve (128);
87 amd64_push_reg (code, AMD64_RBP);
88 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
90 /* push 32 bytes of stack space for Win64 calling convention */
91 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
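/* (this is the register "home"/shadow space the Win64 ABI requires the caller
 * to reserve for the callee's register arguments) */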
93 /* restore guard page */
94 amd64_mov_reg_imm (code, AMD64_R11, _resetstkoflw);
95 amd64_call_reg (code, AMD64_R11);
97 /* get jit_tls with context to restore */
98 amd64_mov_reg_imm (code, AMD64_R11, mono_tls_get_jit_tls);
99 amd64_call_reg (code, AMD64_R11);
101 /* move jit_tls from return reg to arg reg */
102 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
104 /* retrieve pointer to saved context */
105 amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoJitTlsData, stack_restore_ctx));
107 /* this call does not return */
108 amd64_mov_reg_imm (code, AMD64_R11, mono_restore_context);
109 amd64_call_reg (code, AMD64_R11);
111 g_assert ((code - start) < 128);
113 mono_arch_flush_icache (start, code - start);
114 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
116 return start;
118 #else
119 static gpointer
120 get_win32_restore_stack (void)
122 // _resetstkoflw is unsupported on non-desktop Windows platforms.
123 return NULL;
125 #endif /* G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) */
128 * Vectored exception handler, acting as the
129 * top-level per-process exception handler.
131 static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
133 EXCEPTION_RECORD* er;
134 CONTEXT* ctx;
135 LONG res;
136 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
137 MonoDomain* domain = mono_domain_get ();
139 /* If the thread is not managed by the runtime return early */
140 if (!jit_tls)
141 return EXCEPTION_CONTINUE_SEARCH;
143 jit_tls->mono_win_chained_exception_needs_run = FALSE;
144 res = EXCEPTION_CONTINUE_EXECUTION;
146 er = ep->ExceptionRecord;
147 ctx = ep->ContextRecord;
149 switch (er->ExceptionCode) {
150 case EXCEPTION_STACK_OVERFLOW:
151 if (!mono_aot_only && restore_stack) {
152 if (mono_arch_handle_exception (ctx, domain->stack_overflow_ex)) {
153 /* need to restore stack protection once the stack is unwound;
154 * restore_stack will re-protect the guard page and then
155 * resume execution at the saved stack_restore_ctx */
156 mono_sigctx_to_monoctx (ctx, &jit_tls->stack_restore_ctx);
157 ctx->Rip = (guint64)restore_stack;
159 } else {
160 jit_tls->mono_win_chained_exception_needs_run = TRUE;
162 break;
163 case EXCEPTION_ACCESS_VIOLATION:
164 W32_SEH_HANDLE_EX(segv);
165 break;
166 case EXCEPTION_ILLEGAL_INSTRUCTION:
167 W32_SEH_HANDLE_EX(ill);
168 break;
169 case EXCEPTION_INT_DIVIDE_BY_ZERO:
170 case EXCEPTION_INT_OVERFLOW:
171 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
172 case EXCEPTION_FLT_OVERFLOW:
173 case EXCEPTION_FLT_UNDERFLOW:
174 case EXCEPTION_FLT_INEXACT_RESULT:
175 W32_SEH_HANDLE_EX(fpe);
176 break;
177 default:
178 jit_tls->mono_win_chained_exception_needs_run = TRUE;
179 break;
182 if (jit_tls->mono_win_chained_exception_needs_run) {
183 /* Don't copy context back if we chained exception
184 * as the handler may have modified the EXCEPTION_POINTERS
185 * directly. We don't pass sigcontext to chained handlers.
186 * Return continue search so the UnhandledExceptionFilter
187 * can correctly chain the exception.
189 res = EXCEPTION_CONTINUE_SEARCH;
192 return res;
195 void win32_seh_init()
197 if (!mono_aot_only)
198 restore_stack = (void (*) (void))get_win32_restore_stack ();
200 mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
201 mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
204 void win32_seh_cleanup()
206 guint32 ret = 0;
208 if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);
210 ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
211 g_assert (ret);
214 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
216 switch (type) {
217 case SIGFPE:
218 fpe_handler = handler;
219 break;
220 case SIGILL:
221 ill_handler = handler;
222 break;
223 case SIGSEGV:
224 segv_handler = handler;
225 break;
226 default:
227 break;
231 #endif /* TARGET_WIN32 */
233 #ifndef DISABLE_JIT
235 * mono_arch_get_restore_context:
237 * Returns a pointer to a method which restores a previously saved sigcontext.
239 gpointer
240 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
242 guint8 *start = NULL;
243 guint8 *code;
244 MonoJumpInfo *ji = NULL;
245 GSList *unwind_ops = NULL;
246 int i, gregs_offset;
248 /* restore_context (MonoContext *ctx) */
250 start = code = (guint8 *)mono_global_codeman_reserve (256);
252 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
254 /* Restore all registers except %rsp, %rip and the scratch regs %r8-%r11 (%rsp and %rip are restored last, below) */
255 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
256 for (i = 0; i < AMD64_NREG; ++i) {
257 if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
258 amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
262 * The context resides on the stack, in the stack frame of the
263 * caller of this function. The stack pointer that we need to
264 * restore is potentially many stack frames higher up, so the
265 * distance between them can easily be more than the red zone
266 * size. Hence the stack pointer can be restored only after
267 * we have finished loading everything from the context.
269 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
270 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
271 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
273 /* jump to the saved IP */
274 amd64_jump_reg (code, AMD64_R11);
276 mono_arch_flush_icache (start, code - start);
277 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
279 if (info)
280 *info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);
282 return start;
286 * mono_arch_get_call_filter:
288 * Returns a pointer to a method which calls an exception filter. We
289 * also use this function to call finally handlers (we pass NULL as
290 * @exc object in this case).
292 gpointer
293 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
295 guint8 *start;
296 int i, gregs_offset;
297 guint8 *code;
298 guint32 pos;
299 MonoJumpInfo *ji = NULL;
300 GSList *unwind_ops = NULL;
301 const guint kMaxCodeSize = 128;
303 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
305 /* call_filter (MonoContext *ctx, unsigned long eip) */
306 code = start;
308 /* Alloc new frame */
309 amd64_push_reg (code, AMD64_RBP);
310 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
312 /* Save callee saved regs */
313 pos = 0;
314 for (i = 0; i < AMD64_NREG; ++i)
315 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
316 amd64_push_reg (code, i);
317 pos += 8;
320 /* Save RBP */
321 pos += 8;
322 amd64_push_reg (code, AMD64_RBP);
324 /* Make stack misaligned, the call will make it aligned again */
325 if (! (pos & 8))
326 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
328 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
330 /* set new RBP */
331 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
332 /* load callee saved regs */
333 for (i = 0; i < AMD64_NREG; ++i) {
334 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
335 amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
337 /* load exc register */
338 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);
340 /* call the handler */
341 amd64_call_reg (code, AMD64_ARG_REG2);
343 if (! (pos & 8))
344 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
346 /* restore RBP */
347 amd64_pop_reg (code, AMD64_RBP);
349 /* Restore callee saved regs */
350 for (i = AMD64_NREG; i >= 0; --i)
351 if (AMD64_IS_CALLEE_SAVED_REG (i))
352 amd64_pop_reg (code, i);
354 #if TARGET_WIN32
355 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
356 amd64_pop_reg (code, AMD64_RBP);
357 #else
358 amd64_leave (code);
359 #endif
360 amd64_ret (code);
362 g_assert ((code - start) < kMaxCodeSize);
364 mono_arch_flush_icache (start, code - start);
365 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
367 if (info)
368 *info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);
370 return start;
372 #endif /* !DISABLE_JIT */
375 * The first few arguments are dummy, to force the other arguments to be passed on
376 * the stack; this avoids overwriting the argument registers in the throw trampoline.
378 void
379 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
380 guint64 dummy5, guint64 dummy6,
381 MonoContext *mctx, MonoObject *exc, gboolean rethrow, gboolean preserve_ips)
383 ERROR_DECL (error);
384 MonoContext ctx;
386 /* mctx is on the caller's stack */
387 memcpy (&ctx, mctx, sizeof (MonoContext));
389 if (mono_object_isinst_checked (exc, mono_defaults.exception_class, error)) {
390 MonoException *mono_ex = (MonoException*)exc;
391 if (!rethrow) {
392 mono_ex->stack_trace = NULL;
393 mono_ex->trace_ips = NULL;
394 } else if (preserve_ips) {
395 mono_ex->caught_in_unmanaged = TRUE;
398 mono_error_assert_ok (error);
400 /* adjust the ip so that it points into the call instruction */
401 ctx.gregs [AMD64_RIP] --;
403 mono_handle_exception (&ctx, exc);
404 mono_restore_context (&ctx);
405 g_assert_not_reached ();
408 void
409 mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
410 guint64 dummy5, guint64 dummy6,
411 MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
413 guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
414 MonoException *ex;
416 ex = mono_exception_from_token (m_class_get_image (mono_defaults.exception_class), ex_token);
418 mctx->gregs [AMD64_RIP] -= pc_offset;
420 /* Negate the ip adjustment done in mono_amd64_throw_exception () */
421 mctx->gregs [AMD64_RIP] += 1;
423 mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE, FALSE);
426 void
427 mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
428 guint64 dummy5, guint64 dummy6,
429 MonoContext *mctx, guint32 dummy7, gint64 dummy8)
431 /* Only the register parameters are valid */
432 MonoContext ctx;
434 /* mctx is on the caller's stack */
435 memcpy (&ctx, mctx, sizeof (MonoContext));
437 mono_resume_unwind (&ctx);
440 #ifndef DISABLE_JIT
442 * get_throw_trampoline:
444 * Generate a call to mono_amd64_throw_exception/
445 * mono_amd64_throw_corlib_exception.
447 static gpointer
448 get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot, gboolean preserve_ips)
450 guint8* start;
451 guint8 *code;
452 MonoJumpInfo *ji = NULL;
453 GSList *unwind_ops = NULL;
454 int i, stack_size, arg_offsets [16], ctx_offset, regs_offset;
455 const guint kMaxCodeSize = 256;
457 #ifdef TARGET_WIN32
458 const int dummy_stack_space = 6 * sizeof(mgreg_t); /* Windows expects stack space allocated for all 6 dummy args. */
459 #else
460 const int dummy_stack_space = 0;
461 #endif
463 if (info)
464 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
465 else
466 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
468 /* The stack is unaligned on entry */
469 stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;
471 code = start;
473 if (info)
474 unwind_ops = mono_arch_get_cie_program ();
476 /* Alloc frame */
477 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
478 if (info) {
479 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
480 mono_add_unwind_op_sp_alloc (unwind_ops, code, start, stack_size);
484 * To hide linux/windows calling convention differences, we pass all arguments on
485 * the stack by passing 6 dummy values in registers.
488 arg_offsets [0] = dummy_stack_space + 0;
489 arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
490 arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
491 arg_offsets [3] = dummy_stack_space + sizeof(mgreg_t) * 3;
492 ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
493 regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
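/* Stack layout built below, offsets from the new RSP: the dummy arg space
 * (Win64 only), then four stack slots for the real arguments (ctx pointer,
 * exc/ex_token_index, rethrow/pc offset, preserve_ips), then the MonoContext,
 * whose gregs array starts at regs_offset. */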
495 /* Save registers */
496 for (i = 0; i < AMD64_NREG; ++i)
497 if (i != AMD64_RSP)
498 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
499 /* Save RSP */
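/* the caller's RSP is the current RSP plus the frame allocated above plus the
 * slot occupied by the pushed return address */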
500 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
501 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
502 /* Save IP */
503 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
504 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
505 /* Set arg1 == ctx */
506 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
507 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
508 /* Set arg2 == exc/ex_token_index */
509 if (resume_unwind)
510 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
511 else
512 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
513 /* Set arg3 == rethrow/pc offset */
514 if (resume_unwind) {
515 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
516 } else if (corlib) {
517 if (llvm_abs)
519 * The caller doesn't pass in a pc/pc offset, instead we simply use the
520 * caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
522 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof(mgreg_t));
523 else
524 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
525 } else {
526 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
528 /* Set arg4 == preserve_ips */
529 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], preserve_ips, sizeof(mgreg_t));
532 if (aot) {
533 const char *icall_name;
535 if (resume_unwind)
536 icall_name = "mono_amd64_resume_unwind";
537 else if (corlib)
538 icall_name = "mono_amd64_throw_corlib_exception";
539 else
540 icall_name = "mono_amd64_throw_exception";
541 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
542 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
543 } else {
544 amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
546 amd64_call_reg (code, AMD64_R11);
547 amd64_breakpoint (code);
549 mono_arch_flush_icache (start, code - start);
551 g_assert ((code - start) < kMaxCodeSize);
552 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
554 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
556 if (info)
557 *info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);
559 return start;
563 * mono_arch_get_throw_exception:
564 * \returns a function pointer which can be used to raise
565 * exceptions. The returned function has the following
566 * signature: void (*func) (MonoException *exc);
568 gpointer
569 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
571 return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot, FALSE);
574 gpointer
575 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
577 return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot, FALSE);
580 gpointer
581 mono_arch_get_rethrow_preserve_exception (MonoTrampInfo **info, gboolean aot)
583 return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_preserve_exception", aot, TRUE);
587 * mono_arch_get_throw_corlib_exception:
589 * Returns a function pointer which can be used to raise
590 * corlib exceptions. The returned function has the following
591 * signature: void (*func) (guint32 ex_token, guint32 offset);
592 * Here, offset is the offset which needs to be subtracted from the caller IP
593 * to get the IP of the throw. Passing the offset has the advantage that it
594 * needs no relocations in the caller.
596 gpointer
597 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
599 return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot, FALSE);
601 #endif /* !DISABLE_JIT */
604 * mono_arch_unwind_frame:
606 * This function is used to gather information from @ctx, and store it in @frame_info.
607 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
608 * is modified if needed.
609 * Returns TRUE on success, FALSE otherwise.
611 gboolean
612 mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls,
613 MonoJitInfo *ji, MonoContext *ctx,
614 MonoContext *new_ctx, MonoLMF **lmf,
615 host_mgreg_t **save_locations,
616 StackFrameInfo *frame)
618 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
619 int i;
621 memset (frame, 0, sizeof (StackFrameInfo));
622 frame->ji = ji;
624 *new_ctx = *ctx;
626 if (ji != NULL) {
627 host_mgreg_t regs [MONO_MAX_IREGS + 1];
628 guint8 *cfa;
629 guint32 unwind_info_len;
630 guint8 *unwind_info;
631 guint8 *epilog = NULL;
633 if (ji->is_trampoline)
634 frame->type = FRAME_TYPE_TRAMPOLINE;
635 else
636 frame->type = FRAME_TYPE_MANAGED;
638 unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);
640 frame->unwind_info = unwind_info;
641 frame->unwind_info_len = unwind_info_len;
644 printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
645 mono_print_unwind_info (unwind_info, unwind_info_len);
647 /* LLVM compiled code doesn't have this info */
648 if (ji->has_arch_eh_info)
649 epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);
651 for (i = 0; i < AMD64_NREG; ++i)
652 regs [i] = new_ctx->gregs [i];
654 mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
655 (guint8*)ji->code_start + ji->code_size,
656 (guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
657 save_locations, MONO_MAX_IREGS, &cfa);
659 for (i = 0; i < AMD64_NREG; ++i)
660 new_ctx->gregs [i] = regs [i];
662 /* The CFA becomes the new SP value */
663 new_ctx->gregs [AMD64_RSP] = (host_mgreg_t)(gsize)cfa;
665 /* Adjust IP */
666 new_ctx->gregs [AMD64_RIP] --;
668 return TRUE;
669 } else if (*lmf) {
670 guint64 rip;
672 g_assert ((((guint64)(*lmf)->previous_lmf) & 2) == 0);
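/* The low bits of previous_lmf are flag bits (masked off with ~7 when
 * following the chain below); bit 2 (0x4) marks a MonoLMFTramp entry that
 * carries a full saved context. */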
674 if (((guint64)(*lmf)->previous_lmf) & 4) {
675 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
677 rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
678 } else if ((*lmf)->rsp == 0) {
679 /* Top LMF entry */
680 return FALSE;
681 } else {
683 * The rsp field is set just before the call which transitioned to native
684 * code. Obtain the rip from the stack.
686 rip = *(guint64*)((*lmf)->rsp - sizeof(host_mgreg_t));
689 ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
691 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
692 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
693 * return address.
695 //g_assert (ji);
696 if (!ji)
697 return FALSE;
699 frame->ji = ji;
700 frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
702 if (((guint64)(*lmf)->previous_lmf) & 4) {
703 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
705 /* Trampoline frame */
706 for (i = 0; i < AMD64_NREG; ++i)
707 new_ctx->gregs [i] = ext->ctx->gregs [i];
708 /* Adjust IP */
709 new_ctx->gregs [AMD64_RIP] --;
710 } else {
712 * The registers saved in the LMF will be restored using the normal unwind info,
713 * when the wrapper frame is processed.
715 /* Adjust IP */
716 rip --;
717 new_ctx->gregs [AMD64_RIP] = rip;
718 new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
719 new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
720 for (i = 0; i < AMD64_NREG; ++i) {
721 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
722 new_ctx->gregs [i] = 0;
726 *lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
728 return TRUE;
731 return FALSE;
735 * handle_signal_exception:
737 * Called when resuming from a signal handler.
739 static void
740 handle_signal_exception (gpointer obj)
742 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
743 MonoContext ctx;
745 memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));
747 mono_handle_exception (&ctx, (MonoObject *)obj);
749 mono_restore_context (&ctx);
752 void
753 mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
755 guint64 sp = ctx->gregs [AMD64_RSP];
757 ctx->gregs [AMD64_RDI] = (gsize)user_data;
759 /* Allocate a stack frame below the red zone */
760 sp -= 128;
761 /* The stack should be unaligned */
762 if ((sp % 16) == 0)
763 sp -= 8;
764 #ifdef __linux__
765 /* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
766 *(guint64*)sp = ctx->gregs [AMD64_RIP];
767 #endif
768 ctx->gregs [AMD64_RSP] = sp;
769 ctx->gregs [AMD64_RIP] = (gsize)async_cb;
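/* when the thread resumes with this modified context it executes async_cb on
 * the interrupted thread's stack, with user_data already in %rdi (the first
 * SysV argument register, set above) */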
773 * mono_arch_handle_exception:
774 * \param ctx saved processor state
775 * \param obj the exception object
777 gboolean
778 mono_arch_handle_exception (void *sigctx, gpointer obj)
780 #if defined(MONO_ARCH_USE_SIGACTION)
781 MonoContext mctx;
784 * Handling the exception in the signal handler is problematic, since the original
785 * signal is disabled, and we could run arbitrary code through the debugger. So
786 * resume into the normal stack and do most work there if possible.
788 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
790 /* Pass the ctx parameter in TLS */
791 mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);
793 mctx = jit_tls->ex_ctx;
794 mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
795 mono_monoctx_to_sigctx (&mctx, sigctx);
797 return TRUE;
798 #else
799 MonoContext mctx;
801 mono_sigctx_to_monoctx (sigctx, &mctx);
803 mono_handle_exception (&mctx, obj);
805 mono_monoctx_to_sigctx (&mctx, sigctx);
807 return TRUE;
808 #endif
811 gpointer
812 mono_arch_ip_from_context (void *sigctx)
814 #if defined(MONO_ARCH_USE_SIGACTION)
815 ucontext_t *ctx = (ucontext_t*)sigctx;
817 return (gpointer)UCONTEXT_REG_RIP (ctx);
818 #elif defined(HOST_WIN32)
819 return (gpointer)(((CONTEXT*)sigctx)->Rip);
820 #else
821 MonoContext *ctx = (MonoContext*)sigctx;
822 return (gpointer)ctx->gregs [AMD64_RIP];
823 #endif
826 static MonoObject*
827 restore_soft_guard_pages (void)
829 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
830 if (jit_tls->stack_ovf_guard_base)
831 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
833 if (jit_tls->stack_ovf_pending) {
834 MonoDomain *domain = mono_domain_get ();
835 jit_tls->stack_ovf_pending = 0;
836 return (MonoObject *) domain->stack_overflow_ex;
839 return NULL;
843 * this function modifies mctx so that when it is restored, it
844 * won't execute starting at mctx.eip, but in a function that
845 * will restore the protection on the soft-guard pages and then
846 * continue at mctx.eip.
848 static void
849 prepare_for_guard_pages (MonoContext *mctx)
851 gpointer *sp;
852 sp = (gpointer *)(mctx->gregs [AMD64_RSP]);
853 sp -= 1;
854 /* the return addr */
855 sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
856 mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
857 mctx->gregs [AMD64_RSP] = (guint64)sp;
860 static void
861 altstack_handle_and_restore (MonoContext *ctx, MonoObject *obj, gboolean stack_ovf)
863 MonoContext mctx;
864 MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), MONO_CONTEXT_GET_IP (ctx), NULL);
866 if (!ji)
867 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
869 mctx = *ctx;
871 mono_handle_exception (&mctx, obj);
872 if (stack_ovf) {
873 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
874 jit_tls->stack_ovf_pending = 1;
875 prepare_for_guard_pages (&mctx);
877 mono_restore_context (&mctx);
880 void
881 mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
883 #if defined(MONO_ARCH_USE_SIGACTION)
884 MonoException *exc = NULL;
885 gpointer *sp;
886 int frame_size;
887 MonoContext *copied_ctx;
889 if (stack_ovf)
890 exc = mono_domain_get ()->stack_overflow_ex;
892 /* setup a call frame on the real stack so that control is returned there
893 * and exception handling can continue.
894 * The frame looks like:
895 * ucontext struct
896 * ...
897 * return ip
898 * 128 is the size of the red zone
900 frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
901 frame_size += 15;
902 frame_size &= ~15;
903 sp = (gpointer *)(UCONTEXT_REG_RSP (sigctx) & ~15);
904 sp = (gpointer *)((char*)sp - frame_size);
905 copied_ctx = (MonoContext*)(sp + 4);
906 /* the arguments must be aligned */
907 sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
908 mono_sigctx_to_monoctx (sigctx, copied_ctx);
909 /* on return from the signal handler, execution starts in altstack_handle_and_restore () */
910 UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
911 UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
912 UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
913 UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
914 UCONTEXT_REG_RDX (sigctx) = stack_ovf;
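/* RDI/RSI/RDX now hold the three arguments of altstack_handle_and_restore ()
 * (ctx, obj, stack_ovf), which runs on the normal stack once the signal
 * handler returns */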
915 #endif
918 #ifndef DISABLE_JIT
919 GSList*
920 mono_amd64_get_exception_trampolines (gboolean aot)
922 MonoTrampInfo *info;
923 GSList *tramps = NULL;
925 /* LLVM needs different throw trampolines */
926 get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot, FALSE);
927 tramps = g_slist_prepend (tramps, info);
929 get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot, FALSE);
930 tramps = g_slist_prepend (tramps, info);
932 get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", aot, FALSE);
933 tramps = g_slist_prepend (tramps, info);
935 return tramps;
937 #endif /* !DISABLE_JIT */
939 void
940 mono_arch_exceptions_init (void)
942 GSList *tramps, *l;
943 gpointer tramp;
945 if (mono_ee_features.use_aot_trampolines) {
946 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
947 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
948 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
949 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
950 tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
951 mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
952 } else {
953 /* Call this to avoid initialization races */
954 tramps = mono_amd64_get_exception_trampolines (FALSE);
955 for (l = tramps; l; l = l->next) {
956 MonoTrampInfo *info = (MonoTrampInfo *)l->data;
958 mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
959 mono_tramp_info_register (info, NULL);
961 g_slist_free (tramps);
965 // Implies defined(TARGET_WIN32)
966 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
968 static void
969 mono_arch_unwindinfo_create (gpointer* monoui)
971 PUNWIND_INFO newunwindinfo;
972 *monoui = newunwindinfo = g_new0 (UNWIND_INFO, 1);
973 newunwindinfo->Version = 1;
976 void
977 mono_arch_unwindinfo_add_push_nonvol (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
979 PUNWIND_CODE unwindcode;
980 guchar codeindex;
982 g_assert (unwindinfo != NULL);
984 if (unwindinfo->CountOfCodes >= MONO_MAX_UNWIND_CODES)
985 g_error ("Larger allocation needed for the unwind information.");
987 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
988 unwindcode = &unwindinfo->UnwindCode [codeindex];
989 unwindcode->UnwindOp = UWOP_PUSH_NONVOL;
990 unwindcode->CodeOffset = (guchar)unwind_op->when;
991 unwindcode->OpInfo = unwind_op->reg;
993 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
994 g_error ("Adding unwind info in wrong order.");
996 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
999 void
1000 mono_arch_unwindinfo_add_set_fpreg (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
1002 PUNWIND_CODE unwindcode;
1003 guchar codeindex;
1005 g_assert (unwindinfo != NULL);
1007 if (unwindinfo->CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1008 g_error ("Larger allocation needed for the unwind information.");
1010 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
1011 unwindcode = &unwindinfo->UnwindCode [codeindex];
1012 unwindcode->UnwindOp = UWOP_SET_FPREG;
1013 unwindcode->CodeOffset = (guchar)unwind_op->when;
1015 g_assert (unwind_op->val % 16 == 0);
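/* the Windows x64 unwind format stores the frame pointer offset from RSP
 * scaled by 16, hence the alignment assert above and the division below */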
1016 unwindinfo->FrameRegister = unwind_op->reg;
1017 unwindinfo->FrameOffset = unwind_op->val / 16;
1019 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
1020 g_error ("Adding unwind info in wrong order.");
1022 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
1025 void
1026 mono_arch_unwindinfo_add_alloc_stack (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
1028 PUNWIND_CODE unwindcode;
1029 guchar codeindex;
1030 guchar codesneeded;
1031 guint size;
1033 g_assert (unwindinfo != NULL);
1035 size = unwind_op->val;
1037 if (size < 0x8)
1038 g_error ("Stack allocation must be equal to or greater than 0x8.");
1040 if (size <= 0x80)
1041 codesneeded = 1;
1042 else if (size <= 0x7FFF8)
1043 codesneeded = 2;
1044 else
1045 codesneeded = 3;
1047 if (unwindinfo->CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1048 g_error ("Larger allocation needed for the unwind information.");
1050 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->CountOfCodes += codesneeded);
1051 unwindcode = &unwindinfo->UnwindCode [codeindex];
1053 unwindcode->CodeOffset = (guchar)unwind_op->when;
1055 if (codesneeded == 1) {
1056 /*The size of the allocation is
1057 (the number in the OpInfo member) times 8 plus 8*/
1058 unwindcode->UnwindOp = UWOP_ALLOC_SMALL;
1059 unwindcode->OpInfo = (size - 8)/8;
1061 else {
1062 if (codesneeded == 3) {
1063 /*the unscaled size of the allocation is recorded
1064 in the next two slots in little-endian format.
1065 NOTE: unwind codes are allocated from the end to the beginning of the list so
1066 the unwind codes end up in the right execution order. The list is sorted on CodeOffset
1067 using descending sort order.*/
1068 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1069 unwindcode->OpInfo = 1;
1070 *((unsigned int*)(&(unwindcode + 1)->FrameOffset)) = size;
1072 else {
1073 /*the size of the allocation divided by 8
1074 is recorded in the next slot.
1075 NOTE: unwind codes are allocated from the end to the beginning of the list so
1076 the unwind codes end up in the right execution order. The list is sorted on CodeOffset
1077 using descending sort order.*/
1078 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1079 unwindcode->OpInfo = 0;
1080 (unwindcode + 1)->FrameOffset = (gushort)(size/8);
1084 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
1085 g_error ("Adding unwind info in wrong order.");
1087 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
1090 static gboolean g_dyn_func_table_inited;
1092 // Dynamic function table used when registering unwind info for OS unwind support.
1093 static GList *g_dynamic_function_table_begin;
1094 static GList *g_dynamic_function_table_end;
1096 // SRW lock (lightweight reader/writer lock) protecting the dynamic function table.
1097 static SRWLOCK g_dynamic_function_table_lock = SRWLOCK_INIT;
1099 static RtlInstallFunctionTableCallbackPtr g_rtl_install_function_table_callback;
1100 static RtlDeleteFunctionTablePtr g_rtl_delete_function_table;
1102 // If Win8 or Win2012Server or later, use growable function tables instead
1103 // of callbacks. The callback solution will still be the fallback on older systems.
1104 static RtlAddGrowableFunctionTablePtr g_rtl_add_growable_function_table;
1105 static RtlGrowFunctionTablePtr g_rtl_grow_function_table;
1106 static RtlDeleteGrowableFunctionTablePtr g_rtl_delete_growable_function_table;
1108 // When using the function table callback solution, an out-of-proc module is needed by
1109 // debuggers in order to read unwind info from the debug target.
1110 #ifdef _MSC_VER
1111 #define MONO_DAC_MODULE L"mono-2.0-dac-sgen.dll"
1112 #else
1113 #define MONO_DAC_MODULE L"mono-2.0-sgen.dll"
1114 #endif
1116 #define MONO_DAC_MODULE_MAX_PATH 1024
1118 static void
1119 init_table_no_lock (void)
1121 if (g_dyn_func_table_inited == FALSE) {
1122 g_assert_checked (g_dynamic_function_table_begin == NULL);
1123 g_assert_checked (g_dynamic_function_table_end == NULL);
1124 g_assert_checked (g_rtl_install_function_table_callback == NULL);
1125 g_assert_checked (g_rtl_delete_function_table == NULL);
1126 g_assert_checked (g_rtl_add_growable_function_table == NULL);
1127 g_assert_checked (g_rtl_grow_function_table == NULL);
1128 g_assert_checked (g_rtl_delete_growable_function_table == NULL);
1130 // Load functions available on Win8/Win2012Server or later. If running on earlier
1131 // systems the GetProcAddress calls below will fail; this is expected behavior.
1132 HMODULE ntdll;
1133 if (GetModuleHandleEx (0, L"ntdll.dll", &ntdll)) {
1134 g_rtl_add_growable_function_table = (RtlAddGrowableFunctionTablePtr)GetProcAddress (ntdll, "RtlAddGrowableFunctionTable");
1135 g_rtl_grow_function_table = (RtlGrowFunctionTablePtr)GetProcAddress (ntdll, "RtlGrowFunctionTable");
1136 g_rtl_delete_growable_function_table = (RtlDeleteGrowableFunctionTablePtr)GetProcAddress (ntdll, "RtlDeleteGrowableFunctionTable");
1139 // Fallback on systems not having RtlAddGrowableFunctionTable.
1140 if (g_rtl_add_growable_function_table == NULL) {
1141 HMODULE kernel32dll;
1142 if (GetModuleHandleEx (0, L"kernel32.dll", &kernel32dll)) {
1143 g_rtl_install_function_table_callback = (RtlInstallFunctionTableCallbackPtr)GetProcAddress (kernel32dll, "RtlInstallFunctionTableCallback");
1144 g_rtl_delete_function_table = (RtlDeleteFunctionTablePtr)GetProcAddress (kernel32dll, "RtlDeleteFunctionTable");
1148 g_dyn_func_table_inited = TRUE;
1152 void
1153 mono_arch_unwindinfo_init_table (void)
1155 if (g_dyn_func_table_inited == FALSE) {
1157 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1159 init_table_no_lock ();
1161 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1165 static void
1166 terminate_table_no_lock (void)
1168 if (g_dyn_func_table_inited == TRUE) {
1169 if (g_dynamic_function_table_begin != NULL) {
1170 // Free all list elements.
1171 for (GList *l = g_dynamic_function_table_begin; l; l = l->next) {
1172 if (l->data) {
1173 g_free (l->data);
1174 l->data = NULL;
1178 // Free the list.
1179 g_list_free (g_dynamic_function_table_begin);
1180 g_dynamic_function_table_begin = NULL;
1181 g_dynamic_function_table_end = NULL;
1184 g_rtl_delete_growable_function_table = NULL;
1185 g_rtl_grow_function_table = NULL;
1186 g_rtl_add_growable_function_table = NULL;
1188 g_rtl_delete_function_table = NULL;
1189 g_rtl_install_function_table_callback = NULL;
1191 g_dyn_func_table_inited = FALSE;
1195 void
1196 mono_arch_unwindinfo_terminate_table (void)
1198 if (g_dyn_func_table_inited == TRUE) {
1200 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1202 terminate_table_no_lock ();
1204 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1208 static GList *
1209 fast_find_range_in_table_no_lock_ex (gsize begin_range, gsize end_range, gboolean *continue_search)
1211 GList *found_entry = NULL;
1213 // Fast path, look at boundaries.
1214 if (g_dynamic_function_table_begin != NULL) {
1215 DynamicFunctionTableEntry *first_entry = (DynamicFunctionTableEntry*)g_dynamic_function_table_begin->data;
1216 DynamicFunctionTableEntry *last_entry = (g_dynamic_function_table_end != NULL ) ? (DynamicFunctionTableEntry*)g_dynamic_function_table_end->data : first_entry;
1218 // Sorted in descending order based on begin_range; check the first item, which is the entry with the highest range.
1219 if (first_entry != NULL && first_entry->begin_range <= begin_range && first_entry->end_range >= end_range) {
1220 // Entry belongs to first entry in list.
1221 found_entry = g_dynamic_function_table_begin;
1222 *continue_search = FALSE;
1223 } else {
1224 if (first_entry != NULL && first_entry->begin_range >= begin_range) {
1225 if (last_entry != NULL && last_entry->begin_range <= begin_range) {
1226 // Entry has a range that could exist in table, continue search.
1227 *continue_search = TRUE;
1233 return found_entry;
1236 static inline DynamicFunctionTableEntry *
1237 fast_find_range_in_table_no_lock (gsize begin_range, gsize end_range, gboolean *continue_search)
1239 GList *found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, continue_search);
1240 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
1243 static GList *
1244 find_range_in_table_no_lock_ex (const gpointer code_block, gsize block_size)
1246 GList *found_entry = NULL;
1247 gboolean continue_search = FALSE;
1249 gsize begin_range = (gsize)code_block;
1250 gsize end_range = begin_range + block_size;
1252 // Fast path, check table boundaries.
1253 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, &continue_search);
1254 if (found_entry || continue_search == FALSE)
1255 return found_entry;
1257 // Scan table for an entry including range.
1258 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1259 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1260 g_assert_checked (current_entry != NULL);
1262 // Do we have a match?
1263 if (current_entry->begin_range == begin_range && current_entry->end_range == end_range) {
1264 found_entry = node;
1265 break;
1269 return found_entry;
1272 static inline DynamicFunctionTableEntry *
1273 find_range_in_table_no_lock (const gpointer code_block, gsize block_size)
1275 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1276 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
1279 static GList *
1280 find_pc_in_table_no_lock_ex (const gpointer pc)
1282 GList *found_entry = NULL;
1283 gboolean continue_search = FALSE;
1285 gsize begin_range = (gsize)pc;
1286 gsize end_range = begin_range;
1288 // Fast path, check table boundaries.
1289 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, begin_range, &continue_search);
1290 if (found_entry || continue_search == FALSE)
1291 return found_entry;
1293 // Scan table for an entry including the range.
1294 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1295 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1296 g_assert_checked (current_entry != NULL);
1298 // Do we have a match?
1299 if (current_entry->begin_range <= begin_range && current_entry->end_range >= end_range) {
1300 found_entry = node;
1301 break;
1305 return found_entry;
1308 static inline DynamicFunctionTableEntry *
1309 find_pc_in_table_no_lock (const gpointer pc)
1311 GList *found_entry = find_pc_in_table_no_lock_ex (pc);
1312 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
1315 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
1316 static void
1317 validate_table_no_lock (void)
1319 // Validation method checking that the table is sorted as expected and doesn't include overlapping regions.
1320 // The method will assert on failure to explicitly indicate which check failed.
1321 if (g_dynamic_function_table_begin != NULL) {
1322 g_assert_checked (g_dynamic_function_table_end != NULL);
1324 DynamicFunctionTableEntry *previous_entry = NULL;
1325 DynamicFunctionTableEntry *current_entry = NULL;
1326 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1327 current_entry = (DynamicFunctionTableEntry *)node->data;
1329 g_assert_checked (current_entry != NULL);
1330 g_assert_checked (current_entry->end_range > current_entry->begin_range);
1332 if (previous_entry != NULL) {
1333 // List should be sorted in descending order on begin_range.
1334 g_assert_checked (previous_entry->begin_range > current_entry->begin_range);
1336 // Check for overlapping regions.
1337 g_assert_checked (previous_entry->begin_range >= current_entry->end_range);
1340 previous_entry = current_entry;
1345 #else
1347 static inline void
1348 validate_table_no_lock (void)
1351 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
1353 // Forward declare.
1354 static PRUNTIME_FUNCTION MONO_GET_RUNTIME_FUNCTION_CALLBACK (DWORD64 ControlPc, IN PVOID Context);
1356 DynamicFunctionTableEntry *
1357 mono_arch_unwindinfo_insert_range_in_table (const gpointer code_block, gsize block_size)
1359 DynamicFunctionTableEntry *new_entry = NULL;
1361 gsize begin_range = (gsize)code_block;
1362 gsize end_range = begin_range + block_size;
1364 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1365 init_table_no_lock ();
1366 new_entry = find_range_in_table_no_lock (code_block, block_size);
1367 if (new_entry == NULL) {
1368 // Allocate new entry.
1369 new_entry = g_new0 (DynamicFunctionTableEntry, 1);
1370 if (new_entry != NULL) {
1372 // Pre-allocate RUNTIME_FUNCTION array, assume average method size of
1373 // MONO_UNWIND_INFO_RT_FUNC_SIZE bytes.
1374 InitializeSRWLock (&new_entry->lock);
1375 new_entry->handle = NULL;
1376 new_entry->begin_range = begin_range;
1377 new_entry->end_range = end_range;
1378 new_entry->rt_funcs_max_count = (block_size / MONO_UNWIND_INFO_RT_FUNC_SIZE) + 1;
1379 new_entry->rt_funcs_current_count = 0;
1380 new_entry->rt_funcs = g_new0 (RUNTIME_FUNCTION, new_entry->rt_funcs_max_count);
1382 if (new_entry->rt_funcs != NULL) {
1383 // Check insert on boundaries. List is sorted descending on begin_range.
1384 if (g_dynamic_function_table_begin == NULL) {
1385 g_dynamic_function_table_begin = g_list_append (g_dynamic_function_table_begin, new_entry);
1386 g_dynamic_function_table_end = g_dynamic_function_table_begin;
1387 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_begin->data))->begin_range < begin_range) {
1388 // Insert at the head.
1389 g_dynamic_function_table_begin = g_list_prepend (g_dynamic_function_table_begin, new_entry);
1390 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_end->data))->begin_range > begin_range) {
1391 // Insert at tail.
1392 g_list_append (g_dynamic_function_table_end, new_entry);
1393 g_dynamic_function_table_end = g_dynamic_function_table_end->next;
1394 } else {
1395 // Search and insert at correct position.
1396 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1397 DynamicFunctionTableEntry * current_entry = (DynamicFunctionTableEntry *)node->data;
1398 g_assert_checked (current_entry != NULL);
1400 if (current_entry->begin_range < new_entry->begin_range) {
1401 g_dynamic_function_table_begin = g_list_insert_before (g_dynamic_function_table_begin, node, new_entry);
1402 break;
1407 // Register dynamic function table entry with OS.
1408 if (g_rtl_add_growable_function_table != NULL) {
1409 // Allocate new growable handle table for entry.
1410 g_assert_checked (new_entry->handle == NULL);
1411 DWORD result = g_rtl_add_growable_function_table (&new_entry->handle,
1412 new_entry->rt_funcs, new_entry->rt_funcs_current_count,
1413 new_entry->rt_funcs_max_count, new_entry->begin_range, new_entry->end_range);
1414 g_assert (!result);
1415 } else if (g_rtl_install_function_table_callback != NULL) {
1416 WCHAR buffer [MONO_DAC_MODULE_MAX_PATH] = { 0 };
1417 WCHAR *path = buffer;
1419 // DAC module should be in the same directory as the
1420 // main executable.
1421 GetModuleFileNameW (NULL, buffer, G_N_ELEMENTS(buffer));
1422 path = wcsrchr (buffer, TEXT('\\'));
1423 if (path != NULL) {
1424 path++;
1425 *path = TEXT('\0');
1428 wcscat_s (buffer, G_N_ELEMENTS(buffer), MONO_DAC_MODULE);
1429 path = buffer;
1431 // Register function table callback + out of proc module.
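// RtlInstallFunctionTableCallback requires the two low-order bits of the
// TableIdentifier to be set, hence the "| 3" below.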
1432 new_entry->handle = (PVOID)((DWORD64)(new_entry->begin_range) | 3);
1433 BOOLEAN result = g_rtl_install_function_table_callback ((DWORD64)(new_entry->handle),
1434 (DWORD64)(new_entry->begin_range), (DWORD)(new_entry->end_range - new_entry->begin_range),
1435 MONO_GET_RUNTIME_FUNCTION_CALLBACK, new_entry, path);
1436 g_assert(result);
1437 } else {
1438 g_assert_not_reached ();
1441 // Only included in checked builds. Validates the structure of table after insert.
1442 validate_table_no_lock ();
1444 } else {
1445 g_free (new_entry);
1446 new_entry = NULL;
1450 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1452 return new_entry;
1455 static void
1456 remove_range_in_table_no_lock (GList *entry)
1458 if (entry != NULL) {
1459 if (entry == g_dynamic_function_table_end)
1460 g_dynamic_function_table_end = entry->prev;
1462 g_dynamic_function_table_begin = g_list_remove_link (g_dynamic_function_table_begin, entry);
1463 DynamicFunctionTableEntry *removed_entry = (DynamicFunctionTableEntry *)entry->data;
1465 g_assert_checked (removed_entry != NULL);
1466 g_assert_checked (removed_entry->rt_funcs != NULL);
1468 // Remove function table from OS.
1469 if (removed_entry->handle != NULL) {
1470 if (g_rtl_delete_growable_function_table != NULL) {
1471 g_rtl_delete_growable_function_table (removed_entry->handle);
1472 } else if (g_rtl_delete_function_table != NULL) {
1473 g_rtl_delete_function_table ((PRUNTIME_FUNCTION)removed_entry->handle);
1474 } else {
1475 g_assert_not_reached ();
1479 g_free (removed_entry->rt_funcs);
1480 g_free (removed_entry);
1482 g_list_free_1 (entry);
1485 // Only included in checked builds. Validates the structure of table after remove.
1486 validate_table_no_lock ();
1489 void
1490 mono_arch_unwindinfo_remove_pc_range_in_table (const gpointer code)
1492 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1494 GList *found_entry = find_pc_in_table_no_lock_ex (code);
1496 g_assert_checked (found_entry == NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code);
1497 remove_range_in_table_no_lock (found_entry);
1499 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1502 void
1503 mono_arch_unwindinfo_remove_range_in_table (const gpointer code_block, gsize block_size)
1505 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1507 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1509 g_assert_checked (found_entry == NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code_block);
1510 remove_range_in_table_no_lock (found_entry);
1512 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1515 PRUNTIME_FUNCTION
1516 mono_arch_unwindinfo_find_rt_func_in_table (const gpointer code, gsize code_size)
1518 PRUNTIME_FUNCTION found_rt_func = NULL;
1520 gsize begin_range = (gsize)code;
1521 gsize end_range = begin_range + code_size;
1523 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1525 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1527 if (found_entry != NULL) {
1529 AcquireSRWLockShared (&found_entry->lock);
1531 g_assert_checked (found_entry->begin_range <= begin_range);
1532 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1533 g_assert_checked (found_entry->rt_funcs != NULL);
1535 for (int i = 0; i < found_entry->rt_funcs_current_count; ++i) {
1536 PRUNTIME_FUNCTION current_rt_func = (PRUNTIME_FUNCTION)(&found_entry->rt_funcs [i]);
1538 // Is this our RT function entry?
1539 if (found_entry->begin_range + current_rt_func->BeginAddress <= begin_range &&
1540 found_entry->begin_range + current_rt_func->EndAddress >= end_range) {
1541 found_rt_func = current_rt_func;
1542 break;
1546 ReleaseSRWLockShared (&found_entry->lock);
1549 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
1551 return found_rt_func;
1554 static inline PRUNTIME_FUNCTION
1555 mono_arch_unwindinfo_find_pc_rt_func_in_table (const gpointer pc)
1557 return mono_arch_unwindinfo_find_rt_func_in_table (pc, 0);
1560 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
1561 static void
1562 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1564 // Validation method checking that the runtime function table is sorted as expected and doesn't include overlapping regions.
1565 // The method will assert on failure to explicitly indicate which check failed.
1566 g_assert_checked (entry != NULL);
1567 g_assert_checked (entry->rt_funcs_max_count >= entry->rt_funcs_current_count);
1568 g_assert_checked (entry->rt_funcs != NULL);
1570 PRUNTIME_FUNCTION current_rt_func = NULL;
1571 PRUNTIME_FUNCTION previous_rt_func = NULL;
1572 for (int i = 0; i < entry->rt_funcs_current_count; ++i) {
1573 current_rt_func = &(entry->rt_funcs [i]);
1575 g_assert_checked (current_rt_func->BeginAddress < current_rt_func->EndAddress);
1576 g_assert_checked (current_rt_func->EndAddress <= current_rt_func->UnwindData);
1578 if (previous_rt_func != NULL) {
1579 // List should be sorted in ascending order based on BeginAddress.
1580 g_assert_checked (previous_rt_func->BeginAddress < current_rt_func->BeginAddress);
1582 // Check for overlapped regions.
1583 g_assert_checked (previous_rt_func->EndAddress <= current_rt_func->BeginAddress);
1586 previous_rt_func = current_rt_func;
1590 #else
1592 static inline void
1593 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1596 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
1598 PRUNTIME_FUNCTION
1599 mono_arch_unwindinfo_insert_rt_func_in_table (const gpointer code, gsize code_size)
1601 PRUNTIME_FUNCTION new_rt_func = NULL;
1603 gsize begin_range = (gsize)code;
1604 gsize end_range = begin_range + code_size;
1606 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1608 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1610 if (found_entry != NULL) {
1612 AcquireSRWLockExclusive (&found_entry->lock);
1614 g_assert_checked (found_entry->begin_range <= begin_range);
1615 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1616 g_assert_checked (found_entry->rt_funcs != NULL);
1617 g_assert_checked ((guchar*)code - found_entry->begin_range >= 0);
1619 gsize code_offset = (gsize)code - found_entry->begin_range;
1620 gsize entry_count = found_entry->rt_funcs_current_count;
1621 gsize max_entry_count = found_entry->rt_funcs_max_count;
1622 PRUNTIME_FUNCTION current_rt_funcs = found_entry->rt_funcs;
1624 RUNTIME_FUNCTION new_rt_func_data;
1625 new_rt_func_data.BeginAddress = code_offset;
1626 new_rt_func_data.EndAddress = code_offset + code_size;
1628 gsize aligned_unwind_data = ALIGN_TO(end_range, sizeof(host_mgreg_t));
1629 new_rt_func_data.UnwindData = aligned_unwind_data - found_entry->begin_range;
1631 g_assert_checked (new_rt_func_data.UnwindData == ALIGN_TO(new_rt_func_data.EndAddress, sizeof (host_mgreg_t)));
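// UnwindData is stored relative to the table's begin_range; it points just past
// the end of the method's code (pointer aligned), which is where
// mono_arch_unwindinfo_install_method_unwind_info () later copies the UNWIND_INFO.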
1633 PRUNTIME_FUNCTION new_rt_funcs = NULL;
1635 // List needs to be sorted in ascending order based on BeginAddress (Windows requirement if the list is
1636 // going to be directly reused in OS func tables). Check if we can append to the end of the existing table without a realloc.
1637 if (entry_count == 0 || (entry_count < max_entry_count) && (current_rt_funcs [entry_count - 1].BeginAddress) < code_offset) {
1638 new_rt_func = &(current_rt_funcs [entry_count]);
1639 *new_rt_func = new_rt_func_data;
1640 entry_count++;
1641 } else {
1642 // No easy way out, need to realloc, grow to double size (or current max, if too small).
1643 max_entry_count = entry_count * 2 > max_entry_count ? entry_count * 2 : max_entry_count;
1644 new_rt_funcs = g_new0 (RUNTIME_FUNCTION, max_entry_count);
1646 if (new_rt_funcs != NULL) {
1647 gsize from_index = 0;
1648 gsize to_index = 0;
1650 // Copy from old table into new table. Make sure new rt func gets inserted
1651 // into correct location based on sort order.
1652 for (; from_index < entry_count; ++from_index) {
1653 if (new_rt_func == NULL && current_rt_funcs [from_index].BeginAddress > new_rt_func_data.BeginAddress) {
1654 new_rt_func = &(new_rt_funcs [to_index++]);
1655 *new_rt_func = new_rt_func_data;
1658 if (current_rt_funcs [from_index].UnwindData != 0)
1659 new_rt_funcs [to_index++] = current_rt_funcs [from_index];
1662 // If we didn't insert by now, put it last in the list.
1663 if (new_rt_func == NULL) {
1664 new_rt_func = &(new_rt_funcs [to_index]);
1665 *new_rt_func = new_rt_func_data;
1669 entry_count++;
1672 // Update the stats for current entry.
1673 found_entry->rt_funcs_current_count = entry_count;
1674 found_entry->rt_funcs_max_count = max_entry_count;
1676 if (new_rt_funcs == NULL && g_rtl_grow_function_table != NULL) {
1677 // No new table just report increase in use.
1678 g_assert_checked (found_entry->handle != NULL);
1679 g_rtl_grow_function_table (found_entry->handle, found_entry->rt_funcs_current_count);
1680 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table != NULL) {
1681 // New table, delete old table and rt funcs, and register a new one.
1682 g_assert_checked (g_rtl_delete_growable_function_table != NULL);
1683 g_rtl_delete_growable_function_table (found_entry->handle);
1684 found_entry->handle = NULL;
1685 g_free (found_entry->rt_funcs);
1686 found_entry->rt_funcs = new_rt_funcs;
1687 DWORD result = g_rtl_add_growable_function_table (&found_entry->handle,
1688 found_entry->rt_funcs, found_entry->rt_funcs_current_count,
1689 found_entry->rt_funcs_max_count, found_entry->begin_range, found_entry->end_range);
1690 g_assert (!result);
1691 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table == NULL) {
1692 // No table registered with OS, callback solution in use. Switch tables.
1693 g_free (found_entry->rt_funcs);
1694 found_entry->rt_funcs = new_rt_funcs;
1695 } else if (new_rt_funcs == NULL && g_rtl_grow_function_table == NULL) {
1696 // No table registered with OS, callback solution in use, nothing to do.
1697 } else {
1698 g_assert_not_reached ();
1701 // Only included in checked builds. Validates the structure of table after insert.
1702 validate_rt_funcs_in_table_no_lock (found_entry);
1704 ReleaseSRWLockExclusive (&found_entry->lock);
1707 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
1709 return new_rt_func;
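// OS unwind callback used on the fallback path where the growable function table APIs are not
// available (the RtlInstallFunctionTableCallback route); it resolves the RUNTIME_FUNCTION covering
// ControlPc in dynamically generated code.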
1712 static PRUNTIME_FUNCTION
1713 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1715 return mono_arch_unwindinfo_find_pc_rt_func_in_table ((gpointer)ControlPc);
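// Replays the DWARF-style unwind ops collected by the JIT and translates them into Windows x64
// UNWIND_CODE entries: non-volatile register pushes are recorded before the SP/FP allocation,
// matching the prologue order expected by the Windows x64 ABI.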
1718 static void
1719 initialize_unwind_info_internal_ex (GSList *unwind_ops, PUNWIND_INFO unwindinfo)
1721 if (unwind_ops != NULL && unwindinfo != NULL) {
1722 MonoUnwindOp *unwind_op_data;
1723 gboolean sp_alloced = FALSE;
1724 gboolean fp_alloced = FALSE;
1726 // Replay the collected unwind info and set up the Windows format.
1727 for (GSList *l = unwind_ops; l; l = l->next) {
1728 unwind_op_data = (MonoUnwindOp *)l->data;
1729 switch (unwind_op_data->op) {
1730 case DW_CFA_offset : {
1731 // Pushes should go before SP/FP allocation to be compliant with Windows x64 ABI.
1732 // TODO: DW_CFA_offset can also be used to move saved regs into frame.
1733 if (unwind_op_data->reg != AMD64_RIP && sp_alloced == FALSE && fp_alloced == FALSE)
1734 mono_arch_unwindinfo_add_push_nonvol (unwindinfo, unwind_op_data);
1735 break;
1737 case DW_CFA_mono_sp_alloc_info_win64 : {
1738 mono_arch_unwindinfo_add_alloc_stack (unwindinfo, unwind_op_data);
1739 sp_alloced = TRUE;
1740 break;
1742 case DW_CFA_mono_fp_alloc_info_win64 : {
1743 mono_arch_unwindinfo_add_set_fpreg (unwindinfo, unwind_op_data);
1744 fp_alloced = TRUE;
1745 break;
1747 default :
1748 break;
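// Allocates a Mono-side UNWIND_INFO and populates it from the supplied unwind ops.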
1754 static PUNWIND_INFO
1755 initialize_unwind_info_internal (GSList *unwind_ops)
1757 PUNWIND_INFO unwindinfo;
1759 mono_arch_unwindinfo_create ((gpointer*)&unwindinfo);
1760 initialize_unwind_info_internal_ex (unwind_ops, unwindinfo);
1762 return unwindinfo;
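// Returns the number of Windows unwind codes needed for UNWIND_OPS, computed on a
// stack-allocated scratch UNWIND_INFO.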
1765 guchar
1766 mono_arch_unwindinfo_get_code_count (GSList *unwind_ops)
1768 UNWIND_INFO unwindinfo = {0};
1769 initialize_unwind_info_internal_ex (unwind_ops, &unwindinfo);
1770 return unwindinfo.CountOfCodes;
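// Allocates Windows-format unwind info for UNWIND_OPS, or returns NULL when there are none.
// Free with mono_arch_unwindinfo_free_unwind_info.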
1773 PUNWIND_INFO
1774 mono_arch_unwindinfo_alloc_unwind_info (GSList *unwind_ops)
1776 if (!unwind_ops)
1777 return NULL;
1779 return initialize_unwind_info_internal (unwind_ops);
1782 void
1783 mono_arch_unwindinfo_free_unwind_info (PUNWIND_INFO unwind_info)
1785 g_free (unwind_info);
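// Called during method compilation: builds the unwind info from the method's unwind ops and
// returns the size (in bytes) of the Windows-format unwind data that must be reserved alongside the code.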
1788 guint
1789 mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg)
1791 MonoCompile * current_cfg = (MonoCompile *)cfg;
1792 g_assert (current_cfg->arch.unwindinfo == NULL);
1793 current_cfg->arch.unwindinfo = initialize_unwind_info_internal (current_cfg->unwind_ops);
1794 return mono_arch_unwindinfo_get_size (((PUNWIND_INFO)(current_cfg->arch.unwindinfo))->CountOfCodes);
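// Copies the accumulated unwind info to aligned memory placed right after the JIT'ed code,
// frees the temporary copy, and registers a RUNTIME_FUNCTION for the code range.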
1797 void
1798 mono_arch_unwindinfo_install_method_unwind_info (PUNWIND_INFO *monoui, gpointer code, guint code_size)
1800 PUNWIND_INFO unwindinfo, targetinfo;
1801 guchar codecount;
1802 guint64 targetlocation;
1803 if (!*monoui)
1804 return;
1806 unwindinfo = *monoui;
1807 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1808 targetinfo = (PUNWIND_INFO) ALIGN_TO(targetlocation, sizeof (host_mgreg_t));
1810 memcpy (targetinfo, unwindinfo, sizeof (UNWIND_INFO) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1812 codecount = unwindinfo->CountOfCodes;
1813 if (codecount) {
1814 memcpy (&targetinfo->UnwindCode [0], &unwindinfo->UnwindCode [MONO_MAX_UNWIND_CODES - codecount],
1815 sizeof (UNWIND_CODE) * codecount);
1818 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
1819 if (codecount) {
1820 // Validate the order of unwind op codes in checked builds. Offsets should be in descending order.
1821 // In the first iteration previous == current; this is intended to handle UWOP_ALLOC_LARGE as the first item.
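// UWOP_ALLOC_LARGE occupies extra slots: with OpInfo == 0 the allocation size / 8 is stored in the
// next slot (2 slots total); otherwise the full 32-bit size follows in the next two slots (3 slots
// total), hence the extra increments below.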
1822 int previous = 0;
1823 for (int current = 0; current < codecount; current++) {
1824 g_assert_checked (targetinfo->UnwindCode [previous].CodeOffset >= targetinfo->UnwindCode [current].CodeOffset);
1825 previous = current;
1826 if (targetinfo->UnwindCode [current].UnwindOp == UWOP_ALLOC_LARGE) {
1827 if (targetinfo->UnwindCode [current].OpInfo == 0) {
1828 current++;
1829 } else {
1830 current += 2;
1835 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
1837 mono_arch_unwindinfo_free_unwind_info (unwindinfo);
1838 *monoui = NULL;
1840 // Register unwind info in table.
1841 mono_arch_unwindinfo_insert_rt_func_in_table (code, code_size);
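// Same as the method variant, but the unwind info is built directly from the unwind ops at
// install time; used for trampolines.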
1844 void
1845 mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size)
1847 PUNWIND_INFO unwindinfo = initialize_unwind_info_internal (unwind_ops);
1848 if (unwindinfo != NULL) {
1849 mono_arch_unwindinfo_install_method_unwind_info (&unwindinfo, code, code_size);
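// Code manager callbacks: each new code chunk is tracked as a PC range in the dynamic function
// table, so unwind info for code placed inside it can later be registered and looked up.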
1853 void
1854 mono_arch_code_chunk_new (void *chunk, int size)
1856 mono_arch_unwindinfo_insert_range_in_table (chunk, size);
1859 void mono_arch_code_chunk_destroy (void *chunk)
1861 mono_arch_unwindinfo_remove_pc_range_in_table (chunk);
1863 #endif /* MONO_ARCH_HAVE_UNWIND_TABLE */
1865 #if MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT)
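// Generates (and caches) a small trampoline that restores a tasklet continuation: it copies the
// saved stack back into place, reloads RBP/RSP from the saved LMF, and jumps to the saved return
// address with the state value already in RAX.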
1866 MonoContinuationRestore
1867 mono_tasklets_arch_restore (void)
1869 static guint8* saved = NULL;
1870 guint8 *code, *start;
1871 int cont_reg = AMD64_R9; /* register usable on both call conventions */
1872 const guint kMaxCodeSize = 64;
1875 if (saved)
1876 return (MonoContinuationRestore)saved;
1877 code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
1878 /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
1879 /* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
1880 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
1881 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
1882 * We move cont to cont_reg since we need both rcx and rdi for the copy
1883 * state is moved to $rax so it is set up as the return value and we can overwrite $rsi
1884 */
1885 amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
1886 amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
1887 /* setup the copy of the stack */
1888 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
1889 amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
1890 x86_cld (code);
1891 amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
1892 amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
1893 amd64_prefix (code, X86_REP_PREFIX);
1894 amd64_movsl (code);
1896 /* now restore the registers from the LMF */
1897 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
1898 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
1899 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);
1901 #ifdef WIN32
1902 amd64_mov_reg_reg (code, AMD64_R14, AMD64_ARG_REG3, 8);
1903 #else
1904 amd64_mov_reg_reg (code, AMD64_R12, AMD64_ARG_REG3, 8);
1905 #endif
1907 /* state is already in rax */
1908 amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
1909 g_assert ((code - start) <= kMaxCodeSize);
1911 mono_arch_flush_icache (start, code - start);
1912 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
1914 saved = start;
1915 return (MonoContinuationRestore)saved;
1917 #endif /* MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT) */
1919 /**
1920 * mono_arch_setup_resume_sighandler_ctx:
1921 *
1922 * Set up CTX so execution continues at FUNC.
1923 */
1924 void
1925 mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
1927 /*
1928 * When resuming from a signal handler, the stack should be misaligned, just like right after
1929 * a call.
1930 */
1931 if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
1932 MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
1933 MONO_CONTEXT_SET_IP (ctx, func);
1936 #ifdef DISABLE_JIT
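// When the JIT is disabled, the exception trampolines below are never used; the stubs only
// satisfy the linker and assert if reached.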
1937 gpointer
1938 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
1940 g_assert_not_reached ();
1941 return NULL;
1944 gpointer
1945 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
1947 g_assert_not_reached ();
1948 return NULL;
1951 gpointer
1952 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
1954 g_assert_not_reached ();
1955 return NULL;
1958 gpointer
1959 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
1961 g_assert_not_reached ();
1962 return NULL;
1965 gpointer
1966 mono_arch_get_rethrow_preserve_exception (MonoTrampInfo **info, gboolean aot)
1968 g_assert_not_reached ();
1969 return NULL;
1972 gpointer
1973 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
1975 g_assert_not_reached ();
1976 return NULL;
1979 GSList*
1980 mono_amd64_get_exception_trampolines (gboolean aot)
1982 g_assert_not_reached ();
1983 return NULL;
1985 #endif /* DISABLE_JIT */
1987 #if !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT)
1988 MonoContinuationRestore
1989 mono_tasklets_arch_restore (void)
1991 g_assert_not_reached ();
1992 return NULL;
1994 #endif /* !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT) */
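// Stack-walker helpers: mono_arch_do_ip_adjustment decrements the saved RIP by one byte so it
// points inside the call instruction rather than at the return address, keeping lookups on the
// calling site; mono_arch_undo_ip_adjustment reverts that adjustment.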
1996 void
1997 mono_arch_undo_ip_adjustment (MonoContext *ctx)
1999 ctx->gregs [AMD64_RIP]++;
2002 void
2003 mono_arch_do_ip_adjustment (MonoContext *ctx)
2005 ctx->gregs [AMD64_RIP]--;