/**
 * \file
 * exception support for AMD64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Johan Lorensson (lateralusx.github@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include <config.h>

// Secret password to unlock wcscat_s on mxe, must happen before string.h is included
#ifdef __MINGW32__
#define MINGW_HAS_SECURE_API 1
#endif

#include <glib.h>
#include <string.h>

#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#ifdef HAVE_UCONTEXT_H
#include <ucontext.h>
#endif

#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini.h"
#include "mini-amd64.h"
#include "mini-runtime.h"
#include "aot-runtime.h"
#include "tasklets.h"

#ifdef TARGET_WIN32
static void (*restore_stack) (void);
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
void *mono_win_vectored_exception_handle;

#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler(0, ep, ctx)

static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
{
#ifndef MONO_CROSS_COMPILE
	if (mono_old_win_toplevel_exception_filter) {
		return (*mono_old_win_toplevel_exception_filter)(ep);
	}
#endif

	mono_handle_native_crash ("SIGSEGV", NULL, NULL);

	return EXCEPTION_CONTINUE_SEARCH;
}

#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
static gpointer
get_win32_restore_stack (void)
{
	static guint8 *start = NULL;
	guint8 *code;

	if (start)
		return start;

	/* restore_stack (void) */
	start = code = mono_global_codeman_reserve (128);

	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* push 32 bytes of stack space for Win64 calling convention */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);

	/* restore guard page */
	amd64_mov_reg_imm (code, AMD64_R11, _resetstkoflw);
	amd64_call_reg (code, AMD64_R11);

	/* get jit_tls with context to restore */
	amd64_mov_reg_imm (code, AMD64_R11, mono_tls_get_jit_tls);
	amd64_call_reg (code, AMD64_R11);

	/* move jit_tls from return reg to arg reg */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);

	/* retrieve pointer to saved context */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoJitTlsData, stack_restore_ctx));

	/* this call does not return */
	amd64_mov_reg_imm (code, AMD64_R11, mono_restore_context);
	amd64_call_reg (code, AMD64_R11);

	g_assert ((code - start) < 128);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	return start;
}
#else
static gpointer
get_win32_restore_stack (void)
{
	// _resetstkoflw is not supported on non-desktop Windows platforms.
	return NULL;
}
#endif /* G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) */

/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
 */
static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	LONG res;
	MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
	MonoDomain* domain = mono_domain_get ();

	/* If the thread is not managed by the runtime, return early */
	if (!jit_tls)
		return EXCEPTION_CONTINUE_SEARCH;

	jit_tls->mono_win_chained_exception_needs_run = FALSE;
	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;

	switch (er->ExceptionCode) {
	case EXCEPTION_STACK_OVERFLOW:
		if (!mono_aot_only && restore_stack) {
			if (mono_arch_handle_exception (ctx, domain->stack_overflow_ex)) {
				/* need to restore stack protection once stack is unwound
				 * restore_stack will restore stack protection and then
				 * resume control to the saved stack_restore_ctx */
				mono_sigctx_to_monoctx (ctx, &jit_tls->stack_restore_ctx);
				ctx->Rip = (guint64)restore_stack;
			}
		} else {
			jit_tls->mono_win_chained_exception_needs_run = TRUE;
		}
		break;
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		jit_tls->mono_win_chained_exception_needs_run = TRUE;
		break;
	}

	if (jit_tls->mono_win_chained_exception_needs_run) {
		/* Don't copy the context back if we chained the exception,
		 * as the handler may have modified the EXCEPTION_POINTERS
		 * directly. We don't pass sigcontext to chained handlers.
		 * Return continue search so the UnhandledExceptionFilter
		 * can correctly chain the exception. */
		res = EXCEPTION_CONTINUE_SEARCH;
	}

	return res;
}

void win32_seh_init()
{
	if (!mono_aot_only)
		restore_stack = get_win32_restore_stack ();

	mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
	mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
}

void win32_seh_cleanup()
{
	guint32 ret = 0;

	if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);

	ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
	g_assert (ret);
}

void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* TARGET_WIN32 */

#ifndef DISABLE_JIT

/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, gregs_offset;

	/* restore_context (MonoContext *ctx) */

	start = code = (guint8 *)mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip, %rsp and the scratch regs %r8-%r11 */
	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
			amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
	}

	/*
	 * The context resides on the stack, in the stack frame of the
	 * caller of this function. The stack pointer that we need to
	 * restore is potentially many stack frames higher up, so the
	 * distance between them can easily be more than the red zone
	 * size. Hence the stack pointer can be restored only after
	 * we have finished loading everything from the context.
	 */
	amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	if (info)
		*info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);

	return start;
}
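
/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): the trampoline returned above behaves like a non-returning
 * function of this shape, where the MonoContext argument is a context
 * previously captured by a signal handler or the throw path. The typedef
 * name is invented for illustration only:
 *
 *   typedef void (*RestoreContextFunc) (MonoContext *ctx);
 *
 *   RestoreContextFunc restore = (RestoreContextFunc)mono_arch_get_restore_context (NULL, FALSE);
 *   restore (&saved_ctx);   // never returns; resumes at saved_ctx.gregs [AMD64_RIP]
 */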

/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start;
	int i, gregs_offset;
	guint8 *code;
	guint32 pos;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = 128;

	start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* call_filter (MonoContext *ctx, unsigned long eip) */
	code = start;

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	pos = 0;
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
			pos += 8;
		}

	/* Save EBP */
	pos += 8;
	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* set new EBP */
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
	/* load callee saved regs */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
			amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
	}
	/* load exc register */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	/* restore RBP */
	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

#if TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
#else
	amd64_leave (code);
#endif
	amd64_ret (code);

	g_assert ((code - start) < kMaxCodeSize);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	if (info)
		*info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);

	return start;
}
#endif /* !DISABLE_JIT */

/*
 * The first few arguments are dummy, to force the other arguments to be passed on
 * the stack, this avoids overwriting the argument registers in the throw trampoline.
 */
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
							guint64 dummy5, guint64 dummy6,
							MonoContext *mctx, MonoObject *exc, gboolean rethrow)
{
	ERROR_DECL (error);
	MonoContext ctx;

	/* mctx is on the caller's stack */
	memcpy (&ctx, mctx, sizeof (MonoContext));

	if (mono_object_isinst_checked (exc, mono_defaults.exception_class, error)) {
		MonoException *mono_ex = (MonoException*)exc;
		if (!rethrow) {
			mono_ex->stack_trace = NULL;
			mono_ex->trace_ips = NULL;
		}
	}
	mono_error_assert_ok (error);

	/* adjust eip so that it points into the call instruction */
	ctx.gregs [AMD64_RIP] --;

	mono_handle_exception (&ctx, exc);
	mono_restore_context (&ctx);
	g_assert_not_reached ();
}
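
/*
 * Hedged example (added for exposition, not in the original source) of the
 * call shape the throw trampoline sets up for the function above. The six
 * register arguments are ignored; the real arguments land in the stack
 * slots written at arg_offsets [0..2] in get_throw_trampoline () below:
 *
 *   mono_amd64_throw_exception (0, 0, 0, 0, 0, 0,   // dummy register args
 *                               ctx, exc, FALSE);   // stack args: context, exception, rethrow
 */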

void
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
								   guint64 dummy5, guint64 dummy6,
								   MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
{
	guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
	MonoException *ex;

	ex = mono_exception_from_token (m_class_get_image (mono_defaults.exception_class), ex_token);

	mctx->gregs [AMD64_RIP] -= pc_offset;

	/* Negate the ip adjustment done in mono_amd64_throw_exception () */
	mctx->gregs [AMD64_RIP] += 1;

	mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE);
}

void
mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
						  guint64 dummy5, guint64 dummy6,
						  MonoContext *mctx, guint32 dummy7, gint64 dummy8)
{
	/* Only the register parameters are valid */
	MonoContext ctx;

	/* mctx is on the caller's stack */
	memcpy (&ctx, mctx, sizeof (MonoContext));

	mono_resume_unwind (&ctx);
}

#ifndef DISABLE_JIT
/*
 * get_throw_trampoline:
 *
 * Generate a call to mono_amd64_throw_exception/
 * mono_amd64_throw_corlib_exception.
 */
static gpointer
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
{
	guint8* start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
	const guint kMaxCodeSize = 256;

#ifdef TARGET_WIN32
	dummy_stack_space = 6 * sizeof(mgreg_t);	/* Windows expects stack space allocated for all 6 dummy args. */
#else
	dummy_stack_space = 0;
#endif

	if (info)
		start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
	else
		start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* The stack is unaligned on entry */
	stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;

	code = start;

	if (info)
		unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
	if (info) {
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
		mono_add_unwind_op_sp_alloc (unwind_ops, code, start, stack_size);
	}

	/*
	 * To hide linux/windows calling convention differences, we pass all arguments on
	 * the stack by passing 6 dummy values in registers.
	 */

	arg_offsets [0] = dummy_stack_space + 0;
	arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
	arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
	ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
	regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* Save registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (i != AMD64_RSP)
			amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
	/* Save RSP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
	/* Save IP */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
	/* Set arg1 == ctx */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
	else
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
	/* Set arg3 == rethrow/pc offset */
	if (resume_unwind) {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
	} else if (corlib) {
		if (llvm_abs)
			/*
			 * The caller doesn't pass in a pc/pc offset, instead we simply use the
			 * caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
			 */
			amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof(mgreg_t));
		else
			amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
	} else {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
	}

	if (aot) {
		const char *icall_name;

		if (resume_unwind)
			icall_name = "mono_amd64_resume_unwind";
		else if (corlib)
			icall_name = "mono_amd64_throw_corlib_exception";
		else
			icall_name = "mono_amd64_throw_exception";
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < kMaxCodeSize);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	if (info)
		*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}

/**
 * mono_arch_get_throw_exception:
 * \returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 */
gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
}

gpointer
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
}

/**
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
}
#endif /* !DISABLE_JIT */
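
/*
 * Worked example (added for exposition, not in the original source): if the
 * JIT emits a 5-byte "call throw_corlib_exception" at address A, the caller
 * IP observed by the trampoline is A + 5, so the JIT passes offset = 5 and
 * mono_amd64_throw_corlib_exception () computes
 *
 *   mctx->gregs [AMD64_RIP] -= pc_offset;   // (A + 5) - 5 == A, the throw site
 *
 * This keeps the offset a small constant known at compile time, so the
 * caller needs no relocation.
 */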

/*
 * mono_arch_unwind_frame:
 *
 * This function is used to gather information from @ctx, and store it in @frame.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 */
gboolean
mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls,
						MonoJitInfo *ji, MonoContext *ctx,
						MonoContext *new_ctx, MonoLMF **lmf,
						mgreg_t **save_locations,
						StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);
	int i;

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		mgreg_t regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;
		guint8 *epilog = NULL;

		if (ji->is_trampoline)
			frame->type = FRAME_TYPE_TRAMPOLINE;
		else
			frame->type = FRAME_TYPE_MANAGED;

		unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);

		frame->unwind_info = unwind_info;
		frame->unwind_info_len = unwind_info_len;

		/*
		printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
		mono_print_unwind_info (unwind_info, unwind_info_len);
		*/

		/* LLVM compiled code doesn't have this info */
		if (ji->has_arch_eh_info)
			epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);

		for (i = 0; i < AMD64_NREG; ++i)
			regs [i] = new_ctx->gregs [i];

		mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   (guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
						   save_locations, MONO_MAX_IREGS, &cfa);

		for (i = 0; i < AMD64_NREG; ++i)
			new_ctx->gregs [i] = regs [i];

		/* The CFA becomes the new SP value */
		new_ctx->gregs [AMD64_RSP] = (mgreg_t)cfa;

		/* Adjust IP */
		new_ctx->gregs [AMD64_RIP] --;

		return TRUE;
	} else if (*lmf) {
		guint64 rip;

		g_assert ((((guint64)(*lmf)->previous_lmf) & 2) == 0);

		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return FALSE;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
		}

		ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
		/*
		 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
		 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
		 * return address.
		 */
		//g_assert (ji);
		if (!ji)
			return FALSE;

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			/* Trampoline frame */
			for (i = 0; i < AMD64_NREG; ++i)
				new_ctx->gregs [i] = ext->ctx->gregs [i];
			/* Adjust IP */
			new_ctx->gregs [AMD64_RIP] --;
		} else {
			/*
			 * The registers saved in the LMF will be restored using the normal unwind info,
			 * when the wrapper frame is processed.
			 */
			/* Adjust IP */
			rip --;
			new_ctx->gregs [AMD64_RIP] = rip;
			new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
			new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
			for (i = 0; i < AMD64_NREG; ++i) {
				if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
					new_ctx->gregs [i] = 0;
			}
		}

		*lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);

		return TRUE;
	}

	return FALSE;
}
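
/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): a stack walk drives mono_arch_unwind_frame () in a loop, feeding
 * each produced context back in until the unwinder reports failure:
 *
 *   MonoContext ctx = start_ctx, new_ctx;
 *   StackFrameInfo frame;
 *   while (mono_arch_unwind_frame (domain, jit_tls, ji, &ctx, &new_ctx, &lmf, NULL, &frame)) {
 *       // inspect frame.type / frame.ji here
 *       ctx = new_ctx;
 *       ji = mini_jit_info_table_find (domain, (char *)MONO_CONTEXT_GET_IP (&ctx), NULL);
 *   }
 */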

/*
 * handle_signal_exception:
 *
 * Called when resuming from a signal handler.
 */
static void
handle_signal_exception (gpointer obj)
{
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
	MonoContext ctx;

	memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));

	mono_handle_exception (&ctx, (MonoObject *)obj);

	mono_restore_context (&ctx);
}

void
mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
{
	guint64 sp = ctx->gregs [AMD64_RSP];

	ctx->gregs [AMD64_RDI] = (guint64)user_data;

	/* Allocate a stack frame below the red zone */
	sp -= 128;
	/* The stack should be unaligned */
	if ((sp % 16) == 0)
		sp -= 8;
#ifdef __linux__
	/* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
	*(guint64*)sp = ctx->gregs [AMD64_RIP];
#endif
	ctx->gregs [AMD64_RSP] = sp;
	ctx->gregs [AMD64_RIP] = (guint64)async_cb;
}
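
/*
 * A minimal sketch of the effect (added for exposition): after the rewrite
 * above, restoring the context acts as if the interrupted code had executed
 *
 *   async_cb (user_data);    // %rdi = user_data, %rip = async_cb
 *
 * on a stack slot placed 128 bytes (the System V red zone) below the
 * interrupted %rsp, left deliberately misaligned so the callee sees the
 * usual post-call stack alignment.
 */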

/**
 * mono_arch_handle_exception:
 * \param ctx saved processor state
 * \param obj the exception object
 */
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoContext mctx;

	/*
	 * Handling the exception in the signal handler is problematic, since the original
	 * signal is disabled, and we could run arbitrary code through the debugger. So
	 * resume onto the normal stack and do most of the work there if possible.
	 */
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();

	/* Pass the ctx parameter in TLS */
	mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);

	mctx = jit_tls->ex_ctx;
	mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#else
	MonoContext mctx;

	mono_sigctx_to_monoctx (sigctx, &mctx);

	mono_handle_exception (&mctx, obj);

	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#endif
}

gpointer
mono_arch_ip_from_context (void *sigctx)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	return (gpointer)UCONTEXT_REG_RIP (ctx);
#elif defined(HOST_WIN32)
	return (gpointer)(((CONTEXT*)sigctx)->Rip);
#else
	MonoContext *ctx = sigctx;
	return (gpointer)ctx->gregs [AMD64_RIP];
#endif
}

static MonoObject*
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);

	if (jit_tls->stack_ovf_pending) {
		MonoDomain *domain = mono_domain_get ();
		jit_tls->stack_ovf_pending = 0;
		return (MonoObject *) domain->stack_overflow_ex;
	}

	return NULL;
}

/*
 * This function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.eip, but in a function that
 * will restore the protection on the soft-guard pages and return back to
 * continue at mctx.eip.
 */
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;
	sp = (gpointer *)(mctx->gregs [AMD64_RSP]);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
	mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
	mctx->gregs [AMD64_RSP] = (guint64)sp;
}
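
/*
 * Stack sketch (added for exposition) of what prepare_for_guard_pages ()
 * builds: restoring the context then "returns" into
 * restore_soft_guard_pages (), which re-protects the guard pages and rets
 * to the original IP pushed below it.
 *
 *   before:  RSP -> ...               RIP = original ip
 *   after:   RSP -> [ original ip ]   RIP = restore_soft_guard_pages
 */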

static void
altstack_handle_and_restore (MonoContext *ctx, MonoObject *obj, gboolean stack_ovf)
{
	MonoContext mctx;
	MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), MONO_CONTEXT_GET_IP (ctx), NULL);

	if (!ji)
		mono_handle_native_crash ("SIGSEGV", NULL, NULL);

	mctx = *ctx;

	mono_handle_exception (&mctx, obj);
	if (stack_ovf) {
		MonoJitTlsData *jit_tls = (MonoJitTlsData *) mono_tls_get_jit_tls ();
		jit_tls->stack_ovf_pending = 1;
		prepare_for_guard_pages (&mctx);
	}
	mono_restore_context (&mctx);
}

void
mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoException *exc = NULL;
	gpointer *sp;
	int frame_size;
	MonoContext *copied_ctx;

	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;

	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like:
	 *   ucontext struct
	 *   ...
	 *   return ip
	 * 128 is the size of the red zone
	 */
	frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
	frame_size += 15;
	frame_size &= ~15;
	sp = (gpointer *)(UCONTEXT_REG_RSP (sigctx) & ~15);
	sp = (gpointer *)((char*)sp - frame_size);
	copied_ctx = (MonoContext*)(sp + 4);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
	mono_sigctx_to_monoctx (sigctx, copied_ctx);
	/* when the signal handler returns, execution starts in altstack_handle_and_restore () */
	UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
	UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
	UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
	UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
	UCONTEXT_REG_RDX (sigctx) = stack_ovf;
#endif
}

#ifndef DISABLE_JIT
GSList*
mono_amd64_get_exception_trampolines (gboolean aot)
{
	MonoTrampInfo *info;
	GSList *tramps = NULL;

	/* LLVM needs different throw trampolines */
	get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	return tramps;
}
#endif /* !DISABLE_JIT */

void
mono_arch_exceptions_init (void)
{
	GSList *tramps, *l;
	gpointer tramp;

	if (mono_ee_features.use_aot_trampolines) {
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
		mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
	} else {
		/* Call this to avoid initialization races */
		tramps = mono_amd64_get_exception_trampolines (FALSE);
		for (l = tramps; l; l = l->next) {
			MonoTrampInfo *info = (MonoTrampInfo *)l->data;

			mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
			mono_tramp_info_register (info, NULL);
		}
		g_slist_free (tramps);
	}
}

// Implies defined(TARGET_WIN32)
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE

static void
mono_arch_unwindinfo_create (gpointer* monoui)
{
	PUNWIND_INFO newunwindinfo;
	*monoui = newunwindinfo = g_new0 (UNWIND_INFO, 1);
	newunwindinfo->Version = 1;
}

void
mono_arch_unwindinfo_add_push_nonvol (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
{
	PUNWIND_CODE unwindcode;
	guchar codeindex;

	g_assert (unwindinfo != NULL);

	if (unwindinfo->CountOfCodes >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
	unwindcode = &unwindinfo->UnwindCode [codeindex];
	unwindcode->UnwindOp = UWOP_PUSH_NONVOL;
	unwindcode->CodeOffset = (guchar)unwind_op->when;
	unwindcode->OpInfo = unwind_op->reg;

	if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
}

void
mono_arch_unwindinfo_add_set_fpreg (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
{
	PUNWIND_CODE unwindcode;
	guchar codeindex;

	g_assert (unwindinfo != NULL);

	if (unwindinfo->CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
	unwindcode = &unwindinfo->UnwindCode [codeindex];
	unwindcode->UnwindOp = UWOP_SET_FPREG;
	unwindcode->CodeOffset = (guchar)unwind_op->when;

	g_assert (unwind_op->val % 16 == 0);
	unwindinfo->FrameRegister = unwind_op->reg;
	unwindinfo->FrameOffset = unwind_op->val / 16;

	if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
}

void
mono_arch_unwindinfo_add_alloc_stack (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
{
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	guchar codesneeded;
	guint size;

	g_assert (unwindinfo != NULL);

	size = unwind_op->val;

	if (size < 0x8)
		g_error ("Stack allocation must be equal to or greater than 0x8.");

	if (size <= 0x80)
		codesneeded = 1;
	else if (size <= 0x7FFF8)
		codesneeded = 2;
	else
		codesneeded = 3;

	if (unwindinfo->CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->CountOfCodes += codesneeded);
	unwindcode = &unwindinfo->UnwindCode [codeindex];

	unwindcode->CodeOffset = (guchar)unwind_op->when;

	if (codesneeded == 1) {
		/* The size of the allocation is
		   (the number in the OpInfo member) times 8 plus 8. */
		unwindcode->UnwindOp = UWOP_ALLOC_SMALL;
		unwindcode->OpInfo = (size - 8)/8;
	}
	else {
		if (codesneeded == 3) {
			/* The unscaled size of the allocation is recorded
			   in the next two slots in little-endian format.
			   NOTE: unwind codes are allocated from the end to the beginning
			   of the list, so the unwind codes get the right execution order.
			   The list is sorted on CodeOffset using descending sort order. */
			unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
			unwindcode->OpInfo = 1;
			*((unsigned int*)(&(unwindcode + 1)->FrameOffset)) = size;
		}
		else {
			/* The size of the allocation divided by 8
			   is recorded in the next slot.
			   NOTE: unwind codes are allocated from the end to the beginning
			   of the list, so the unwind codes get the right execution order.
			   The list is sorted on CodeOffset using descending sort order. */
			unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
			unwindcode->OpInfo = 0;
			(unwindcode + 1)->FrameOffset = (gushort)(size/8);
		}
	}

	if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
}
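
/*
 * Worked encoding examples (added for exposition, following the Windows x64
 * UNWIND_CODE rules implemented above):
 *
 *   size = 0x40    -> 1 slot:  UWOP_ALLOC_SMALL, OpInfo = (0x40 - 8) / 8 = 7
 *   size = 0x1000  -> 2 slots: UWOP_ALLOC_LARGE, OpInfo = 0, next slot = 0x1000 / 8 = 0x200
 *   size = 0x80000 -> 3 slots: UWOP_ALLOC_LARGE, OpInfo = 1, next two slots = 0x80000 (unscaled)
 */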

static gboolean g_dyn_func_table_inited;

// Dynamic function table used when registering unwind info for OS unwind support.
static GList *g_dynamic_function_table_begin;
static GList *g_dynamic_function_table_end;

// SRW lock (lightweight reader/writer lock) protecting the dynamic function table.
static SRWLOCK g_dynamic_function_table_lock = SRWLOCK_INIT;

// Module handle used when explicitly loading ntdll.
static HMODULE g_ntdll;

// If Win8 or Win2012Server or later, use growable function tables instead
// of callbacks. The callback solution is still the fallback on older systems.
static RtlAddGrowableFunctionTablePtr g_rtl_add_growable_function_table;
static RtlGrowFunctionTablePtr g_rtl_grow_function_table;
static RtlDeleteGrowableFunctionTablePtr g_rtl_delete_growable_function_table;

// When using the function table callback solution, an out-of-proc module is needed by
// debuggers in order to read unwind info from the debug target.
#ifdef _MSC_VER
#define MONO_DAC_MODULE TEXT("mono-2.0-dac-sgen.dll")
#else
#define MONO_DAC_MODULE TEXT("mono-2.0-sgen.dll")
#endif

#define MONO_DAC_MODULE_MAX_PATH 1024

static void
init_table_no_lock (void)
{
	if (g_dyn_func_table_inited == FALSE) {
		g_assert_checked (g_dynamic_function_table_begin == NULL);
		g_assert_checked (g_dynamic_function_table_end == NULL);
		g_assert_checked (g_rtl_add_growable_function_table == NULL);
		g_assert_checked (g_rtl_grow_function_table == NULL);
		g_assert_checked (g_rtl_delete_growable_function_table == NULL);
		g_assert_checked (g_ntdll == NULL);

		// Load functions available on Win8/Win2012Server or later. If running on earlier
		// systems the below GetProcAddress calls will fail; this is expected behavior.
		if (GetModuleHandleEx (0, TEXT("ntdll.dll"), &g_ntdll) == TRUE) {
			g_rtl_add_growable_function_table = (RtlAddGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlAddGrowableFunctionTable");
			g_rtl_grow_function_table = (RtlGrowFunctionTablePtr)GetProcAddress (g_ntdll, "RtlGrowFunctionTable");
			g_rtl_delete_growable_function_table = (RtlDeleteGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlDeleteGrowableFunctionTable");
		}

		g_dyn_func_table_inited = TRUE;
	}
}

void
mono_arch_unwindinfo_init_table (void)
{
	if (g_dyn_func_table_inited == FALSE) {
		AcquireSRWLockExclusive (&g_dynamic_function_table_lock);

		init_table_no_lock ();

		ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
	}
}

static void
terminate_table_no_lock (void)
{
	if (g_dyn_func_table_inited == TRUE) {
		if (g_dynamic_function_table_begin != NULL) {
			// Free all list elements.
			for (GList *l = g_dynamic_function_table_begin; l; l = l->next) {
				if (l->data) {
					g_free (l->data);
					l->data = NULL;
				}
			}

			// Free the list.
			g_list_free (g_dynamic_function_table_begin);
			g_dynamic_function_table_begin = NULL;
			g_dynamic_function_table_end = NULL;
		}

		g_rtl_delete_growable_function_table = NULL;
		g_rtl_grow_function_table = NULL;
		g_rtl_add_growable_function_table = NULL;

		if (g_ntdll != NULL) {
			FreeLibrary (g_ntdll);
			g_ntdll = NULL;
		}

		g_dyn_func_table_inited = FALSE;
	}
}

void
mono_arch_unwindinfo_terminate_table (void)
{
	if (g_dyn_func_table_inited == TRUE) {
		AcquireSRWLockExclusive (&g_dynamic_function_table_lock);

		terminate_table_no_lock ();

		ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
	}
}

static GList *
fast_find_range_in_table_no_lock_ex (gsize begin_range, gsize end_range, gboolean *continue_search)
{
	GList *found_entry = NULL;

	// Fast path, look at boundaries.
	if (g_dynamic_function_table_begin != NULL) {
		DynamicFunctionTableEntry *first_entry = g_dynamic_function_table_begin->data;
		DynamicFunctionTableEntry *last_entry = (g_dynamic_function_table_end != NULL) ? g_dynamic_function_table_end->data : first_entry;

		// Sorted in descending order based on begin_range; check the first item, i.e. the entry with the highest range.
		if (first_entry != NULL && first_entry->begin_range <= begin_range && first_entry->end_range >= end_range) {
			// Range belongs to the first entry in the list.
			found_entry = g_dynamic_function_table_begin;
			*continue_search = FALSE;
		} else {
			if (first_entry != NULL && first_entry->begin_range >= begin_range) {
				if (last_entry != NULL && last_entry->begin_range <= begin_range) {
					// The range could exist in the table, continue the search.
					*continue_search = TRUE;
				}
			}
		}
	}

	return found_entry;
}

static inline DynamicFunctionTableEntry *
fast_find_range_in_table_no_lock (gsize begin_range, gsize end_range, gboolean *continue_search)
{
	GList *found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, continue_search);
	return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
}

static GList *
find_range_in_table_no_lock_ex (const gpointer code_block, gsize block_size)
{
	GList *found_entry = NULL;
	gboolean continue_search = FALSE;

	gsize begin_range = (gsize)code_block;
	gsize end_range = begin_range + block_size;

	// Fast path, check table boundaries.
	found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, &continue_search);
	if (found_entry || continue_search == FALSE)
		return found_entry;

	// Scan table for an entry including the range.
	for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
		DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
		g_assert_checked (current_entry != NULL);

		// Do we have a match?
		if (current_entry->begin_range == begin_range && current_entry->end_range == end_range) {
			found_entry = node;
			break;
		}
	}

	return found_entry;
}

static inline DynamicFunctionTableEntry *
find_range_in_table_no_lock (const gpointer code_block, gsize block_size)
{
	GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
	return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
}

static GList *
find_pc_in_table_no_lock_ex (const gpointer pc)
{
	GList *found_entry = NULL;
	gboolean continue_search = FALSE;

	gsize begin_range = (gsize)pc;
	gsize end_range = begin_range;

	// Fast path, check table boundaries.
	found_entry = fast_find_range_in_table_no_lock_ex (begin_range, begin_range, &continue_search);
	if (found_entry || continue_search == FALSE)
		return found_entry;

	// Scan table for an entry including the range.
	for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
		DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
		g_assert_checked (current_entry != NULL);

		// Do we have a match?
		if (current_entry->begin_range <= begin_range && current_entry->end_range >= end_range) {
			found_entry = node;
			break;
		}
	}

	return found_entry;
}

static inline DynamicFunctionTableEntry *
find_pc_in_table_no_lock (const gpointer pc)
{
	GList *found_entry = find_pc_in_table_no_lock_ex (pc);
	return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
}

#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
static void
validate_table_no_lock (void)
{
	// Validation method checking that the table is sorted as expected and doesn't include overlapped regions.
	// The method will assert on failure to explicitly indicate what check failed.
	if (g_dynamic_function_table_begin != NULL) {
		g_assert_checked (g_dynamic_function_table_end != NULL);

		DynamicFunctionTableEntry *previous_entry = NULL;
		DynamicFunctionTableEntry *current_entry = NULL;
		for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
			current_entry = (DynamicFunctionTableEntry *)node->data;

			g_assert_checked (current_entry != NULL);
			g_assert_checked (current_entry->end_range > current_entry->begin_range);

			if (previous_entry != NULL) {
				// List should be sorted in descending order on begin_range.
				g_assert_checked (previous_entry->begin_range > current_entry->begin_range);

				// Check for overlapped regions.
				g_assert_checked (previous_entry->begin_range >= current_entry->end_range);
			}

			previous_entry = current_entry;
		}
	}
}

#else

static inline void
validate_table_no_lock (void)
{
}
#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */

// Forward declare.
static PRUNTIME_FUNCTION MONO_GET_RUNTIME_FUNCTION_CALLBACK (DWORD64 ControlPc, IN PVOID Context);

DynamicFunctionTableEntry *
mono_arch_unwindinfo_insert_range_in_table (const gpointer code_block, gsize block_size)
{
	DynamicFunctionTableEntry *new_entry = NULL;

	gsize begin_range = (gsize)code_block;
	gsize end_range = begin_range + block_size;

	AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
	init_table_no_lock ();
	new_entry = find_range_in_table_no_lock (code_block, block_size);
	if (new_entry == NULL) {
		// Allocate new entry.
		new_entry = g_new0 (DynamicFunctionTableEntry, 1);
		if (new_entry != NULL) {

			// Pre-allocate RUNTIME_FUNCTION array, assume average method size of
			// MONO_UNWIND_INFO_RT_FUNC_SIZE bytes.
			InitializeSRWLock (&new_entry->lock);
			new_entry->handle = NULL;
			new_entry->begin_range = begin_range;
			new_entry->end_range = end_range;
			new_entry->rt_funcs_max_count = (block_size / MONO_UNWIND_INFO_RT_FUNC_SIZE) + 1;
			new_entry->rt_funcs_current_count = 0;
			new_entry->rt_funcs = g_new0 (RUNTIME_FUNCTION, new_entry->rt_funcs_max_count);

			if (new_entry->rt_funcs != NULL) {
				// Check insert on boundaries. List is sorted descending on begin_range.
				if (g_dynamic_function_table_begin == NULL) {
					g_dynamic_function_table_begin = g_list_append (g_dynamic_function_table_begin, new_entry);
					g_dynamic_function_table_end = g_dynamic_function_table_begin;
				} else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_begin->data))->begin_range < begin_range) {
					// Insert at the head.
					g_dynamic_function_table_begin = g_list_prepend (g_dynamic_function_table_begin, new_entry);
				} else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_end->data))->begin_range > begin_range) {
					// Insert at the tail.
					g_list_append (g_dynamic_function_table_end, new_entry);
					g_dynamic_function_table_end = g_dynamic_function_table_end->next;
				} else {
					// Search and insert at correct position.
					for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
						DynamicFunctionTableEntry * current_entry = (DynamicFunctionTableEntry *)node->data;
						g_assert_checked (current_entry != NULL);

						if (current_entry->begin_range < new_entry->begin_range) {
							g_dynamic_function_table_begin = g_list_insert_before (g_dynamic_function_table_begin, node, new_entry);
							break;
						}
					}
				}

				// Register dynamic function table entry with OS.
				if (g_rtl_add_growable_function_table != NULL) {
					// Allocate new growable handle table for entry.
					g_assert_checked (new_entry->handle == NULL);
					DWORD result = g_rtl_add_growable_function_table (&new_entry->handle,
										new_entry->rt_funcs, new_entry->rt_funcs_current_count,
										new_entry->rt_funcs_max_count, new_entry->begin_range, new_entry->end_range);
					g_assert (!result);
				} else {
					WCHAR buffer [MONO_DAC_MODULE_MAX_PATH] = { 0 };
					WCHAR *path = buffer;

					// DAC module should be in the same directory as the
					// main executable.
					GetModuleFileNameW (NULL, buffer, G_N_ELEMENTS(buffer));
					path = wcsrchr (buffer, TEXT('\\'));
					if (path != NULL) {
						path++;
						*path = TEXT('\0');
					}

					wcscat_s (buffer, G_N_ELEMENTS(buffer), MONO_DAC_MODULE);
					path = buffer;

					// Register function table callback + out of proc module.
					new_entry->handle = (PVOID)((DWORD64)(new_entry->begin_range) | 3);
					BOOLEAN result = RtlInstallFunctionTableCallback ((DWORD64)(new_entry->handle),
										(DWORD64)(new_entry->begin_range), (DWORD)(new_entry->end_range - new_entry->begin_range),
										MONO_GET_RUNTIME_FUNCTION_CALLBACK, new_entry, path);
					g_assert(result);
				}

				// Only included in checked builds. Validates the structure of table after insert.
				validate_table_no_lock ();

			} else {
				g_free (new_entry);
				new_entry = NULL;
			}
		}
	}

	ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);

	return new_entry;
}

static void
remove_range_in_table_no_lock (GList *entry)
{
	if (entry != NULL) {
		if (entry == g_dynamic_function_table_end)
			g_dynamic_function_table_end = entry->prev;

		g_dynamic_function_table_begin = g_list_remove_link (g_dynamic_function_table_begin, entry);
		DynamicFunctionTableEntry *removed_entry = (DynamicFunctionTableEntry *)entry->data;

		g_assert_checked (removed_entry != NULL);
		g_assert_checked (removed_entry->rt_funcs != NULL);

		// Remove function table from OS.
		if (removed_entry->handle != NULL) {
			if (g_rtl_delete_growable_function_table != NULL) {
				g_rtl_delete_growable_function_table (removed_entry->handle);
			} else {
				RtlDeleteFunctionTable ((PRUNTIME_FUNCTION)removed_entry->handle);
			}
		}

		g_free (removed_entry->rt_funcs);
		g_free (removed_entry);

		g_list_free_1 (entry);
	}

	// Only included in checked builds. Validates the structure of table after remove.
	validate_table_no_lock ();
}

void
mono_arch_unwindinfo_remove_pc_range_in_table (const gpointer code)
{
	AcquireSRWLockExclusive (&g_dynamic_function_table_lock);

	GList *found_entry = find_pc_in_table_no_lock_ex (code);

	g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code);
	remove_range_in_table_no_lock (found_entry);

	ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
}

void
mono_arch_unwindinfo_remove_range_in_table (const gpointer code_block, gsize block_size)
{
	AcquireSRWLockExclusive (&g_dynamic_function_table_lock);

	GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);

	g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code_block);
	remove_range_in_table_no_lock (found_entry);

	ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
}

PRUNTIME_FUNCTION
mono_arch_unwindinfo_find_rt_func_in_table (const gpointer code, gsize code_size)
{
	PRUNTIME_FUNCTION found_rt_func = NULL;

	gsize begin_range = (gsize)code;
	gsize end_range = begin_range + code_size;

	AcquireSRWLockShared (&g_dynamic_function_table_lock);

	DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);

	if (found_entry != NULL) {

		AcquireSRWLockShared (&found_entry->lock);

		g_assert_checked (found_entry->begin_range <= begin_range);
		g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
		g_assert_checked (found_entry->rt_funcs != NULL);

		for (int i = 0; i < found_entry->rt_funcs_current_count; ++i) {
			PRUNTIME_FUNCTION current_rt_func = (PRUNTIME_FUNCTION)(&found_entry->rt_funcs [i]);

			// Is this our RT function entry?
			if (found_entry->begin_range + current_rt_func->BeginAddress <= begin_range &&
				found_entry->begin_range + current_rt_func->EndAddress >= end_range) {
				found_rt_func = current_rt_func;
				break;
			}
		}

		ReleaseSRWLockShared (&found_entry->lock);
	}

	ReleaseSRWLockShared (&g_dynamic_function_table_lock);

	return found_rt_func;
}

static inline PRUNTIME_FUNCTION
mono_arch_unwindinfo_find_pc_rt_func_in_table (const gpointer pc)
{
	return mono_arch_unwindinfo_find_rt_func_in_table (pc, 0);
}

#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
static void
validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
{
	// Validation method checking that the runtime function table is sorted as expected and doesn't include overlapped regions.
	// The method will assert on failure to explicitly indicate what check failed.
	g_assert_checked (entry != NULL);
	g_assert_checked (entry->rt_funcs_max_count >= entry->rt_funcs_current_count);
	g_assert_checked (entry->rt_funcs != NULL);

	PRUNTIME_FUNCTION current_rt_func = NULL;
	PRUNTIME_FUNCTION previous_rt_func = NULL;
	for (int i = 0; i < entry->rt_funcs_current_count; ++i) {
		current_rt_func = &(entry->rt_funcs [i]);

		g_assert_checked (current_rt_func->BeginAddress < current_rt_func->EndAddress);
		g_assert_checked (current_rt_func->EndAddress <= current_rt_func->UnwindData);

		if (previous_rt_func != NULL) {
			// List should be sorted in ascending order based on BeginAddress.
			g_assert_checked (previous_rt_func->BeginAddress < current_rt_func->BeginAddress);

			// Check for overlapped regions.
			g_assert_checked (previous_rt_func->EndAddress <= current_rt_func->BeginAddress);
		}

		previous_rt_func = current_rt_func;
	}
}

#else

static inline void
validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
{
}
#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */

PRUNTIME_FUNCTION
mono_arch_unwindinfo_insert_rt_func_in_table (const gpointer code, gsize code_size)
{
	PRUNTIME_FUNCTION new_rt_func = NULL;

	gsize begin_range = (gsize)code;
	gsize end_range = begin_range + code_size;

	AcquireSRWLockShared (&g_dynamic_function_table_lock);

	DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);

	if (found_entry != NULL) {

		AcquireSRWLockExclusive (&found_entry->lock);

		g_assert_checked (found_entry->begin_range <= begin_range);
		g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
		g_assert_checked (found_entry->rt_funcs != NULL);
		g_assert_checked ((guchar*)code - found_entry->begin_range >= 0);

		gsize code_offset = (gsize)code - found_entry->begin_range;
		gsize entry_count = found_entry->rt_funcs_current_count;
		gsize max_entry_count = found_entry->rt_funcs_max_count;
		PRUNTIME_FUNCTION current_rt_funcs = found_entry->rt_funcs;

		RUNTIME_FUNCTION new_rt_func_data;
		new_rt_func_data.BeginAddress = code_offset;
		new_rt_func_data.EndAddress = code_offset + code_size;

		gsize aligned_unwind_data = ALIGN_TO(end_range, sizeof (mgreg_t));
		new_rt_func_data.UnwindData = aligned_unwind_data - found_entry->begin_range;

		g_assert_checked (new_rt_func_data.UnwindData == ALIGN_TO(new_rt_func_data.EndAddress, sizeof (mgreg_t)));

		PRUNTIME_FUNCTION new_rt_funcs = NULL;

		// The list needs to be sorted in ascending order based on BeginAddress (a Windows requirement if the
		// list is going to be directly reused in OS function tables). Check if we can append to the end of the
		// existing table without a realloc.
		if (entry_count == 0 || ((entry_count < max_entry_count) && (current_rt_funcs [entry_count - 1].BeginAddress < code_offset))) {
			new_rt_func = &(current_rt_funcs [entry_count]);
			*new_rt_func = new_rt_func_data;
			entry_count++;
		} else {
			// No easy way out, need to realloc, grow to double size (or current max, if too small).
			max_entry_count = entry_count * 2 > max_entry_count ? entry_count * 2 : max_entry_count;
			new_rt_funcs = g_new0 (RUNTIME_FUNCTION, max_entry_count);

			if (new_rt_funcs != NULL) {
				gsize from_index = 0;
				gsize to_index = 0;

				// Copy from old table into new table. Make sure new rt func gets inserted
				// into correct location based on sort order.
				for (; from_index < entry_count; ++from_index) {
					if (new_rt_func == NULL && current_rt_funcs [from_index].BeginAddress > new_rt_func_data.BeginAddress) {
						new_rt_func = &(new_rt_funcs [to_index++]);
						*new_rt_func = new_rt_func_data;
					}

					if (current_rt_funcs [from_index].UnwindData != 0)
						new_rt_funcs [to_index++] = current_rt_funcs [from_index];
				}

				// If we didn't insert by now, put it last in the list.
				if (new_rt_func == NULL) {
					new_rt_func = &(new_rt_funcs [to_index]);
					*new_rt_func = new_rt_func_data;
				}
			}

			entry_count++;
		}

		// Update the stats for current entry.
		found_entry->rt_funcs_current_count = entry_count;
		found_entry->rt_funcs_max_count = max_entry_count;

		if (new_rt_funcs == NULL && g_rtl_grow_function_table != NULL) {
			// No new table, just report the increase in use.
			g_assert_checked (found_entry->handle != NULL);
			g_rtl_grow_function_table (found_entry->handle, found_entry->rt_funcs_current_count);
		} else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table != NULL) {
			// New table, delete old table and rt funcs, and register a new one.
			g_assert_checked (g_rtl_delete_growable_function_table != NULL);
			g_rtl_delete_growable_function_table (found_entry->handle);
			found_entry->handle = NULL;
			g_free (found_entry->rt_funcs);
			found_entry->rt_funcs = new_rt_funcs;
			DWORD result = g_rtl_add_growable_function_table (&found_entry->handle,
								found_entry->rt_funcs, found_entry->rt_funcs_current_count,
								found_entry->rt_funcs_max_count, found_entry->begin_range, found_entry->end_range);
			g_assert (!result);
		} else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table == NULL) {
			// No table registered with OS, callback solution in use. Switch tables.
			g_free (found_entry->rt_funcs);
			found_entry->rt_funcs = new_rt_funcs;
		} else if (new_rt_funcs == NULL && g_rtl_grow_function_table == NULL) {
			// No table registered with OS, callback solution in use, nothing to do.
		} else {
			g_assert_not_reached ();
		}

		// Only included in checked builds. Validates the structure of table after insert.
		validate_rt_funcs_in_table_no_lock (found_entry);

		ReleaseSRWLockExclusive (&found_entry->lock);
	}

	ReleaseSRWLockShared (&g_dynamic_function_table_lock);

	return new_rt_func;
}

static PRUNTIME_FUNCTION
MONO_GET_RUNTIME_FUNCTION_CALLBACK (DWORD64 ControlPc, IN PVOID Context)
{
	return mono_arch_unwindinfo_find_pc_rt_func_in_table ((gpointer)ControlPc);
}
static void
initialize_unwind_info_internal_ex (GSList *unwind_ops, PUNWIND_INFO unwindinfo)
{
	if (unwind_ops != NULL && unwindinfo != NULL) {
		MonoUnwindOp *unwind_op_data;
		gboolean sp_alloced = FALSE;
		gboolean fp_alloced = FALSE;

		// Replay collected unwind info and set up the Windows format.
		for (GSList *l = unwind_ops; l; l = l->next) {
			unwind_op_data = (MonoUnwindOp *)l->data;
			switch (unwind_op_data->op) {
			case DW_CFA_offset : {
				// Pushes should go before SP/FP allocation to be compliant with the Windows x64 ABI.
				// TODO: DW_CFA_offset can also be used to move saved regs into the frame.
				if (unwind_op_data->reg != AMD64_RIP && sp_alloced == FALSE && fp_alloced == FALSE)
					mono_arch_unwindinfo_add_push_nonvol (unwindinfo, unwind_op_data);
				break;
			}
			case DW_CFA_mono_sp_alloc_info_win64 : {
				mono_arch_unwindinfo_add_alloc_stack (unwindinfo, unwind_op_data);
				sp_alloced = TRUE;
				break;
			}
			case DW_CFA_mono_fp_alloc_info_win64 : {
				mono_arch_unwindinfo_add_set_fpreg (unwindinfo, unwind_op_data);
				fp_alloced = TRUE;
				break;
			}
			default :
				break;
			}
		}
	}
}
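// Illustrative sketch (not part of the build): for a typical prologue
//     push rbp; mov rbp, rsp; sub rsp, 0x20
// the replay above maps the collected ops to Windows unwind codes as
//     DW_CFA_offset (rbp)              -> UWOP_PUSH_NONVOL
//     DW_CFA_mono_fp_alloc_info_win64  -> UWOP_SET_FPREG
//     DW_CFA_mono_sp_alloc_info_win64  -> UWOP_ALLOC_SMALL / UWOP_ALLOC_LARGE
// Only the op and reg fields used above are shown; the remaining MonoUnwindOp
// fields are left zeroed and the exact struct layout is an assumption here.
#if 0
MonoUnwindOp push_rbp = {0};
push_rbp.op = DW_CFA_offset;
push_rbp.reg = AMD64_RBP;

MonoUnwindOp set_fp = {0};
set_fp.op = DW_CFA_mono_fp_alloc_info_win64;

MonoUnwindOp alloc_sp = {0};
alloc_sp.op = DW_CFA_mono_sp_alloc_info_win64;

GSList *ops = NULL;
ops = g_slist_append (ops, &push_rbp);
ops = g_slist_append (ops, &set_fp);
ops = g_slist_append (ops, &alloc_sp);

UNWIND_INFO info = {0};
initialize_unwind_info_internal_ex (ops, &info);
g_slist_free (ops);
#endif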
static PUNWIND_INFO
initialize_unwind_info_internal (GSList *unwind_ops)
{
	PUNWIND_INFO unwindinfo;

	mono_arch_unwindinfo_create (&unwindinfo);
	initialize_unwind_info_internal_ex (unwind_ops, unwindinfo);

	return unwindinfo;
}
guchar
mono_arch_unwindinfo_get_code_count (GSList *unwind_ops)
{
	UNWIND_INFO unwindinfo = {0};
	initialize_unwind_info_internal_ex (unwind_ops, &unwindinfo);
	return unwindinfo.CountOfCodes;
}
PUNWIND_INFO
mono_arch_unwindinfo_alloc_unwind_info (GSList *unwind_ops)
{
	if (!unwind_ops)
		return NULL;

	return initialize_unwind_info_internal (unwind_ops);
}
void
mono_arch_unwindinfo_free_unwind_info (PUNWIND_INFO unwind_info)
{
	g_free (unwind_info);
}
guint
mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg)
{
	MonoCompile *current_cfg = (MonoCompile *)cfg;
	g_assert (current_cfg->arch.unwindinfo == NULL);
	current_cfg->arch.unwindinfo = initialize_unwind_info_internal (current_cfg->unwind_ops);
	return mono_arch_unwindinfo_get_size (((PUNWIND_INFO)(current_cfg->arch.unwindinfo))->CountOfCodes);
}
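// Illustrative sketch (not part of the build): the size returned above is meant
// to be added to the code allocation so the unwind info can be copied directly
// behind the method body (see mono_arch_unwindinfo_install_method_unwind_info
// below). Variable names here are hypothetical.
#if 0
guint unwind_size = mono_arch_unwindinfo_init_method_unwind_info (cfg);
guint alloc_size = code_size + unwind_size;
#endif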
void
mono_arch_unwindinfo_install_method_unwind_info (gpointer *monoui, gpointer code, guint code_size)
{
	PUNWIND_INFO unwindinfo, targetinfo;
	guchar codecount;
	guint64 targetlocation;
	if (!*monoui)
		return;

	unwindinfo = (PUNWIND_INFO)*monoui;
	targetlocation = (guint64)&(((guchar*)code)[code_size]);
	targetinfo = (PUNWIND_INFO) ALIGN_TO(targetlocation, sizeof (mgreg_t));

	memcpy (targetinfo, unwindinfo, sizeof (UNWIND_INFO) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));

	codecount = unwindinfo->CountOfCodes;
	if (codecount) {
		memcpy (&targetinfo->UnwindCode [0], &unwindinfo->UnwindCode [MONO_MAX_UNWIND_CODES - codecount],
			sizeof (UNWIND_CODE) * codecount);
	}

#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
	if (codecount) {
		// Validate the order of unwind op codes in checked builds. Offsets should be in descending order.
		// In the first iteration previous == current; this is intended to handle UWOP_ALLOC_LARGE as the first item.
		int previous = 0;
		for (int current = 0; current < codecount; current++) {
			g_assert_checked (targetinfo->UnwindCode [previous].CodeOffset >= targetinfo->UnwindCode [current].CodeOffset);
			previous = current;
			if (targetinfo->UnwindCode [current].UnwindOp == UWOP_ALLOC_LARGE) {
				if (targetinfo->UnwindCode [current].OpInfo == 0) {
					current++;
				} else {
					current += 2;
				}
			}
		}
	}
#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */

	mono_arch_unwindinfo_free_unwind_info (unwindinfo);
	*monoui = 0;

	// Register unwind info in the table.
	mono_arch_unwindinfo_insert_rt_func_in_table (code, code_size);
}
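// Resulting layout (illustrative): the UNWIND_INFO header is copied to the first
// mgreg_t-aligned address after the method body, followed by only the used codes.
// The scratch buffer fills its UnwindCode array from the end (highest index
// first), which is why the copy above starts at MONO_MAX_UNWIND_CODES - codecount.
//
//   [code .. code + code_size)          JITed method body
//   ALIGN_TO (end, sizeof (mgreg_t))    UNWIND_INFO header + CountOfCodes codes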
void
mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size)
{
	PUNWIND_INFO unwindinfo = initialize_unwind_info_internal (unwind_ops);
	if (unwindinfo != NULL) {
		mono_arch_unwindinfo_install_method_unwind_info (&unwindinfo, code, code_size);
	}
}
void
mono_arch_code_chunk_new (void *chunk, int size)
{
	mono_arch_unwindinfo_insert_range_in_table (chunk, size);
}
void
mono_arch_code_chunk_destroy (void *chunk)
{
	mono_arch_unwindinfo_remove_pc_range_in_table (chunk);
}
#endif /* MONO_ARCH_HAVE_UNWIND_TABLE */
#if MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT)
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both call conventions */
	const guint kMaxCodeSize = 64;

	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy.
	 * state is moved to $rax so it's set up as the return value and we can overwrite $rsi.
	 */
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* set up the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	x86_cld (code);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);
	amd64_movsl (code);

	/* now restore the registers from the LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);

#ifdef WIN32
	amd64_mov_reg_reg (code, AMD64_R14, AMD64_ARG_REG3, 8);
#else
	amd64_mov_reg_reg (code, AMD64_R12, AMD64_ARG_REG3, 8);
#endif

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= kMaxCodeSize);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	saved = start;
	return (MonoContinuationRestore)saved;
}
#endif /* MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT) */
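// Illustrative sketch (not part of the build): callers obtain the thunk once and
// jump through it; it copies the saved stack back, reloads RBP/RSP from the LMF,
// and resumes at cont->return_ip, so the call never returns. The variable names
// are hypothetical.
#if 0
MonoContinuationRestore restore = mono_tasklets_arch_restore ();
restore (cont, state, lmf_addr); /* does not return */
#endif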
/*
 * mono_arch_setup_resume_sighandler_ctx:
 *
 *   Setup CTX so execution continues at FUNC.
 */
void
mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
{
	/*
	 * When resuming from a signal handler, the stack should be misaligned, just like right after
	 * a call.
	 */
	if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
		MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
	MONO_CONTEXT_SET_IP (ctx, func);
}
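// Worked example: the x86-64 ABI keeps RSP 16-byte aligned at call sites, so on
// entry to FUNC (right after an implied "call") RSP % 16 == 8. If the interrupted
// context has SP == 0x7fff0010 (aligned), the code above changes it to
// 0x7fff0008; an already misaligned SP such as 0x7fff0008 is left untouched.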
#ifdef DISABLE_JIT
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

GSList*
mono_amd64_get_exception_trampolines (gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}
#endif /* DISABLE_JIT */
#if !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT)
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	g_assert_not_reached ();
	return NULL;
}
#endif /* !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT) */
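/*
 * mono_arch_undo_ip_adjustment:
 *
 *   Increment RIP again after it was decremented so that it pointed inside the
 *   originating instruction during exception processing (assumed to mirror a
 *   matching IP adjustment done elsewhere in the runtime's signal handling).
 */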
void
mono_arch_undo_ip_adjustment (MonoContext *ctx)
{
	ctx->gregs [AMD64_RIP]++;
}