/**
 * \file
 * exception support for AMD64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Johan Lorensson (lateralusx.github@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include <config.h>

// Secret password to unlock wcscat_s on mxe, must happen before string.h is included
#ifdef __MINGW32__
#define MINGW_HAS_SECURE_API 1
#endif

#include <glib.h>
#include <string.h>

#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#ifdef HAVE_UCONTEXT_H
#include <ucontext.h>
#endif

#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini.h"
#include "mini-amd64.h"
#include "tasklets.h"

#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))

#ifdef TARGET_WIN32
static void (*restore_stack) (void);
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
void *mono_win_vectored_exception_handle;

#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler(0, ep, ctx)
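/*
 * Note: W32_SEH_HANDLE_EX pastes the exception kind onto the matching handler
 * variable above; e.g. W32_SEH_HANDLE_EX(segv) expands to
 * "if (segv_handler) segv_handler(0, ep, ctx)".
 */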

static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
{
#ifndef MONO_CROSS_COMPILE
	if (mono_old_win_toplevel_exception_filter) {
		return (*mono_old_win_toplevel_exception_filter)(ep);
	}
#endif

	mono_handle_native_crash ("SIGSEGV", NULL, NULL);

	return EXCEPTION_CONTINUE_SEARCH;
}

#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
static gpointer
get_win32_restore_stack (void)
{
	static guint8 *start = NULL;
	guint8 *code;

	if (start)
		return start;

	/* restore_stack (void) */
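	/*
	 * The emitted thunk is roughly equivalent to the following C (a sketch,
	 * not generated verbatim):
	 *
	 *   _resetstkoflw ();
	 *   mono_restore_context (&((MonoJitTlsData *)mono_tls_get_jit_tls ())->stack_restore_ctx);
	 */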
	start = code = mono_global_codeman_reserve (128);

	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* push 32 bytes of stack space for Win64 calling convention */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);

	/* restore guard page */
	amd64_mov_reg_imm (code, AMD64_R11, _resetstkoflw);
	amd64_call_reg (code, AMD64_R11);

	/* get jit_tls with context to restore */
	amd64_mov_reg_imm (code, AMD64_R11, mono_tls_get_jit_tls);
	amd64_call_reg (code, AMD64_R11);

	/* move jit_tls from return reg to arg reg */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);

	/* retrieve pointer to saved context */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoJitTlsData, stack_restore_ctx));

	/* this call does not return */
	amd64_mov_reg_imm (code, AMD64_R11, mono_restore_context);
	amd64_call_reg (code, AMD64_R11);

	g_assert ((code - start) < 128);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	return start;
}
#else
static gpointer
get_win32_restore_stack (void)
{
	// _resetstkoflw is unsupported on non-desktop Windows platforms.
	return NULL;
}
#endif /* G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) */

/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
 */
static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	LONG res;
	MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
	MonoDomain* domain = mono_domain_get ();

	/* If the thread is not managed by the runtime, return early */
	if (!jit_tls)
		return EXCEPTION_CONTINUE_SEARCH;

	jit_tls->mono_win_chained_exception_needs_run = FALSE;
	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;

	switch (er->ExceptionCode) {
	case EXCEPTION_STACK_OVERFLOW:
		if (!mono_aot_only && restore_stack) {
			if (mono_arch_handle_exception (ctx, domain->stack_overflow_ex)) {
				/* need to restore stack protection once stack is unwound
				 * restore_stack will restore stack protection and then
				 * resume control to the saved stack_restore_ctx */
				mono_sigctx_to_monoctx (ctx, &jit_tls->stack_restore_ctx);
				ctx->Rip = (guint64)restore_stack;
			}
		} else {
			jit_tls->mono_win_chained_exception_needs_run = TRUE;
		}
		break;
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		jit_tls->mono_win_chained_exception_needs_run = TRUE;
		break;
	}

	if (jit_tls->mono_win_chained_exception_needs_run) {
		/* Don't copy context back if we chained the exception
		 * as the handler may have modified the EXCEPTION_POINTERS
		 * directly. We don't pass sigcontext to chained handlers.
		 * Return continue search so the UnhandledExceptionFilter
		 * can correctly chain the exception.
		 */
		res = EXCEPTION_CONTINUE_SEARCH;
	}

	return res;
}

void win32_seh_init()
{
	if (!mono_aot_only)
		restore_stack = get_win32_restore_stack ();

	mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
	mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
}

void win32_seh_cleanup()
{
	guint32 ret = 0;

	if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);

	ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
	g_assert (ret);
}

void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* TARGET_WIN32 */

#ifndef DISABLE_JIT
/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, gregs_offset;

	/* restore_context (MonoContext *ctx) */

	start = code = (guint8 *)mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %rsp, and the scratch regs %r8-%r11 used below */
	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
			amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
	}

	/*
	 * The context resides on the stack, in the stack frame of the
	 * caller of this function. The stack pointer that we need to
	 * restore is potentially many stack frames higher up, so the
	 * distance between them can easily be more than the red zone
	 * size. Hence the stack pointer can be restored only after
	 * we have finished loading everything from the context.
	 */
	amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	if (info)
		*info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);

	return start;
}

/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start;
	int i, gregs_offset;
	guint8 *code;
	guint32 pos;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = 128;

	start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* call_filter (MonoContext *ctx, unsigned long eip) */
	code = start;

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	pos = 0;
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
			pos += 8;
		}

	/* Save EBP */
	pos += 8;
	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* set new EBP */
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
	/* load callee saved regs */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
			amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
	}
	/* load exc register */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	/* restore RBP */
	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

#if TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
#else
	amd64_leave (code);
#endif
	amd64_ret (code);

	g_assert ((code - start) < kMaxCodeSize);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	if (info)
		*info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);

	return start;
}
#endif /* !DISABLE_JIT */

/*
 * The first few arguments are dummy, to force the other arguments to be passed on
 * the stack, this avoids overwriting the argument registers in the throw trampoline.
 */
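/*
 * Informational note: the SysV AMD64 ABI passes the first six integer arguments
 * in RDI, RSI, RDX, RCX, R8 and R9, while Win64 uses RCX, RDX, R8 and R9. With
 * six leading dummies, mctx/exc/rethrow therefore end up on the stack on both
 * ABIs, so the throw trampoline can populate them at fixed stack offsets.
 */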
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
							guint64 dummy5, guint64 dummy6,
							MonoContext *mctx, MonoObject *exc, gboolean rethrow)
{
	MonoError error;
	MonoContext ctx;

	/* mctx is on the caller's stack */
	memcpy (&ctx, mctx, sizeof (MonoContext));

	if (mono_object_isinst_checked (exc, mono_defaults.exception_class, &error)) {
		MonoException *mono_ex = (MonoException*)exc;
		if (!rethrow) {
			mono_ex->stack_trace = NULL;
			mono_ex->trace_ips = NULL;
		}
	}
	mono_error_assert_ok (&error);

	/* adjust eip so that it points into the call instruction */
	ctx.gregs [AMD64_RIP] --;

	mono_handle_exception (&ctx, exc);
	mono_restore_context (&ctx);
	g_assert_not_reached ();
}

void
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
								   guint64 dummy5, guint64 dummy6,
								   MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
{
	guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
	MonoException *ex;

	ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);

	mctx->gregs [AMD64_RIP] -= pc_offset;

	/* Negate the ip adjustment done in mono_amd64_throw_exception () */
	mctx->gregs [AMD64_RIP] += 1;

	mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE);
}

void
mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
						  guint64 dummy5, guint64 dummy6,
						  MonoContext *mctx, guint32 dummy7, gint64 dummy8)
{
	/* Only the register parameters are valid */
	MonoContext ctx;

	/* mctx is on the caller's stack */
	memcpy (&ctx, mctx, sizeof (MonoContext));

	mono_resume_unwind (&ctx);
}

#ifndef DISABLE_JIT
/*
 * get_throw_trampoline:
 *
 * Generate a call to mono_amd64_throw_exception/
 * mono_amd64_throw_corlib_exception.
 */
static gpointer
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
{
	guint8* start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
	const guint kMaxCodeSize = 256;

#ifdef TARGET_WIN32
	dummy_stack_space = 6 * sizeof(mgreg_t);	/* Windows expects stack space allocated for all 6 dummy args. */
#else
	dummy_stack_space = 0;
#endif

	if (info)
		start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
	else
		start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* The stack is unaligned on entry */
	stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;
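	/*
	 * Informational: only the return address has been pushed at this point, so
	 * RSP is 8 modulo 16; the extra "+ 8" keeps the frame 16-byte aligned after
	 * the subtraction below (assuming MONO_ARCH_FRAME_ALIGNMENT is 16).
	 */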

	code = start;

	if (info)
		unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
	if (info) {
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
		mono_add_unwind_op_sp_alloc (unwind_ops, code, start, stack_size);
	}

	/*
	 * To hide linux/windows calling convention differences, we pass all arguments on
	 * the stack by passing 6 dummy values in registers.
	 */

	arg_offsets [0] = dummy_stack_space + 0;
	arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
	arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
	ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
	regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
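
	/*
	 * Resulting frame layout, as a sketch (offsets are from the new RSP):
	 *   [RSP + arg_offsets [0]]  7th arg: mctx pointer
	 *   [RSP + arg_offsets [1]]  8th arg: exc object / ex_token_index
	 *   [RSP + arg_offsets [2]]  9th arg: rethrow flag / pc offset
	 *   [RSP + ctx_offset]       saved MonoContext (gregs start at regs_offset)
	 *   [RSP + stack_size]       caller's return address (used as the saved RIP)
	 */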

	/* Save registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (i != AMD64_RSP)
			amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
	/* Save RSP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
	/* Save IP */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
	/* Set arg1 == ctx */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
	else
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
	/* Set arg3 == rethrow/pc offset */
	if (resume_unwind) {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
	} else if (corlib) {
		if (llvm_abs)
			/*
			 * The caller doesn't pass in a pc/pc offset, instead we simply use the
			 * caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
			 */
			amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof(mgreg_t));
		else
			amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
	} else {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
	}

	if (aot) {
		const char *icall_name;

		if (resume_unwind)
			icall_name = "mono_amd64_resume_unwind";
		else if (corlib)
			icall_name = "mono_amd64_throw_corlib_exception";
		else
			icall_name = "mono_amd64_throw_exception";
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < kMaxCodeSize);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	if (info)
		*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}

/**
 * mono_arch_get_throw_exception:
 * \returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 */
gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
}

gpointer
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
}

/**
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
}
#endif /* !DISABLE_JIT */

/*
 * mono_arch_unwind_frame:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 */
gboolean
mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls,
							 MonoJitInfo *ji, MonoContext *ctx,
							 MonoContext *new_ctx, MonoLMF **lmf,
							 mgreg_t **save_locations,
							 StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);
	int i;

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		mgreg_t regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;
		guint8 *epilog = NULL;

		if (ji->is_trampoline)
			frame->type = FRAME_TYPE_TRAMPOLINE;
		else
			frame->type = FRAME_TYPE_MANAGED;

		unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);

		frame->unwind_info = unwind_info;
		frame->unwind_info_len = unwind_info_len;

		/*
		printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
		mono_print_unwind_info (unwind_info, unwind_info_len);
		*/
		/* LLVM compiled code doesn't have this info */
		if (ji->has_arch_eh_info)
			epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);

		for (i = 0; i < AMD64_NREG; ++i)
			regs [i] = new_ctx->gregs [i];

		mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   (guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
						   save_locations, MONO_MAX_IREGS, &cfa);

		for (i = 0; i < AMD64_NREG; ++i)
			new_ctx->gregs [i] = regs [i];

		/* The CFA becomes the new SP value */
		new_ctx->gregs [AMD64_RSP] = (mgreg_t)cfa;

		/* Adjust IP */
		new_ctx->gregs [AMD64_RIP] --;

		return TRUE;
	} else if (*lmf) {
		guint64 rip;

		if (((guint64)(*lmf)->previous_lmf) & 2) {
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			if (ext->debugger_invoke) {
				/*
				 * This LMF entry is created by the soft debug code to mark transitions to
				 * managed code done during invokes.
				 */
				frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
				memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
			} else if (ext->interp_exit) {
				frame->type = FRAME_TYPE_INTERP_TO_MANAGED;
				frame->interp_exit_data = ext->interp_exit_data;
			} else {
				g_assert_not_reached ();
			}

			*lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);

			return TRUE;
		}

		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
		} else if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return FALSE;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
		}

		ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
		/*
		 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
		 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
		 * return address.
		 */
		//g_assert (ji);
		if (!ji)
			return FALSE;

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			/* Trampoline frame */
			for (i = 0; i < AMD64_NREG; ++i)
				new_ctx->gregs [i] = ext->ctx->gregs [i];
			/* Adjust IP */
			new_ctx->gregs [AMD64_RIP] --;
		} else {
			/*
			 * The registers saved in the LMF will be restored using the normal unwind info,
			 * when the wrapper frame is processed.
			 */
			/* Adjust IP */
			rip --;
			new_ctx->gregs [AMD64_RIP] = rip;
			new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
			new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
			for (i = 0; i < AMD64_NREG; ++i) {
				if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
					new_ctx->gregs [i] = 0;
			}
		}

		*lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);

		return TRUE;
	}

	return FALSE;
}

/*
 * handle_signal_exception:
 *
 * Called when resuming from a signal handler.
 */
static void
handle_signal_exception (gpointer obj)
{
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
	MonoContext ctx;

	memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));

	mono_handle_exception (&ctx, (MonoObject *)obj);

	mono_restore_context (&ctx);
}

void
mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
{
	guint64 sp = ctx->gregs [AMD64_RSP];
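
	/*
	 * Informational: RDI holds the first integer argument in the SysV AMD64 ABI,
	 * so user_data below becomes async_cb's parameter once the context is
	 * restored; this path is used on the sigaction-based (POSIX) signal paths.
	 */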
	ctx->gregs [AMD64_RDI] = (guint64)user_data;

	/* Allocate a stack frame below the red zone */
	sp -= 128;
	/* The stack should be unaligned */
	if ((sp % 16) == 0)
		sp -= 8;
#ifdef __linux__
	/* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
	*(guint64*)sp = ctx->gregs [AMD64_RIP];
#endif
	ctx->gregs [AMD64_RSP] = sp;
	ctx->gregs [AMD64_RIP] = (guint64)async_cb;
}

/**
 * mono_arch_handle_exception:
 * \param ctx saved processor state
 * \param obj the exception object
 */
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoContext mctx;

	/*
	 * Handling the exception in the signal handler is problematic, since the original
	 * signal is disabled, and we could run arbitrary code through the debugger. So
	 * resume onto the normal stack and do most work there if possible.
	 */
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();

	/* Pass the ctx parameter in TLS */
	mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);

	mctx = jit_tls->ex_ctx;
	mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#else
	MonoContext mctx;

	mono_sigctx_to_monoctx (sigctx, &mctx);

	mono_handle_exception (&mctx, obj);

	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#endif
}

gpointer
mono_arch_ip_from_context (void *sigctx)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	return (gpointer)UCONTEXT_REG_RIP (ctx);
#elif defined(HOST_WIN32)
	return (gpointer)(((CONTEXT*)sigctx)->Rip);
#else
	MonoContext *ctx = sigctx;
	return (gpointer)ctx->gregs [AMD64_RIP];
#endif
}

static void
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
}

/*
 * This function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.eip, but in a function that
 * will restore the protection on the soft-guard pages and return back to
 * continue at mctx.eip.
 */
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;
	sp = (gpointer *)(mctx->gregs [AMD64_RSP]);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
	mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
	mctx->gregs [AMD64_RSP] = (guint64)sp;
}
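
/*
 * In effect the restored context behaves as if the interrupted code had called
 * restore_soft_guard_pages (): the original RIP is pushed as a return address,
 * so when that function returns, execution continues at the fault location with
 * the soft guard pages re-protected.
 */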

static void
altstack_handle_and_restore (MonoContext *ctx, MonoObject *obj, gboolean stack_ovf)
{
	MonoContext mctx;
	MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), MONO_CONTEXT_GET_IP (ctx), NULL);

	if (!ji)
		mono_handle_native_crash ("SIGSEGV", NULL, NULL);

	mctx = *ctx;

	mono_handle_exception (&mctx, obj);
	if (stack_ovf)
		prepare_for_guard_pages (&mctx);
	mono_restore_context (&mctx);
}

void
mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoException *exc = NULL;
	gpointer *sp;
	int frame_size;
	MonoContext *copied_ctx;

	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;

	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like:
	 *   ucontext struct
	 *   ...
	 *   return ip
	 * 128 is the size of the red zone
	 */
	frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
	frame_size += 15;
	frame_size &= ~15;
	sp = (gpointer *)(UCONTEXT_REG_RSP (sigctx) & ~15);
	sp = (gpointer *)((char*)sp - frame_size);
	copied_ctx = (MonoContext*)(sp + 4);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
	mono_sigctx_to_monoctx (sigctx, copied_ctx);
	/* at the return from the signal handler, execution starts in altstack_handle_and_restore() */
	UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
	UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
	UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
	UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
	UCONTEXT_REG_RDX (sigctx) = stack_ovf;
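	/*
	 * Informational: RDI, RSI and RDX are the first three SysV AMD64 argument
	 * registers, so altstack_handle_and_restore () receives (copied_ctx, exc,
	 * stack_ovf) when the signal handler returns onto the normal stack.
	 */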
#endif
}

guint64
mono_amd64_get_original_ip (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	g_assert (lmf);

	/* Reset the change to previous_lmf */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);

	return lmf->rip;
}

#ifndef DISABLE_JIT
GSList*
mono_amd64_get_exception_trampolines (gboolean aot)
{
	MonoTrampInfo *info;
	GSList *tramps = NULL;

	/* LLVM needs different throw trampolines */
	get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	return tramps;
}
#endif /* !DISABLE_JIT */

void
mono_arch_exceptions_init (void)
{
	GSList *tramps, *l;
	gpointer tramp;

	if (mono_aot_only) {
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
		mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
	} else {
		/* Call this to avoid initialization races */
		tramps = mono_amd64_get_exception_trampolines (FALSE);
		for (l = tramps; l; l = l->next) {
			MonoTrampInfo *info = (MonoTrampInfo *)l->data;

			mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
			mono_tramp_info_register (info, NULL);
		}
		g_slist_free (tramps);
	}
}

// Implies defined(TARGET_WIN32)
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE

static void
mono_arch_unwindinfo_create (gpointer* monoui)
{
	PUNWIND_INFO newunwindinfo;
	*monoui = newunwindinfo = g_new0 (UNWIND_INFO, 1);
	newunwindinfo->Version = 1;
}

void
mono_arch_unwindinfo_add_push_nonvol (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
{
	PUNWIND_CODE unwindcode;
	guchar codeindex;

	g_assert (unwindinfo != NULL);

	if (unwindinfo->CountOfCodes >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
	unwindcode = &unwindinfo->UnwindCode [codeindex];
	unwindcode->UnwindOp = UWOP_PUSH_NONVOL;
	unwindcode->CodeOffset = (guchar)unwind_op->when;
	unwindcode->OpInfo = unwind_op->reg;

	if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
}

void
mono_arch_unwindinfo_add_set_fpreg (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
{
	PUNWIND_CODE unwindcode;
	guchar codeindex;

	g_assert (unwindinfo != NULL);

	if (unwindinfo->CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
	unwindcode = &unwindinfo->UnwindCode [codeindex];
	unwindcode->UnwindOp = UWOP_SET_FPREG;
	unwindcode->CodeOffset = (guchar)unwind_op->when;

	g_assert (unwind_op->val % 16 == 0);
	unwindinfo->FrameRegister = unwind_op->reg;
	unwindinfo->FrameOffset = unwind_op->val / 16;

	if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
}

void
mono_arch_unwindinfo_add_alloc_stack (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
{
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	guchar codesneeded;
	guint size;

	g_assert (unwindinfo != NULL);

	size = unwind_op->val;

	if (size < 0x8)
		g_error ("Stack allocation must be equal to or greater than 0x8.");

	if (size <= 0x80)
		codesneeded = 1;
	else if (size <= 0x7FFF8)
		codesneeded = 2;
	else
		codesneeded = 3;

	if (unwindinfo->CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->CountOfCodes += codesneeded);
	unwindcode = &unwindinfo->UnwindCode [codeindex];

	unwindcode->CodeOffset = (guchar)unwind_op->when;

	if (codesneeded == 1) {
		/* The size of the allocation is
		   (the number in the OpInfo member) times 8 plus 8. */
		unwindcode->UnwindOp = UWOP_ALLOC_SMALL;
		unwindcode->OpInfo = (size - 8)/8;
	}
	else {
		if (codesneeded == 3) {
			/* The unscaled size of the allocation is recorded
			   in the next two slots in little-endian format.
			   NOTE, unwind codes are allocated from end to beginning of list so
			   unwind code will have right execution order. List is sorted on CodeOffset
			   using descending sort order. */
			unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
			unwindcode->OpInfo = 1;
			*((unsigned int*)(&(unwindcode + 1)->FrameOffset)) = size;
		}
		else {
			/* The size of the allocation divided by 8
			   is recorded in the next slot.
			   NOTE, unwind codes are allocated from end to beginning of list so
			   unwind code will have right execution order. List is sorted on CodeOffset
			   using descending sort order. */
			unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
			unwindcode->OpInfo = 0;
			(unwindcode + 1)->FrameOffset = (gushort)(size/8);
		}
	}

	if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
}

static gboolean g_dyn_func_table_inited;

// Dynamic function table used when registering unwind info for OS unwind support.
static GList *g_dynamic_function_table_begin;
static GList *g_dynamic_function_table_end;

// SRW lock (lightweight read/writer lock) protecting dynamic function table.
static SRWLOCK g_dynamic_function_table_lock = SRWLOCK_INIT;

// Module handle used when explicitly loading ntdll.
static HMODULE g_ntdll;

// If Win8 or Win2012Server or later, use growable function tables instead
// of callbacks. The callback solution is still the fallback on older systems.
static RtlAddGrowableFunctionTablePtr g_rtl_add_growable_function_table;
static RtlGrowFunctionTablePtr g_rtl_grow_function_table;
static RtlDeleteGrowableFunctionTablePtr g_rtl_delete_growable_function_table;

// When using the function table callback solution an out-of-proc module is needed by
// debuggers in order to read unwind info from the debug target.
#ifdef _MSC_VER
#define MONO_DAC_MODULE TEXT("mono-2.0-dac-sgen.dll")
#else
#define MONO_DAC_MODULE TEXT("mono-2.0-sgen.dll")
#endif

#define MONO_DAC_MODULE_MAX_PATH 1024

static void
init_table_no_lock (void)
{
	if (g_dyn_func_table_inited == FALSE) {
		g_assert_checked (g_dynamic_function_table_begin == NULL);
		g_assert_checked (g_dynamic_function_table_end == NULL);
		g_assert_checked (g_rtl_add_growable_function_table == NULL);
		g_assert_checked (g_rtl_grow_function_table == NULL);
		g_assert_checked (g_rtl_delete_growable_function_table == NULL);
		g_assert_checked (g_ntdll == NULL);

		// Load functions available on Win8/Win2012Server or later. If running on earlier
		// systems the GetProcAddress calls below will fail, this is expected behavior.
		if (GetModuleHandleEx (0, TEXT("ntdll.dll"), &g_ntdll) == TRUE) {
			g_rtl_add_growable_function_table = (RtlAddGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlAddGrowableFunctionTable");
			g_rtl_grow_function_table = (RtlGrowFunctionTablePtr)GetProcAddress (g_ntdll, "RtlGrowFunctionTable");
			g_rtl_delete_growable_function_table = (RtlDeleteGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlDeleteGrowableFunctionTable");
		}

		g_dyn_func_table_inited = TRUE;
	}
}

void
mono_arch_unwindinfo_init_table (void)
{
	if (g_dyn_func_table_inited == FALSE) {

		AcquireSRWLockExclusive (&g_dynamic_function_table_lock);

		init_table_no_lock ();

		ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
	}
}

static void
terminate_table_no_lock (void)
{
	if (g_dyn_func_table_inited == TRUE) {
		if (g_dynamic_function_table_begin != NULL) {
			// Free all list elements.
			for (GList *l = g_dynamic_function_table_begin; l; l = l->next) {
				if (l->data) {
					g_free (l->data);
					l->data = NULL;
				}
			}

			// Free the list.
			g_list_free (g_dynamic_function_table_begin);
			g_dynamic_function_table_begin = NULL;
			g_dynamic_function_table_end = NULL;
		}

		g_rtl_delete_growable_function_table = NULL;
		g_rtl_grow_function_table = NULL;
		g_rtl_add_growable_function_table = NULL;

		if (g_ntdll != NULL) {
			FreeLibrary (g_ntdll);
			g_ntdll = NULL;
		}

		g_dyn_func_table_inited = FALSE;
	}
}

void
mono_arch_unwindinfo_terminate_table (void)
{
	if (g_dyn_func_table_inited == TRUE) {

		AcquireSRWLockExclusive (&g_dynamic_function_table_lock);

		terminate_table_no_lock ();

		ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
	}
}
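
/*
 * Fast-path lookup: only inspects the first (highest begin_range) and last
 * entries of the descending-sorted table. Returns the head node when it already
 * covers [begin_range, end_range]; otherwise *continue_search is set when the
 * range could still fall somewhere inside the table, telling callers that a
 * full scan is needed.
 */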
static GList *
fast_find_range_in_table_no_lock_ex (gsize begin_range, gsize end_range, gboolean *continue_search)
{
	GList *found_entry = NULL;

	// Fast path, look at boundaries.
	if (g_dynamic_function_table_begin != NULL) {
		DynamicFunctionTableEntry *first_entry = g_dynamic_function_table_begin->data;
		DynamicFunctionTableEntry *last_entry = (g_dynamic_function_table_end != NULL) ? g_dynamic_function_table_end->data : first_entry;

		// Sorted in descending order based on begin_range, check first item, that is the entry with highest range.
		if (first_entry != NULL && first_entry->begin_range <= begin_range && first_entry->end_range >= end_range) {
			// Entry belongs to first entry in list.
			found_entry = g_dynamic_function_table_begin;
			*continue_search = FALSE;
		} else {
			if (first_entry != NULL && first_entry->begin_range >= begin_range) {
				if (last_entry != NULL && last_entry->begin_range <= begin_range) {
					// Entry has a range that could exist in table, continue search.
					*continue_search = TRUE;
				}
			}
		}
	}

	return found_entry;
}

static inline DynamicFunctionTableEntry *
fast_find_range_in_table_no_lock (gsize begin_range, gsize end_range, gboolean *continue_search)
{
	GList *found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, continue_search);
	return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
}

static GList *
find_range_in_table_no_lock_ex (const gpointer code_block, gsize block_size)
{
	GList *found_entry = NULL;
	gboolean continue_search = FALSE;

	gsize begin_range = (gsize)code_block;
	gsize end_range = begin_range + block_size;

	// Fast path, check table boundaries.
	found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, &continue_search);
	if (found_entry || continue_search == FALSE)
		return found_entry;

	// Scan table for an entry including range.
	for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
		DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
		g_assert_checked (current_entry != NULL);

		// Do we have a match?
		if (current_entry->begin_range == begin_range && current_entry->end_range == end_range) {
			found_entry = node;
			break;
		}
	}

	return found_entry;
}

static inline DynamicFunctionTableEntry *
find_range_in_table_no_lock (const gpointer code_block, gsize block_size)
{
	GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
	return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
}

static GList *
find_pc_in_table_no_lock_ex (const gpointer pc)
{
	GList *found_entry = NULL;
	gboolean continue_search = FALSE;

	gsize begin_range = (gsize)pc;
	gsize end_range = begin_range;

	// Fast path, check table boundaries.
	found_entry = fast_find_range_in_table_no_lock_ex (begin_range, begin_range, &continue_search);
	if (found_entry || continue_search == FALSE)
		return found_entry;

	// Scan table for an entry including range.
	for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
		DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
		g_assert_checked (current_entry != NULL);

		// Do we have a match?
		if (current_entry->begin_range <= begin_range && current_entry->end_range >= end_range) {
			found_entry = node;
			break;
		}
	}

	return found_entry;
}

static inline DynamicFunctionTableEntry *
find_pc_in_table_no_lock (const gpointer pc)
{
	GList *found_entry = find_pc_in_table_no_lock_ex (pc);
	return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
}

#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
static void
validate_table_no_lock (void)
{
	// Validation method checking that table is sorted as expected and doesn't include overlapped regions.
	// Method will assert on failure to explicitly indicate what check failed.
	if (g_dynamic_function_table_begin != NULL) {
		g_assert_checked (g_dynamic_function_table_end != NULL);

		DynamicFunctionTableEntry *previous_entry = NULL;
		DynamicFunctionTableEntry *current_entry = NULL;
		for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
			current_entry = (DynamicFunctionTableEntry *)node->data;

			g_assert_checked (current_entry != NULL);
			g_assert_checked (current_entry->end_range > current_entry->begin_range);

			if (previous_entry != NULL) {
				// List should be sorted in descending order on begin_range.
				g_assert_checked (previous_entry->begin_range > current_entry->begin_range);

				// Check for overlapped regions.
				g_assert_checked (previous_entry->begin_range >= current_entry->end_range);
			}

			previous_entry = current_entry;
		}
	}
}

#else

static inline void
validate_table_no_lock (void)
{
}
#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */

// Forward declare.
static PRUNTIME_FUNCTION MONO_GET_RUNTIME_FUNCTION_CALLBACK (DWORD64 ControlPc, IN PVOID Context);

DynamicFunctionTableEntry *
mono_arch_unwindinfo_insert_range_in_table (const gpointer code_block, gsize block_size)
{
	DynamicFunctionTableEntry *new_entry = NULL;

	gsize begin_range = (gsize)code_block;
	gsize end_range = begin_range + block_size;

	AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
	init_table_no_lock ();
	new_entry = find_range_in_table_no_lock (code_block, block_size);
	if (new_entry == NULL) {
		// Allocate new entry.
		new_entry = g_new0 (DynamicFunctionTableEntry, 1);
		if (new_entry != NULL) {

			// Pre-allocate RUNTIME_FUNCTION array, assume average method size of
			// MONO_UNWIND_INFO_RT_FUNC_SIZE bytes.
			InitializeSRWLock (&new_entry->lock);
			new_entry->handle = NULL;
			new_entry->begin_range = begin_range;
			new_entry->end_range = end_range;
			new_entry->rt_funcs_max_count = (block_size / MONO_UNWIND_INFO_RT_FUNC_SIZE) + 1;
			new_entry->rt_funcs_current_count = 0;
			new_entry->rt_funcs = g_new0 (RUNTIME_FUNCTION, new_entry->rt_funcs_max_count);

			if (new_entry->rt_funcs != NULL) {
				// Check insert on boundaries. List is sorted descending on begin_range.
				if (g_dynamic_function_table_begin == NULL) {
					g_dynamic_function_table_begin = g_list_append (g_dynamic_function_table_begin, new_entry);
					g_dynamic_function_table_end = g_dynamic_function_table_begin;
				} else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_begin->data))->begin_range < begin_range) {
					// Insert at the head.
					g_dynamic_function_table_begin = g_list_prepend (g_dynamic_function_table_begin, new_entry);
				} else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_end->data))->begin_range > begin_range) {
					// Insert at the tail.
					g_list_append (g_dynamic_function_table_end, new_entry);
					g_dynamic_function_table_end = g_dynamic_function_table_end->next;
				} else {
					// Search and insert at correct position.
					for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
						DynamicFunctionTableEntry * current_entry = (DynamicFunctionTableEntry *)node->data;
						g_assert_checked (current_entry != NULL);

						if (current_entry->begin_range < new_entry->begin_range) {
							g_dynamic_function_table_begin = g_list_insert_before (g_dynamic_function_table_begin, node, new_entry);
							break;
						}
					}
				}

				// Register dynamic function table entry with OS.
				if (g_rtl_add_growable_function_table != NULL) {
					// Allocate new growable handle table for entry.
					g_assert_checked (new_entry->handle == NULL);
					DWORD result = g_rtl_add_growable_function_table (&new_entry->handle,
										new_entry->rt_funcs, new_entry->rt_funcs_current_count,
										new_entry->rt_funcs_max_count, new_entry->begin_range, new_entry->end_range);
					g_assert (!result);
				} else {
					WCHAR buffer [MONO_DAC_MODULE_MAX_PATH] = { 0 };
					WCHAR *path = buffer;

					// DAC module should be in the same directory as the
					// main executable.
					GetModuleFileNameW (NULL, buffer, G_N_ELEMENTS(buffer));
					path = wcsrchr (buffer, TEXT('\\'));
					if (path != NULL) {
						path++;
						*path = TEXT('\0');
					}

					wcscat_s (buffer, G_N_ELEMENTS(buffer), MONO_DAC_MODULE);
					path = buffer;

					// Register function table callback + out of proc module.
					new_entry->handle = (PVOID)((DWORD64)(new_entry->begin_range) | 3);
					BOOLEAN result = RtlInstallFunctionTableCallback ((DWORD64)(new_entry->handle),
										(DWORD64)(new_entry->begin_range), (DWORD)(new_entry->end_range - new_entry->begin_range),
										MONO_GET_RUNTIME_FUNCTION_CALLBACK, new_entry, path);
					g_assert (result);
				}

				// Only included in checked builds. Validates the structure of table after insert.
				validate_table_no_lock ();

			} else {
				g_free (new_entry);
				new_entry = NULL;
			}
		}
	}
	ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);

	return new_entry;
}

static void
remove_range_in_table_no_lock (GList *entry)
{
	if (entry != NULL) {
		if (entry == g_dynamic_function_table_end)
			g_dynamic_function_table_end = entry->prev;

		g_dynamic_function_table_begin = g_list_remove_link (g_dynamic_function_table_begin, entry);
		DynamicFunctionTableEntry *removed_entry = (DynamicFunctionTableEntry *)entry->data;

		g_assert_checked (removed_entry != NULL);
		g_assert_checked (removed_entry->rt_funcs != NULL);

		// Remove function table from OS.
		if (removed_entry->handle != NULL) {
			if (g_rtl_delete_growable_function_table != NULL) {
				g_rtl_delete_growable_function_table (removed_entry->handle);
			} else {
				RtlDeleteFunctionTable ((PRUNTIME_FUNCTION)removed_entry->handle);
			}
		}

		g_free (removed_entry->rt_funcs);
		g_free (removed_entry);

		g_list_free_1 (entry);
	}

	// Only included in checked builds. Validates the structure of table after remove.
	validate_table_no_lock ();
}

void
mono_arch_unwindinfo_remove_pc_range_in_table (const gpointer code)
{
	AcquireSRWLockExclusive (&g_dynamic_function_table_lock);

	GList *found_entry = find_pc_in_table_no_lock_ex (code);

	g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code);
	remove_range_in_table_no_lock (found_entry);

	ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
}

void
mono_arch_unwindinfo_remove_range_in_table (const gpointer code_block, gsize block_size)
{
	AcquireSRWLockExclusive (&g_dynamic_function_table_lock);

	GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);

	g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code_block);
	remove_range_in_table_no_lock (found_entry);

	ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
}

PRUNTIME_FUNCTION
mono_arch_unwindinfo_find_rt_func_in_table (const gpointer code, gsize code_size)
{
	PRUNTIME_FUNCTION found_rt_func = NULL;

	gsize begin_range = (gsize)code;
	gsize end_range = begin_range + code_size;

	AcquireSRWLockShared (&g_dynamic_function_table_lock);

	DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);

	if (found_entry != NULL) {

		AcquireSRWLockShared (&found_entry->lock);

		g_assert_checked (found_entry->begin_range <= begin_range);
		g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
		g_assert_checked (found_entry->rt_funcs != NULL);

		for (int i = 0; i < found_entry->rt_funcs_current_count; ++i) {
			PRUNTIME_FUNCTION current_rt_func = (PRUNTIME_FUNCTION)(&found_entry->rt_funcs [i]);

			// Is this our RT function entry?
			if (found_entry->begin_range + current_rt_func->BeginAddress <= begin_range &&
				found_entry->begin_range + current_rt_func->EndAddress >= end_range) {
				found_rt_func = current_rt_func;
				break;
			}
		}

		ReleaseSRWLockShared (&found_entry->lock);
	}

	ReleaseSRWLockShared (&g_dynamic_function_table_lock);

	return found_rt_func;
}

static inline PRUNTIME_FUNCTION
mono_arch_unwindinfo_find_pc_rt_func_in_table (const gpointer pc)
{
	return mono_arch_unwindinfo_find_rt_func_in_table (pc, 0);
}

#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
static void
validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
{
	// Validation method checking that runtime function table is sorted as expected and doesn't include overlapped regions.
	// Method will assert on failure to explicitly indicate what check failed.
	g_assert_checked (entry != NULL);
	g_assert_checked (entry->rt_funcs_max_count >= entry->rt_funcs_current_count);
	g_assert_checked (entry->rt_funcs != NULL);

	PRUNTIME_FUNCTION current_rt_func = NULL;
	PRUNTIME_FUNCTION previous_rt_func = NULL;
	for (int i = 0; i < entry->rt_funcs_current_count; ++i) {
		current_rt_func = &(entry->rt_funcs [i]);

		g_assert_checked (current_rt_func->BeginAddress < current_rt_func->EndAddress);
		g_assert_checked (current_rt_func->EndAddress <= current_rt_func->UnwindData);

		if (previous_rt_func != NULL) {
			// List should be sorted in ascending order based on BeginAddress.
			g_assert_checked (previous_rt_func->BeginAddress < current_rt_func->BeginAddress);

			// Check for overlapped regions.
			g_assert_checked (previous_rt_func->EndAddress <= current_rt_func->BeginAddress);
		}

		previous_rt_func = current_rt_func;
	}
}

#else

static inline void
validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
{
}
#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */

PRUNTIME_FUNCTION
mono_arch_unwindinfo_insert_rt_func_in_table (const gpointer code, gsize code_size)
{
	PRUNTIME_FUNCTION new_rt_func = NULL;

	gsize begin_range = (gsize)code;
	gsize end_range = begin_range + code_size;

	AcquireSRWLockShared (&g_dynamic_function_table_lock);

	DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);

	if (found_entry != NULL) {

		AcquireSRWLockExclusive (&found_entry->lock);

		g_assert_checked (found_entry->begin_range <= begin_range);
		g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
		g_assert_checked (found_entry->rt_funcs != NULL);
		g_assert_checked ((guchar*)code - found_entry->begin_range >= 0);

		gsize code_offset = (gsize)code - found_entry->begin_range;
		gsize entry_count = found_entry->rt_funcs_current_count;
		gsize max_entry_count = found_entry->rt_funcs_max_count;
		PRUNTIME_FUNCTION current_rt_funcs = found_entry->rt_funcs;

		RUNTIME_FUNCTION new_rt_func_data;
		new_rt_func_data.BeginAddress = code_offset;
		new_rt_func_data.EndAddress = code_offset + code_size;

		gsize aligned_unwind_data = ALIGN_TO(end_range, sizeof (mgreg_t));
		new_rt_func_data.UnwindData = aligned_unwind_data - found_entry->begin_range;
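
		/*
		 * Informational: the UNWIND_INFO block is copied directly after the
		 * method's code (see mono_arch_unwindinfo_install_method_unwind_info),
		 * so UnwindData is simply the pointer-aligned end-of-code offset.
		 */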
		g_assert_checked (new_rt_func_data.UnwindData == ALIGN_TO(new_rt_func_data.EndAddress, sizeof (mgreg_t)));

		PRUNTIME_FUNCTION new_rt_funcs = NULL;

		// List needs to be sorted in ascending order based on BeginAddress (a Windows requirement if the list
		// is going to be directly reused in OS func tables). Check if we can append to the end of the existing table without a realloc.
		if (entry_count == 0 || (entry_count < max_entry_count) && (current_rt_funcs [entry_count - 1].BeginAddress) < code_offset) {
			new_rt_func = &(current_rt_funcs [entry_count]);
			*new_rt_func = new_rt_func_data;
			entry_count++;
		} else {
			// No easy way out, need to realloc, grow to double size (or current max, if too small).
			max_entry_count = entry_count * 2 > max_entry_count ? entry_count * 2 : max_entry_count;
			new_rt_funcs = g_new0 (RUNTIME_FUNCTION, max_entry_count);

			if (new_rt_funcs != NULL) {
				gsize from_index = 0;
				gsize to_index = 0;

				// Copy from old table into new table. Make sure new rt func gets inserted
				// into correct location based on sort order.
				for (; from_index < entry_count; ++from_index) {
					if (new_rt_func == NULL && current_rt_funcs [from_index].BeginAddress > new_rt_func_data.BeginAddress) {
						new_rt_func = &(new_rt_funcs [to_index++]);
						*new_rt_func = new_rt_func_data;
					}

					if (current_rt_funcs [from_index].UnwindData != 0)
						new_rt_funcs [to_index++] = current_rt_funcs [from_index];
				}

				// If we didn't insert by now, put it last in the list.
				if (new_rt_func == NULL) {
					new_rt_func = &(new_rt_funcs [to_index]);
					*new_rt_func = new_rt_func_data;
				}
			}

			entry_count++;
		}

		// Update the stats for current entry.
		found_entry->rt_funcs_current_count = entry_count;
		found_entry->rt_funcs_max_count = max_entry_count;

		if (new_rt_funcs == NULL && g_rtl_grow_function_table != NULL) {
			// No new table, just report increase in use.
			g_assert_checked (found_entry->handle != NULL);
			g_rtl_grow_function_table (found_entry->handle, found_entry->rt_funcs_current_count);
		} else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table != NULL) {
			// New table, delete old table and rt funcs, and register a new one.
			g_assert_checked (g_rtl_delete_growable_function_table != NULL);
			g_rtl_delete_growable_function_table (found_entry->handle);
			found_entry->handle = NULL;
			g_free (found_entry->rt_funcs);
			found_entry->rt_funcs = new_rt_funcs;
			DWORD result = g_rtl_add_growable_function_table (&found_entry->handle,
								found_entry->rt_funcs, found_entry->rt_funcs_current_count,
								found_entry->rt_funcs_max_count, found_entry->begin_range, found_entry->end_range);
			g_assert (!result);
		} else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table == NULL) {
			// No table registered with OS, callback solution in use. Switch tables.
			g_free (found_entry->rt_funcs);
			found_entry->rt_funcs = new_rt_funcs;
		} else if (new_rt_funcs == NULL && g_rtl_grow_function_table == NULL) {
			// No table registered with OS, callback solution in use, nothing to do.
		} else {
			g_assert_not_reached ();
		}

		// Only included in checked builds. Validates the structure of table after insert.
		validate_rt_funcs_in_table_no_lock (found_entry);

		ReleaseSRWLockExclusive (&found_entry->lock);
	}

	ReleaseSRWLockShared (&g_dynamic_function_table_lock);

	return new_rt_func;
}
1714 static PRUNTIME_FUNCTION
1715 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1717 return mono_arch_unwindinfo_find_pc_rt_func_in_table ((gpointer)ControlPc);
1720 static void
1721 initialize_unwind_info_internal_ex (GSList *unwind_ops, PUNWIND_INFO unwindinfo)
1723 if (unwind_ops != NULL && unwindinfo != NULL) {
1724 MonoUnwindOp *unwind_op_data;
1725 gboolean sp_alloced = FALSE;
1726 gboolean fp_alloced = FALSE;
1728 // Replay collected unwind info and setup Windows format.
1729 for (GSList *l = unwind_ops; l; l = l->next) {
1730 unwind_op_data = (MonoUnwindOp *)l->data;
1731 switch (unwind_op_data->op) {
1732 case DW_CFA_offset : {
1733 // Pushes should go before SP/FP allocation to be compliant with Windows x64 ABI.
1734 // TODO: DW_CFA_offset can also be used to move saved regs into frame.
1735 if (unwind_op_data->reg != AMD64_RIP && sp_alloced == FALSE && fp_alloced == FALSE)
1736 mono_arch_unwindinfo_add_push_nonvol (unwindinfo, unwind_op_data);
1737 break;
1738 }
1739 case DW_CFA_mono_sp_alloc_info_win64 : {
1740 mono_arch_unwindinfo_add_alloc_stack (unwindinfo, unwind_op_data);
1741 sp_alloced = TRUE;
1742 break;
1743 }
1744 case DW_CFA_mono_fp_alloc_info_win64 : {
1745 mono_arch_unwindinfo_add_set_fpreg (unwindinfo, unwind_op_data);
1746 fp_alloced = TRUE;
1747 break;
1748 }
1749 default :
1750 break;
1751 }
1752 }
1753 }
1754 }
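// Allocates a Windows-format UNWIND_INFO and fills it from the collected unwind ops.
// The caller owns the returned buffer; it is released with mono_arch_unwindinfo_free_unwind_info
// (or handed off to mono_arch_unwindinfo_install_method_unwind_info, which frees it).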
1756 static PUNWIND_INFO
1757 initialize_unwind_info_internal (GSList *unwind_ops)
1758 {
1759 PUNWIND_INFO unwindinfo;
1761 mono_arch_unwindinfo_create (&unwindinfo);
1762 initialize_unwind_info_internal_ex (unwind_ops, unwindinfo);
1764 return unwindinfo;
1765 }
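// Computes how many Windows unwind codes the given unwind ops translate into, using a
// temporary stack-allocated UNWIND_INFO.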
1767 guchar
1768 mono_arch_unwindinfo_get_code_count (GSList *unwind_ops)
1769 {
1770 UNWIND_INFO unwindinfo = {0};
1771 initialize_unwind_info_internal_ex (unwind_ops, &unwindinfo);
1772 return unwindinfo.CountOfCodes;
1773 }
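// Builds a heap-allocated UNWIND_INFO for the given unwind ops, or returns NULL when there
// are none; pair with mono_arch_unwindinfo_free_unwind_info.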
1775 PUNWIND_INFO
1776 mono_arch_unwindinfo_alloc_unwind_info (GSList *unwind_ops)
1777 {
1778 if (!unwind_ops)
1779 return NULL;
1781 return initialize_unwind_info_internal (unwind_ops);
1782 }
1784 void
1785 mono_arch_unwindinfo_free_unwind_info (PUNWIND_INFO unwind_info)
1786 {
1787 g_free (unwind_info);
1788 }
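// Builds the UNWIND_INFO for the method being compiled and returns the number of bytes to
// reserve after the method code for the copy installed later by
// mono_arch_unwindinfo_install_method_unwind_info.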
1790 guint
1791 mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg)
1792 {
1793 MonoCompile * current_cfg = (MonoCompile *)cfg;
1794 g_assert (current_cfg->arch.unwindinfo == NULL);
1795 current_cfg->arch.unwindinfo = initialize_unwind_info_internal (current_cfg->unwind_ops);
1796 return mono_arch_unwindinfo_get_size (((PUNWIND_INFO)(current_cfg->arch.unwindinfo))->CountOfCodes);
1797 }
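// Copies the temporary UNWIND_INFO to the (pointer-aligned) memory immediately following the
// method code, keeping only the unwind codes actually used, frees the temporary buffer and
// registers the code range in the dynamic function table.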
1799 void
1800 mono_arch_unwindinfo_install_method_unwind_info (gpointer *monoui, gpointer code, guint code_size)
1801 {
1802 PUNWIND_INFO unwindinfo, targetinfo;
1803 guchar codecount;
1804 guint64 targetlocation;
1805 if (!*monoui)
1806 return;
1808 unwindinfo = (PUNWIND_INFO)*monoui;
1809 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1810 targetinfo = (PUNWIND_INFO) ALIGN_TO(targetlocation, sizeof (mgreg_t));
1812 memcpy (targetinfo, unwindinfo, sizeof (UNWIND_INFO) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1814 codecount = unwindinfo->CountOfCodes;
1815 if (codecount) {
1816 memcpy (&targetinfo->UnwindCode [0], &unwindinfo->UnwindCode [MONO_MAX_UNWIND_CODES - codecount],
1817 sizeof (UNWIND_CODE) * codecount);
1818 }
1820 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
1821 if (codecount) {
1822 // Validate the order of unwind op codes in checked builds. Offsets should be in descending order.
1823 // In the first iteration previous == current; this is intended, so that UWOP_ALLOC_LARGE can appear as the first item.
1824 int previous = 0;
1825 for (int current = 0; current < codecount; current++) {
1826 g_assert_checked (targetinfo->UnwindCode [previous].CodeOffset >= targetinfo->UnwindCode [current].CodeOffset);
1827 previous = current;
1828 if (targetinfo->UnwindCode [current].UnwindOp == UWOP_ALLOC_LARGE) {
1829 if (targetinfo->UnwindCode [current].OpInfo == 0) {
1830 current++;
1831 } else {
1832 current += 2;
1833 }
1834 }
1835 }
1836 }
1837 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
1839 mono_arch_unwindinfo_free_unwind_info (unwindinfo);
1840 *monoui = 0;
1842 // Register unwind info in table.
1843 mono_arch_unwindinfo_insert_rt_func_in_table (code, code_size);
1844 }
1846 void
1847 mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size)
1848 {
1849 PUNWIND_INFO unwindinfo = initialize_unwind_info_internal (unwind_ops);
1850 if (unwindinfo != NULL) {
1851 mono_arch_unwindinfo_install_method_unwind_info (&unwindinfo, code, code_size);
1852 }
1853 }
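// Code manager hooks: every newly allocated code chunk reserves a range in the dynamic
// function table, and the range is removed again when the chunk is destroyed.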
1855 void
1856 mono_arch_code_chunk_new (void *chunk, int size)
1857 {
1858 mono_arch_unwindinfo_insert_range_in_table (chunk, size);
1859 }
1861 void mono_arch_code_chunk_destroy (void *chunk)
1862 {
1863 mono_arch_unwindinfo_remove_pc_range_in_table (chunk);
1864 }
1865 #endif /* MONO_ARCH_HAVE_UNWIND_TABLE */
1867 #if MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT)
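/* Builds (once) a small thunk used to resume a tasklet continuation: it copies the saved
 * stack back over return_sp, restores RBP/RSP from the continuation's LMF, puts the state
 * argument in RAX as the return value and jumps to the saved return_ip. */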
1868 MonoContinuationRestore
1869 mono_tasklets_arch_restore (void)
1870 {
1871 static guint8* saved = NULL;
1872 guint8 *code, *start;
1873 int cont_reg = AMD64_R9; /* register usable on both call conventions */
1874 const guint kMaxCodeSize = 64;
1877 if (saved)
1878 return (MonoContinuationRestore)saved;
1879 code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
1880 /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
1881 /* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
1882 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
1883 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
1884 * We move cont to cont_reg since we need both rcx and rdi for the copy
1885 * state is moved to $rax so it is set up as the return value and we can overwrite $rsi
1886 */
1887 amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
1888 amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
1889 /* setup the copy of the stack */
1890 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
1891 amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
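/* stack_used_size is in bytes; shifting right by 3 yields the number of 8-byte words
 * transferred by the rep/movs copy below */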
1892 x86_cld (code);
1893 amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
1894 amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
1895 amd64_prefix (code, X86_REP_PREFIX);
1896 amd64_movsl (code);
1898 /* now restore the registers from the LMF */
1899 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
1900 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
1901 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);
1903 #ifdef WIN32
1904 amd64_mov_reg_reg (code, AMD64_R14, AMD64_ARG_REG3, 8);
1905 #else
1906 amd64_mov_reg_reg (code, AMD64_R12, AMD64_ARG_REG3, 8);
1907 #endif
1909 /* state is already in rax */
1910 amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
1911 g_assert ((code - start) <= kMaxCodeSize);
1913 mono_arch_flush_icache (start, code - start);
1914 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
1916 saved = start;
1917 return (MonoContinuationRestore)saved;
1918 }
1919 #endif /* MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT) */
1921 /*
1922 * mono_arch_setup_resume_sighandler_ctx:
1923 *
1924 *   Setup CTX so execution continues at FUNC.
1925 */
1926 void
1927 mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
1928 {
1929 /*
1930 * When resuming from a signal handler, the stack should be misaligned, just like right after
1931 * a call.
1932 */
1933 if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
1934 MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
1935 MONO_CONTEXT_SET_IP (ctx, func);
1936 }
1938 #ifdef DISABLE_JIT
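// Stubs used when the JIT is compiled out; none of these should ever be reached at runtime.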
1939 gpointer
1940 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
1941 {
1942 g_assert_not_reached ();
1943 return NULL;
1944 }
1946 gpointer
1947 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
1948 {
1949 g_assert_not_reached ();
1950 return NULL;
1951 }
1953 gpointer
1954 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
1955 {
1956 g_assert_not_reached ();
1957 return NULL;
1958 }
1960 gpointer
1961 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
1962 {
1963 g_assert_not_reached ();
1964 return NULL;
1965 }
1967 gpointer
1968 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
1969 {
1970 g_assert_not_reached ();
1971 return NULL;
1972 }
1974 GSList*
1975 mono_amd64_get_exception_trampolines (gboolean aot)
1976 {
1977 g_assert_not_reached ();
1978 return NULL;
1979 }
1980 #endif /* DISABLE_JIT */
1982 #if !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT)
1983 MonoContinuationRestore
1984 mono_tasklets_arch_restore (void)
1985 {
1986 g_assert_not_reached ();
1987 return NULL;
1988 }
1989 #endif /* !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT) */