/*
 * exceptions-amd64.c: exception support for AMD64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 */

#include <config.h>

#include <glib.h>
#include <string.h>

#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#ifdef HAVE_UCONTEXT_H
#include <ucontext.h>
#endif

#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini.h"
#include "mini-amd64.h"
#include "tasklets.h"
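
/* Round VAL up to the next multiple of ALIGN; ALIGN must be a power of two. */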
#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))

#ifdef TARGET_WIN32
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
void *mono_win_vectored_exception_handle;

#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler(0, ep, ctx)
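
/* W32_SEH_HANDLE_EX(segv) expands to "if (segv_handler) segv_handler(0, ep, ctx)",
 * forwarding the Win32 exception to the handler installed via win32_seh_set_handler (). */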

static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
{
#ifndef MONO_CROSS_COMPILE
	if (mono_old_win_toplevel_exception_filter) {
		return (*mono_old_win_toplevel_exception_filter)(ep);
	}
#endif

	mono_handle_native_sigsegv (SIGSEGV, NULL, NULL);

	return EXCEPTION_CONTINUE_SEARCH;
}

/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
 */
static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	LONG res;
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);

	/* If the thread is not managed by the runtime return early */
	if (!jit_tls)
		return EXCEPTION_CONTINUE_SEARCH;

	jit_tls->mono_win_chained_exception_needs_run = FALSE;
	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;

	switch (er->ExceptionCode) {
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		jit_tls->mono_win_chained_exception_needs_run = TRUE;
		break;
	}

	if (jit_tls->mono_win_chained_exception_needs_run) {
		/* Don't copy context back if we chained exception
		 * as the handler may have modified the EXCEPTION_POINTERS
		 * directly. We don't pass sigcontext to chained handlers.
		 * Return continue search so the UnhandledExceptionFilter
		 * can correctly chain the exception.
		 */
		res = EXCEPTION_CONTINUE_SEARCH;
	}

	return res;
}

void win32_seh_init()
{
	mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
	mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
}

void win32_seh_cleanup()
{
	guint32 ret = 0;

	if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);

	ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
	g_assert (ret);
}

void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* TARGET_WIN32 */

/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, gregs_offset;

	/* restore_context (MonoContext *ctx) */

	start = code = mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
	for (i = 0; i < AMD64_NREG; ++i) {
#if defined(__native_client_codegen__)
		if (i == AMD64_R15)
			continue;
#endif
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
			amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
	}

	/*
	 * The context resides on the stack, in the stack frame of the
	 * caller of this function.  The stack pointer that we need to
	 * restore is potentially many stack frames higher up, so the
	 * distance between them can easily be more than the red zone
	 * size.  Hence the stack pointer can be restored only after
	 * we have finished loading everything from the context.
	 */
	amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	nacl_global_codeman_validate (&start, 256, &code);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);

	return start;
}

/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start;
	int i, gregs_offset;
	guint8 *code;
	guint32 pos;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	/* call_filter (MonoContext *ctx, unsigned long eip) */
	code = start;

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	pos = 0;
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
			pos += 8;
		}

	/* Save EBP */
	pos += 8;
	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* set new EBP */
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
	/* load callee saved regs */
	for (i = 0; i < AMD64_NREG; ++i) {
#if defined(__native_client_codegen__)
		if (i == AMD64_R15)
			continue;
#endif
		if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
			amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
	}
	/* load exc register */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	/* restore RBP */
	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

	amd64_leave (code);
	amd64_ret (code);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);

	return start;
}

/*
 * The first few arguments are dummy, to force the other arguments to be passed on
 * the stack, this avoids overwriting the argument registers in the throw trampoline.
 */
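
/* With six dummy register arguments, mctx/exc/rethrow become the 7th-9th parameters,
 * which both the SysV AMD64 ABI (6 integer register args) and the Win64 ABI (4 register
 * args) place on the stack. */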

void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
							guint64 dummy5, guint64 dummy6,
							MonoContext *mctx, MonoObject *exc, gboolean rethrow)
{
	MonoContext ctx;

	/* mctx is on the caller's stack */
	memcpy (&ctx, mctx, sizeof (MonoContext));

	if (mono_object_isinst (exc, mono_defaults.exception_class)) {
		MonoException *mono_ex = (MonoException*)exc;
		if (!rethrow) {
			mono_ex->stack_trace = NULL;
			mono_ex->trace_ips = NULL;
		}
	}

	/* adjust eip so that it points into the call instruction */
	ctx.gregs [AMD64_RIP] --;

	mono_handle_exception (&ctx, exc);
	mono_restore_context (&ctx);
	g_assert_not_reached ();
}

void
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
								   guint64 dummy5, guint64 dummy6,
								   MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
{
	guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
	MonoException *ex;

	ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);

	mctx->gregs [AMD64_RIP] -= pc_offset;

	/* Negate the ip adjustment done in mono_amd64_throw_exception () */
	mctx->gregs [AMD64_RIP] += 1;

	mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE);
}

static void
mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
						  guint64 dummy5, guint64 dummy6,
						  MonoContext *mctx, guint32 dummy7, gint64 dummy8)
{
	/* Only the register parameters are valid */
	MonoContext ctx;

	/* mctx is on the caller's stack */
	memcpy (&ctx, mctx, sizeof (MonoContext));

	mono_resume_unwind (&ctx);
}

/*
 * get_throw_trampoline:
 *
 * Generate a call to mono_amd64_throw_exception/
 * mono_amd64_throw_corlib_exception.
 */
static gpointer
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
{
	guint8* start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
	const guint kMaxCodeSize = NACL_SIZE (256, 512);

#ifdef TARGET_WIN32
	dummy_stack_space = 6 * sizeof(mgreg_t);	/* Windows expects stack space allocated for all 6 dummy args. */
#else
	dummy_stack_space = 0;
#endif

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	/* The stack is unaligned on entry */
	stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;

	code = start;

	if (info)
		unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
	if (info)
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);

	/*
	 * To hide linux/windows calling convention differences, we pass all arguments on
	 * the stack by passing 6 dummy values in registers.
	 */

	arg_offsets [0] = dummy_stack_space + 0;
	arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
	arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
	ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
	regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* Save registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (i != AMD64_RSP)
			amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
	/* Save RSP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
	/* Save IP */
	if (llvm_abs)
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
	else
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
	/* Set arg1 == ctx */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
	else
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
	/* Set arg3 == rethrow/pc offset */
	if (resume_unwind) {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
	} else if (corlib) {
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
		if (llvm_abs)
			/*
			 * The caller is LLVM code which passes the absolute address not a pc offset,
			 * so compensate by passing 0 as 'rip' and passing the negated abs address as
			 * the pc offset.
			 */
			amd64_neg_membase (code, AMD64_RSP, arg_offsets [2]);
	} else {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
	}

	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_amd64_throw_corlib_exception" : "mono_amd64_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}

/**
 * mono_arch_get_throw_exception:
 *
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 */
gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
}

gpointer
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
}

/**
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
}

/*
 * mono_arch_find_jit_info:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 */
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
							 MonoJitInfo *ji, MonoContext *ctx,
							 MonoContext *new_ctx, MonoLMF **lmf,
							 mgreg_t **save_locations,
							 StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);
	int i;

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		mgreg_t regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;
		guint8 *epilog = NULL;

		frame->type = FRAME_TYPE_MANAGED;

		unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);

		frame->unwind_info = unwind_info;
		frame->unwind_info_len = unwind_info_len;

		/*
		printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
		mono_print_unwind_info (unwind_info, unwind_info_len);
		*/

		/* LLVM compiled code doesn't have this info */
		if (ji->has_arch_eh_info)
			epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);

		for (i = 0; i < AMD64_NREG; ++i)
			regs [i] = new_ctx->gregs [i];

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
						   save_locations, MONO_MAX_IREGS, &cfa);

		for (i = 0; i < AMD64_NREG; ++i)
			new_ctx->gregs [i] = regs [i];

		/* The CFA becomes the new SP value */
		new_ctx->gregs [AMD64_RSP] = (mgreg_t)cfa;

		/* Adjust IP */
		new_ctx->gregs [AMD64_RIP] --;

		return TRUE;
	} else if (*lmf) {
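		/*
		 * The low bits of (*lmf)->previous_lmf are used as flags: bit 0 means lmf->rip
		 * is set, bit 1 marks a debugger-invoke MonoLMFExt entry, bit 2 marks a
		 * trampoline MonoLMFTramp entry.  Masking with ~7 recovers the real
		 * previous_lmf pointer (see the checks below).
		 */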
		guint64 rip;

		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~7);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
		} else if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return FALSE;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
		}

		ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
		/*
		 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
		 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
		 * return address.
		 */
		//g_assert (ji);
		if (!ji)
			return FALSE;

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			/* Trampoline frame */
			for (i = 0; i < AMD64_NREG; ++i)
				new_ctx->gregs [i] = ext->ctx->gregs [i];
			/* Adjust IP */
			new_ctx->gregs [AMD64_RIP] --;
		} else {
			/*
			 * The registers saved in the LMF will be restored using the normal unwind info,
			 * when the wrapper frame is processed.
			 */
			/* Adjust IP */
			rip --;
			new_ctx->gregs [AMD64_RIP] = rip;
			new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
			new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
			for (i = 0; i < AMD64_NREG; ++i) {
				if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
					new_ctx->gregs [i] = 0;
			}
		}

		*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~7);

		return TRUE;
	}

	return FALSE;
}

/*
 * handle_signal_exception:
 *
 * Called by resuming from a signal handler.
 */
static void
handle_signal_exception (gpointer obj)
{
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
	MonoContext ctx;

	memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));

	mono_handle_exception (&ctx, obj);

	mono_restore_context (&ctx);
}
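
/*
 * mono_arch_setup_async_callback:
 *
 * Modify CTX so that, when it is restored, execution continues in ASYNC_CB with
 * USER_DATA in %rdi (the first SysV argument), on a fresh stack slot allocated
 * below the red zone of the interrupted frame.
 */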
void
mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
{
	guint64 sp = ctx->gregs [AMD64_RSP];

	ctx->gregs [AMD64_RDI] = (guint64)user_data;

	/* Allocate a stack frame below the red zone */
	sp -= 128;
	/* The stack should be unaligned */
	if ((sp % 16) == 0)
		sp -= 8;
#ifdef __linux__
	/* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
	*(guint64*)sp = ctx->gregs [AMD64_RIP];
#endif
	ctx->gregs [AMD64_RSP] = sp;
	ctx->gregs [AMD64_RIP] = (guint64)async_cb;
}

/**
 * mono_arch_handle_exception:
 *
 * @ctx: saved processor state
 * @obj: the exception object
 */
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoContext mctx;

	/*
	 * Handling the exception in the signal handler is problematic, since the original
	 * signal is disabled, and we could run arbitrary code through the debugger. So
	 * resume into the normal stack and do most work there if possible.
	 */
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);

	/* Pass the ctx parameter in TLS */
	mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);

	mctx = jit_tls->ex_ctx;
	mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#else
	MonoContext mctx;

	mono_sigctx_to_monoctx (sigctx, &mctx);

	mono_handle_exception (&mctx, obj);

	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#endif
}

gpointer
mono_arch_ip_from_context (void *sigctx)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	return (gpointer)UCONTEXT_REG_RIP (ctx);
#elif defined(HOST_WIN32)
	return ((CONTEXT*)sigctx)->Rip;
#else
	MonoContext *ctx = sigctx;
	return (gpointer)ctx->gregs [AMD64_RIP];
#endif
}

static void
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
}

/*
 * this function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.eip, but in a function that
 * will restore the protection on the soft-guard pages and return back to
 * continue at mctx.eip.
 */
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;
	sp = (gpointer)(mctx->gregs [AMD64_RSP]);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
	mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
	mctx->gregs [AMD64_RSP] = (guint64)sp;
}

static void
altstack_handle_and_restore (MonoContext *ctx, gpointer obj, gboolean stack_ovf)
{
	MonoContext mctx;

	mctx = *ctx;

	mono_handle_exception (&mctx, obj);
	if (stack_ovf)
		prepare_for_guard_pages (&mctx);
	mono_restore_context (&mctx);
}

void
mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoException *exc = NULL;
	MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)UCONTEXT_REG_RIP (sigctx), NULL);
	gpointer *sp;
	int frame_size;
	MonoContext *copied_ctx;

	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;
	if (!ji)
		mono_handle_native_sigsegv (SIGSEGV, sigctx, siginfo);

	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like:
	 *   ucontext struct
	 *   ...
	 *   return ip
	 * 128 is the size of the red zone
	 */
	frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
	frame_size += 15;
	frame_size &= ~15;
	sp = (gpointer)(UCONTEXT_REG_RSP (sigctx) & ~15);
	sp = (gpointer)((char*)sp - frame_size);
	copied_ctx = (MonoContext*)(sp + 4);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
	mono_sigctx_to_monoctx (sigctx, copied_ctx);
	/* at the return from the signal handler execution starts in altstack_handle_and_restore() */
	UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
	UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
	UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
	UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
	UCONTEXT_REG_RDX (sigctx) = stack_ovf;
#endif
}
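
/*
 * mono_amd64_get_original_ip:
 *
 * Return the hijacked return address stored in lmf->rip by
 * mono_arch_notify_pending_exc () and clear the 'rip is set' flag in previous_lmf.
 */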
guint64
mono_amd64_get_original_ip (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	g_assert (lmf);

	/* Reset the change to previous_lmf */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);

	return lmf->rip;
}

gpointer
mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *start;
	guint8 *br[1];
	gpointer throw_trampoline;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	/* We are in the frame of a managed method after a call */
	/*
	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.
	 */

	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the pending exception */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Load exc */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop saved stuff from the stack */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	/* Exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		throw_trampoline = mono_get_throw_exception ();
		amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	}
	/* We use a jump instead of a call so we can push the original ip on the stack */
	amd64_jump_reg (code, AMD64_R11);

	/* ex == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create ("throw_pending_exception", start, code - start, ji, unwind_ops);

	return start;
}
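
/* Cached trampoline created by mono_arch_get_throw_pending_exception (); installed as a
 * hijacked return address by mono_arch_notify_pending_exc () below. */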
static gpointer throw_pending_exception;

/*
 * Called when a thread receives an async exception while executing unmanaged code.
 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
 * the return address on the stack to point to a helper routine which throws the
 * exception.
 */
void
mono_arch_notify_pending_exc (MonoThreadInfo *info)
{
	MonoLMF *lmf = mono_get_lmf ();

	if (!info) {
		lmf = mono_get_lmf ();
	} else {
		g_assert (mono_thread_info_get_suspend_state (info)->valid);
		lmf = mono_thread_info_get_suspend_state (info)->unwind_data [MONO_UNWIND_DATA_LMF];
	}

	if (!lmf)
		/* Not yet started */
		return;

	if (lmf->rsp == 0)
		/* Initial LMF */
		return;

	if ((guint64)lmf->previous_lmf & 5)
		/* Already hijacked or trampoline LMF entry */
		return;

	/* lmf->rsp is set just before making the call which transitions to unmanaged code */
	lmf->rip = *(guint64*)(lmf->rsp - 8);
	/* Signal that lmf->rip is set */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);

	*(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
}

GSList*
mono_amd64_get_exception_trampolines (gboolean aot)
{
	MonoTrampInfo *info;
	GSList *tramps = NULL;

	mono_arch_get_throw_pending_exception (&info, aot);
	tramps = g_slist_prepend (tramps, info);

	/* LLVM needs different throw trampolines */
	get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", FALSE);
	tramps = g_slist_prepend (tramps, info);

	return tramps;
}

void
mono_arch_exceptions_init (void)
{
	GSList *tramps, *l;
	gpointer tramp;

	if (mono_aot_only) {
		throw_pending_exception = mono_aot_get_trampoline ("throw_pending_exception");
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
		mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
	} else {
		/* Call this to avoid initialization races */
		throw_pending_exception = mono_arch_get_throw_pending_exception (NULL, FALSE);

		tramps = mono_amd64_get_exception_trampolines (FALSE);
		for (l = tramps; l; l = l->next) {
			MonoTrampInfo *info = l->data;

			mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
			mono_tramp_info_register (info);
		}
		g_slist_free (tramps);
	}
}

#ifdef TARGET_WIN32

/*
 * The mono_arch_unwindinfo* methods are used to build and add
 * function table info for each emitted method from mono.  On Winx64
 * the seh handler will not be called if the mono methods are not
 * added to the function table.
 *
 * We should not need to add non-volatile register info to the
 * table since mono stores that info elsewhere. (Except for the register
 * used for the fp.)
 */

#define MONO_MAX_UNWIND_CODES 22

typedef union _UNWIND_CODE {
	struct {
		guchar CodeOffset;
		guchar UnwindOp : 4;
		guchar OpInfo   : 4;
	};
	gushort FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;

typedef struct _UNWIND_INFO {
	guchar Version       : 3;
	guchar Flags         : 5;
	guchar SizeOfProlog;
	guchar CountOfCodes;
	guchar FrameRegister : 4;
	guchar FrameOffset   : 4;
	/* custom size for mono allowing for */
	/*UWOP_PUSH_NONVOL ebp		offset = 21*/
	/*UWOP_ALLOC_LARGE : requires 2 or 3	offset = 20*/
	/*UWOP_SET_FPREG : requires 2		offset = 17*/
	/*UWOP_PUSH_NONVOL			offset = 15-0*/
	UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];

/*	UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
 *	union {
 *	    OPTIONAL ULONG ExceptionHandler;
 *	    OPTIONAL ULONG FunctionEntry;
 *	};
 *	OPTIONAL ULONG ExceptionData[]; */
} UNWIND_INFO, *PUNWIND_INFO;
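
/* These definitions mirror the UNWIND_CODE/UNWIND_INFO layout described in the
 * Windows x64 exception handling ABI documentation. */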

typedef struct
{
	RUNTIME_FUNCTION runtimeFunction;
	UNWIND_INFO unwindInfo;
} MonoUnwindInfo, *PMonoUnwindInfo;

static void
mono_arch_unwindinfo_create (gpointer* monoui)
{
	PMonoUnwindInfo newunwindinfo;
	*monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
	newunwindinfo->unwindInfo.Version = 1;
}

void
mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}

void
mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
	unwindcode++;
	unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	unwindinfo->unwindInfo.FrameRegister = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}

void
mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	guchar codesneeded;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (size < 0x8)
		g_error ("Stack allocation must be equal to or greater than 0x8.");

	if (size <= 0x80)
		codesneeded = 1;
	else if (size <= 0x7FFF8)
		codesneeded = 2;
	else
		codesneeded = 3;

	if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];

	if (codesneeded == 1) {
		/*The size of the allocation is
		  (the number in the OpInfo member) times 8 plus 8*/
		unwindcode->OpInfo = (size - 8)/8;
		unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
	}
	else {
		if (codesneeded == 3) {
			/*the unscaled size of the allocation is recorded
			  in the next two slots in little-endian format*/
			*((unsigned int*)(&unwindcode->FrameOffset)) = size;
			unwindcode += 2;
			unwindcode->OpInfo = 1;
		}
		else {
			/*the size of the allocation divided by 8
			  is recorded in the next slot*/
			unwindcode->FrameOffset = size/8;
			unwindcode++;
			unwindcode->OpInfo = 0;
		}
		unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
	}

	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}

guint
mono_arch_unwindinfo_get_size (gpointer monoui)
{
	PMonoUnwindInfo unwindinfo;
	if (!monoui)
		return 0;

	unwindinfo = (MonoUnwindInfo*)monoui;
	return (8 + sizeof (MonoUnwindInfo)) -
		(sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
}

static PRUNTIME_FUNCTION
MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
{
	MonoJitInfo *ji;
	guint64 pos;
	PMonoUnwindInfo targetinfo;
	MonoDomain *domain = mono_domain_get ();

	ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL);
	if (!ji)
		return 0;

	pos = (guint64)(((char*)ji->code_start) + ji->code_size);

	targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);

	targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);

	return &targetinfo->runtimeFunction;
}

void
mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
{
	PMonoUnwindInfo unwindinfo, targetinfo;
	guchar codecount;
	guint64 targetlocation;
	if (!*monoui)
		return;

	unwindinfo = (MonoUnwindInfo*)*monoui;
	targetlocation = (guint64)&(((guchar*)code)[code_size]);
	targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);

	unwindinfo->runtimeFunction.EndAddress = code_size;
	unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);

	memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));

	codecount = unwindinfo->unwindInfo.CountOfCodes;
	if (codecount) {
		memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
			sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
	}

	g_free (unwindinfo);
	*monoui = 0;
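
	/* The two low-order bits of the table identifier must be set when registering a
	   callback-based dynamic function table, hence the | 0x3 below. */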
	RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
}

#endif

#if MONO_SUPPORT_TASKLETS
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both call conventions */
	const guint kMaxCodeSize = NACL_SIZE (64, 128);

	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = mono_global_codeman_reserve (kMaxCodeSize);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy
	 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
	 */
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* setup the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	x86_cld (code);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);
	amd64_movsl (code);

	/* now restore the registers from the LMF */
	NOT_IMPLEMENTED;
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);

	/* restore the lmf chain */
	/*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	saved = start;
	return (MonoContinuationRestore)saved;
}
#endif

/*
 * mono_arch_setup_resume_sighandler_ctx:
 *
 * Setup CTX so execution continues at FUNC.
 */
void
mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
{
	/*
	 * When resuming from a signal handler, the stack should be misaligned, just like right after
	 * a call.
	 */
	if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
		MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
	MONO_CONTEXT_SET_IP (ctx, func);
}