Adjust type of mono_win_vectored_exception_handle to match its use.
mono/mini/exceptions-amd64.c
1 /*
2 * exceptions-amd64.c: exception support for AMD64
4 * Authors:
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
8 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
9 */
11 #include <config.h>
13 #if _WIN32_WINNT < 0x0501
14 /* Required for Vectored Exception Handling. */
15 #undef _WIN32_WINNT
16 #define _WIN32_WINNT 0x0501
17 #endif /* _WIN32_WINNT < 0x0501 */
19 #include <glib.h>
20 #include <signal.h>
21 #include <string.h>
22 #ifdef HAVE_UCONTEXT_H
23 #include <ucontext.h>
24 #endif
26 #include <mono/arch/amd64/amd64-codegen.h>
27 #include <mono/metadata/appdomain.h>
28 #include <mono/metadata/tabledefs.h>
29 #include <mono/metadata/threads.h>
30 #include <mono/metadata/threads-types.h>
31 #include <mono/metadata/debug-helpers.h>
32 #include <mono/metadata/exception.h>
33 #include <mono/metadata/gc-internal.h>
34 #include <mono/metadata/mono-debug.h>
35 #include <mono/utils/mono-mmap.h>
37 #include "mini.h"
38 #include "mini-amd64.h"
39 #include "tasklets.h"
41 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
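/* For example, ALIGN_TO (13, 8) == 16 and ALIGN_TO (16, 8) == 16; the macro
 * assumes 'align' is a power of two. */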
43 #ifdef TARGET_WIN32
44 static MonoW32ExceptionHandler fpe_handler;
45 static MonoW32ExceptionHandler ill_handler;
46 static MonoW32ExceptionHandler segv_handler;
48 LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
49 void *mono_win_vectored_exception_handle;
50 extern gboolean mono_win_chained_exception_needs_run;
52 #define W32_SEH_HANDLE_EX(_ex) \
53 if (_ex##_handler) _ex##_handler(0, ep, sctx)
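/* Illustrative expansion (not part of the original source): W32_SEH_HANDLE_EX (segv)
 * expands to "if (segv_handler) segv_handler (0, ep, sctx)", i.e. it dispatches to
 * whichever handler win32_seh_set_handler () registered for that signal. */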
55 LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
57 #ifndef MONO_CROSS_COMPILE
58 if (mono_old_win_toplevel_exception_filter) {
59 return (*mono_old_win_toplevel_exception_filter)(ep);
61 #endif
63 mono_handle_native_sigsegv (SIGSEGV, NULL);
65 return EXCEPTION_CONTINUE_SEARCH;
 69 * seh_vectored_exception_handler:
 70 * Top-level per-process vectored exception handler.
72 LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
74 EXCEPTION_RECORD* er;
75 CONTEXT* ctx;
76 MonoContext* sctx;
77 LONG res;
79 mono_win_chained_exception_needs_run = FALSE;
80 res = EXCEPTION_CONTINUE_EXECUTION;
82 er = ep->ExceptionRecord;
83 ctx = ep->ContextRecord;
84 sctx = g_malloc(sizeof(MonoContext));
86 /* Copy Win32 context to UNIX style context */
87 sctx->rax = ctx->Rax;
88 sctx->rbx = ctx->Rbx;
89 sctx->rcx = ctx->Rcx;
90 sctx->rdx = ctx->Rdx;
91 sctx->rbp = ctx->Rbp;
92 sctx->rsp = ctx->Rsp;
93 sctx->rsi = ctx->Rsi;
94 sctx->rdi = ctx->Rdi;
95 sctx->rip = ctx->Rip;
96 sctx->r12 = ctx->R12;
97 sctx->r13 = ctx->R13;
98 sctx->r14 = ctx->R14;
99 sctx->r15 = ctx->R15;
101 switch (er->ExceptionCode) {
102 case EXCEPTION_ACCESS_VIOLATION:
103 W32_SEH_HANDLE_EX(segv);
104 break;
105 case EXCEPTION_ILLEGAL_INSTRUCTION:
106 W32_SEH_HANDLE_EX(ill);
107 break;
108 case EXCEPTION_INT_DIVIDE_BY_ZERO:
109 case EXCEPTION_INT_OVERFLOW:
110 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
111 case EXCEPTION_FLT_OVERFLOW:
112 case EXCEPTION_FLT_UNDERFLOW:
113 case EXCEPTION_FLT_INEXACT_RESULT:
114 W32_SEH_HANDLE_EX(fpe);
115 break;
116 default:
117 break;
120 if (mono_win_chained_exception_needs_run) {
 121 /* Don't copy the context back if we chained the exception,
 122 * as the handler may have modified the EXCEPTION_POINTERS
 123 * directly. We don't pass sigcontext to chained handlers.
124 * Return continue search so the UnhandledExceptionFilter
125 * can correctly chain the exception.
127 res = EXCEPTION_CONTINUE_SEARCH;
128 } else {
129 /* Copy context back */
130 /* Nonvolatile */
131 ctx->Rsp = sctx->rsp;
132 ctx->Rdi = sctx->rdi;
133 ctx->Rsi = sctx->rsi;
134 ctx->Rbx = sctx->rbx;
135 ctx->Rbp = sctx->rbp;
136 ctx->R12 = sctx->r12;
137 ctx->R13 = sctx->r13;
138 ctx->R14 = sctx->r14;
139 ctx->R15 = sctx->r15;
140 ctx->Rip = sctx->rip;
 142 /* Volatile, but should not matter? */
143 ctx->Rax = sctx->rax;
144 ctx->Rcx = sctx->rcx;
145 ctx->Rdx = sctx->rdx;
148 /* TODO: Find right place to free this in stack overflow case */
149 if (er->ExceptionCode != EXCEPTION_STACK_OVERFLOW)
150 g_free (sctx);
152 return res;
155 void win32_seh_init()
157 mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
158 mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
161 void win32_seh_cleanup()
163 if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);
165 guint32 ret = 0;
167 ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
168 g_assert (ret);
171 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
173 switch (type) {
174 case SIGFPE:
175 fpe_handler = handler;
176 break;
177 case SIGILL:
178 ill_handler = handler;
179 break;
180 case SIGSEGV:
181 segv_handler = handler;
182 break;
183 default:
184 break;
188 #endif /* TARGET_WIN32 */
191 * mono_arch_get_restore_context:
193 * Returns a pointer to a method which restores a previously saved sigcontext.
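 * Sketch of the returned routine's shape (an assumption read from the code
 * below, not a signature stated here): void (*restore_context) (MonoContext *ctx);
 * it reloads the registers from *ctx and jumps to ctx->rip, so it never returns
 * to its caller.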
195 gpointer
196 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
198 guint8 *start = NULL;
199 guint8 *code;
200 MonoJumpInfo *ji = NULL;
201 GSList *unwind_ops = NULL;
 203 /* restore_context (MonoContext *ctx) */
205 start = code = mono_global_codeman_reserve (256);
207 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
209 /* Restore all registers except %rip and %r11 */
210 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
211 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
212 amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
213 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
214 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
215 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
216 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
217 //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
218 //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
219 //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
220 amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
221 amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
222 amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
223 #if !defined(__native_client_codegen__)
224 amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
225 #endif
228 * The context resides on the stack, in the stack frame of the
229 * caller of this function. The stack pointer that we need to
230 * restore is potentially many stack frames higher up, so the
231 * distance between them can easily be more than the red zone
232 * size. Hence the stack pointer can be restored only after
233 * we have finished loading everything from the context.
235 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
236 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
237 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
239 /* jump to the saved IP */
240 amd64_jump_reg (code, AMD64_R11);
242 nacl_global_codeman_validate(&start, 256, &code);
244 mono_arch_flush_icache (start, code - start);
246 if (info)
247 *info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);
249 return start;
253 * mono_arch_get_call_filter:
255 * Returns a pointer to a method which calls an exception filter. We
256 * also use this function to call finally handlers (we pass NULL as
257 * @exc object in this case).
259 gpointer
260 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
262 guint8 *start;
263 int i;
264 guint8 *code;
265 guint32 pos;
266 MonoJumpInfo *ji = NULL;
267 GSList *unwind_ops = NULL;
268 const guint kMaxCodeSize = NACL_SIZE (128, 256);
270 start = code = mono_global_codeman_reserve (kMaxCodeSize);
272 /* call_filter (MonoContext *ctx, unsigned long eip) */
273 code = start;
275 /* Alloc new frame */
276 amd64_push_reg (code, AMD64_RBP);
277 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
279 /* Save callee saved regs */
280 pos = 0;
281 for (i = 0; i < AMD64_NREG; ++i)
282 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
283 amd64_push_reg (code, i);
284 pos += 8;
287 /* Save EBP */
288 pos += 8;
289 amd64_push_reg (code, AMD64_RBP);
291 /* Make stack misaligned, the call will make it aligned again */
292 if (! (pos & 8))
293 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
295 /* set new EBP */
296 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
297 /* load callee saved regs */
298 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
299 amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
300 amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
301 amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
302 #if !defined(__native_client_codegen__)
303 amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
304 #endif
305 #ifdef TARGET_WIN32
306 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
307 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
308 #endif
310 /* call the handler */
311 amd64_call_reg (code, AMD64_ARG_REG2);
313 if (! (pos & 8))
314 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
316 /* restore RBP */
317 amd64_pop_reg (code, AMD64_RBP);
319 /* Restore callee saved regs */
320 for (i = AMD64_NREG; i >= 0; --i)
321 if (AMD64_IS_CALLEE_SAVED_REG (i))
322 amd64_pop_reg (code, i);
324 amd64_leave (code);
325 amd64_ret (code);
327 g_assert ((code - start) < kMaxCodeSize);
329 nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
331 mono_arch_flush_icache (start, code - start);
333 if (info)
334 *info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);
336 return start;
 340 * The first few arguments are dummies, to force the other arguments to be passed on
 341 * the stack; this avoids overwriting the argument registers in the throw trampoline.
343 void
344 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
345 guint64 dummy5, guint64 dummy6,
346 mgreg_t *regs, mgreg_t rip,
347 MonoObject *exc, gboolean rethrow)
349 MonoContext ctx;
351 ctx.rsp = regs [AMD64_RSP];
352 ctx.rip = rip;
353 ctx.rbx = regs [AMD64_RBX];
354 ctx.rbp = regs [AMD64_RBP];
355 ctx.r12 = regs [AMD64_R12];
356 ctx.r13 = regs [AMD64_R13];
357 ctx.r14 = regs [AMD64_R14];
358 ctx.r15 = regs [AMD64_R15];
359 ctx.rdi = regs [AMD64_RDI];
360 ctx.rsi = regs [AMD64_RSI];
361 ctx.rax = regs [AMD64_RAX];
362 ctx.rcx = regs [AMD64_RCX];
363 ctx.rdx = regs [AMD64_RDX];
365 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
366 MonoException *mono_ex = (MonoException*)exc;
367 if (!rethrow)
368 mono_ex->stack_trace = NULL;
 371 /* adjust eip so that it points into the call instruction */
372 ctx.rip -= 1;
374 mono_handle_exception (&ctx, exc);
375 mono_restore_context (&ctx);
376 g_assert_not_reached ();
379 void
380 mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
381 guint64 dummy5, guint64 dummy6,
382 mgreg_t *regs, mgreg_t rip,
383 guint32 ex_token_index, gint64 pc_offset)
385 guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
386 MonoException *ex;
388 ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);
390 rip -= pc_offset;
392 /* Negate the ip adjustment done in mono_amd64_throw_exception () */
393 rip += 1;
395 mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, regs, rip, (MonoObject*)ex, FALSE);
398 static void
399 mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
400 guint64 dummy5, guint64 dummy6,
401 mgreg_t *regs, mgreg_t rip,
402 guint32 dummy7, gint64 dummy8)
404 /* Only the register parameters are valid */
405 MonoContext ctx;
407 ctx.rsp = regs [AMD64_RSP];
408 ctx.rip = rip;
409 ctx.rbx = regs [AMD64_RBX];
410 ctx.rbp = regs [AMD64_RBP];
411 ctx.r12 = regs [AMD64_R12];
412 ctx.r13 = regs [AMD64_R13];
413 ctx.r14 = regs [AMD64_R14];
414 ctx.r15 = regs [AMD64_R15];
415 ctx.rdi = regs [AMD64_RDI];
416 ctx.rsi = regs [AMD64_RSI];
417 ctx.rax = regs [AMD64_RAX];
418 ctx.rcx = regs [AMD64_RCX];
419 ctx.rdx = regs [AMD64_RDX];
421 mono_resume_unwind (&ctx);
425 * get_throw_trampoline:
427 * Generate a call to mono_amd64_throw_exception/
428 * mono_amd64_throw_corlib_exception.
430 static gpointer
431 get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
433 guint8* start;
434 guint8 *code;
435 MonoJumpInfo *ji = NULL;
436 GSList *unwind_ops = NULL;
437 int i, stack_size, arg_offsets [16], regs_offset, dummy_stack_space;
438 const guint kMaxCodeSize = NACL_SIZE (256, 512);
440 #ifdef TARGET_WIN32
441 dummy_stack_space = 6 * sizeof(mgreg_t); /* Windows expects stack space allocated for all 6 dummy args. */
442 #else
443 dummy_stack_space = 0;
444 #endif
446 start = code = mono_global_codeman_reserve (kMaxCodeSize);
448 /* The stack is unaligned on entry */
449 stack_size = 192 + 8 + dummy_stack_space;
451 code = start;
453 if (info)
454 unwind_ops = mono_arch_get_cie_program ();
456 /* Alloc frame */
457 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
458 if (info)
459 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
 462 * To hide Linux/Windows calling convention differences, we pass all the real
 463 * arguments on the stack; the 6 dummy values consume the argument registers.
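 *
 * A sketch of the frame laid out below (illustrative, not from the original
 * source): [rsp + 0 .. dummy_stack_space) is the stack space reserved for the
 * six dummy register arguments (Windows only), arg_offsets [0..3] hold the
 * four real stack arguments (regs, rip, exc/ex_token_index, rethrow/pc_offset),
 * and regs_offset is the start of the saved-register array indexed by the
 * AMD64_* register numbers.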
466 arg_offsets [0] = dummy_stack_space + 0;
467 arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
468 arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
469 arg_offsets [3] = dummy_stack_space + sizeof(mgreg_t) * 3;
470 regs_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
472 /* Save registers */
473 for (i = 0; i < AMD64_NREG; ++i)
474 if (i != AMD64_RSP)
475 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
476 /* Save RSP */
477 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
478 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
479 /* Set arg1 == regs */
480 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, regs_offset);
481 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
482 /* Set arg2 == eip */
483 if (llvm_abs)
484 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
485 else
486 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
487 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_RAX, sizeof(mgreg_t));
488 /* Set arg3 == exc/ex_token_index */
489 if (resume_unwind)
490 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
491 else
492 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG1, sizeof(mgreg_t));
493 /* Set arg4 == rethrow/pc offset */
494 if (resume_unwind) {
495 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], 0, sizeof(mgreg_t));
496 } else if (corlib) {
497 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [3], AMD64_ARG_REG2, sizeof(mgreg_t));
498 if (llvm_abs)
500 * The caller is LLVM code which passes the absolute address not a pc offset,
501 * so compensate by passing 0 as 'rip' and passing the negated abs address as
502 * the pc offset.
504 amd64_neg_membase (code, AMD64_RSP, arg_offsets [3]);
505 } else {
506 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], rethrow, sizeof(mgreg_t));
509 if (aot) {
510 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_amd64_throw_corlib_exception" : "mono_amd64_throw_exception");
511 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
512 } else {
513 amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
515 amd64_call_reg (code, AMD64_R11);
516 amd64_breakpoint (code);
518 mono_arch_flush_icache (start, code - start);
520 g_assert ((code - start) < kMaxCodeSize);
522 nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
524 if (info)
525 *info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);
527 return start;
531 * mono_arch_get_throw_exception:
533 * Returns a function pointer which can be used to raise
534 * exceptions. The returned function has the following
535 * signature: void (*func) (MonoException *exc);
538 gpointer
539 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
541 return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
544 gpointer
545 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
547 return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
551 * mono_arch_get_throw_corlib_exception:
553 * Returns a function pointer which can be used to raise
554 * corlib exceptions. The returned function has the following
555 * signature: void (*func) (guint32 ex_token, guint32 offset);
 556 * Here, offset is the offset which needs to be subtracted from the caller IP
557 * to get the IP of the throw. Passing the offset has the advantage that it
558 * needs no relocations in the caller.
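 * For example (illustrative): if the instruction that should appear to throw
 * lies N bytes before the caller IP captured by the trampoline, the JIT passes
 * offset == N, and mono_amd64_throw_corlib_exception () rebuilds the throw IP
 * as rip - pc_offset.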
560 gpointer
561 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
563 return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
567 * mono_arch_find_jit_info:
 569 * This function is used to gather information from @ctx, and store it in @frame.
570 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
571 * is modified if needed.
572 * Returns TRUE on success, FALSE otherwise.
574 gboolean
575 mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
576 MonoJitInfo *ji, MonoContext *ctx,
577 MonoContext *new_ctx, MonoLMF **lmf,
578 mgreg_t **save_locations,
579 StackFrameInfo *frame)
581 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
583 memset (frame, 0, sizeof (StackFrameInfo));
584 frame->ji = ji;
586 *new_ctx = *ctx;
588 if (ji != NULL) {
589 mgreg_t regs [MONO_MAX_IREGS + 1];
590 guint8 *cfa;
591 guint32 unwind_info_len;
592 guint8 *unwind_info;
594 frame->type = FRAME_TYPE_MANAGED;
596 if (ji->from_aot)
597 unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
598 else
599 unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);
601 frame->unwind_info = unwind_info;
602 frame->unwind_info_len = unwind_info_len;
605 printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
606 mono_print_unwind_info (unwind_info, unwind_info_len);
609 regs [AMD64_RAX] = new_ctx->rax;
610 regs [AMD64_RBX] = new_ctx->rbx;
611 regs [AMD64_RCX] = new_ctx->rcx;
612 regs [AMD64_RDX] = new_ctx->rdx;
613 regs [AMD64_RBP] = new_ctx->rbp;
614 regs [AMD64_RSP] = new_ctx->rsp;
615 regs [AMD64_RSI] = new_ctx->rsi;
616 regs [AMD64_RDI] = new_ctx->rdi;
617 regs [AMD64_RIP] = new_ctx->rip;
618 regs [AMD64_R12] = new_ctx->r12;
619 regs [AMD64_R13] = new_ctx->r13;
620 regs [AMD64_R14] = new_ctx->r14;
621 regs [AMD64_R15] = new_ctx->r15;
623 mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
624 (guint8*)ji->code_start + ji->code_size,
625 ip, regs, MONO_MAX_IREGS + 1,
626 save_locations, MONO_MAX_IREGS, &cfa);
628 new_ctx->rax = regs [AMD64_RAX];
629 new_ctx->rbx = regs [AMD64_RBX];
630 new_ctx->rcx = regs [AMD64_RCX];
631 new_ctx->rdx = regs [AMD64_RDX];
632 new_ctx->rbp = regs [AMD64_RBP];
633 new_ctx->rsp = regs [AMD64_RSP];
634 new_ctx->rsi = regs [AMD64_RSI];
635 new_ctx->rdi = regs [AMD64_RDI];
636 new_ctx->rip = regs [AMD64_RIP];
637 new_ctx->r12 = regs [AMD64_R12];
638 new_ctx->r13 = regs [AMD64_R13];
639 new_ctx->r14 = regs [AMD64_R14];
640 new_ctx->r15 = regs [AMD64_R15];
642 /* The CFA becomes the new SP value */
643 new_ctx->rsp = (mgreg_t)cfa;
645 /* Adjust IP */
646 new_ctx->rip --;
648 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
649 /* remove any unused lmf */
650 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~7);
653 #ifndef MONO_AMD64_NO_PUSHES
654 /* Pop arguments off the stack */
655 if (ji->has_arch_eh_info)
656 new_ctx->rsp += mono_jit_info_get_arch_eh_info (ji)->stack_size;
657 #endif
659 return TRUE;
660 } else if (*lmf) {
661 guint64 rip;
663 if (((guint64)(*lmf)->previous_lmf) & 2) {
665 * This LMF entry is created by the soft debug code to mark transitions to
666 * managed code done during invokes.
668 MonoLMFExt *ext = (MonoLMFExt*)(*lmf);
670 g_assert (ext->debugger_invoke);
672 memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
674 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~7);
676 frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
678 return TRUE;
681 if (((guint64)(*lmf)->previous_lmf) & 1) {
682 /* This LMF has the rip field set */
683 rip = (*lmf)->rip;
684 } else if ((*lmf)->rsp == 0) {
685 /* Top LMF entry */
686 return FALSE;
687 } else {
689 * The rsp field is set just before the call which transitioned to native
690 * code. Obtain the rip from the stack.
692 rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
695 ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
697 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
698 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
699 * return address.
701 //g_assert (ji);
702 if (!ji)
703 return FALSE;
705 /* Adjust IP */
706 rip --;
708 frame->ji = ji;
709 frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
711 new_ctx->rip = rip;
712 new_ctx->rbp = (*lmf)->rbp;
713 new_ctx->rsp = (*lmf)->rsp;
715 if (((guint64)(*lmf)->previous_lmf) & 4) {
716 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
718 /* Trampoline frame */
719 new_ctx->rbx = ext->regs [AMD64_RBX];
720 new_ctx->r12 = ext->regs [AMD64_R12];
721 new_ctx->r13 = ext->regs [AMD64_R13];
722 new_ctx->r14 = ext->regs [AMD64_R14];
723 new_ctx->r15 = ext->regs [AMD64_R15];
724 #ifdef TARGET_WIN32
725 new_ctx->rdi = ext->regs [AMD64_RDI];
726 new_ctx->rsi = ext->regs [AMD64_RSI];
727 #endif
728 } else {
730 * The registers saved in the LMF will be restored using the normal unwind info,
731 * when the wrapper frame is processed.
733 new_ctx->rbx = 0;
734 new_ctx->r12 = 0;
735 new_ctx->r13 = 0;
736 new_ctx->r14 = 0;
737 new_ctx->r15 = 0;
738 #ifdef TARGET_WIN32
739 new_ctx->rdi = 0;
740 new_ctx->rsi = 0;
741 #endif
744 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~7);
746 return TRUE;
749 return FALSE;
 753 * handle_signal_exception:
 755 * Called when resuming from a signal handler.
757 static void
758 handle_signal_exception (gpointer obj)
760 MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
761 MonoContext ctx;
763 memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));
765 mono_handle_exception (&ctx, obj);
767 mono_restore_context (&ctx);
770 void
771 mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
773 guint64 sp = ctx->rsp;
775 ctx->rdi = (guint64)user_data;
777 /* Allocate a stack frame below the red zone */
778 sp -= 128;
779 /* The stack should be unaligned */
780 if ((sp % 16) == 0)
781 sp -= 8;
782 #ifdef __linux__
783 /* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
784 *(guint64*)sp = ctx->rip;
785 #endif
786 ctx->rsp = sp;
787 ctx->rip = (guint64)async_cb;
791 * mono_arch_handle_exception:
793 * @ctx: saved processor state
794 * @obj: the exception object
796 gboolean
797 mono_arch_handle_exception (void *sigctx, gpointer obj)
799 #if defined(MONO_ARCH_USE_SIGACTION)
800 MonoContext mctx;
803 * Handling the exception in the signal handler is problematic, since the original
 804 * signal is disabled, and we could run arbitrary code through the debugger. So
805 * resume into the normal stack and do most work there if possible.
807 MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
809 /* Pass the ctx parameter in TLS */
810 mono_arch_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);
812 mctx = jit_tls->ex_ctx;
813 mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
814 mono_monoctx_to_sigctx (&mctx, sigctx);
816 return TRUE;
817 #else
818 MonoContext mctx;
820 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
822 mono_handle_exception (&mctx, obj);
824 mono_arch_monoctx_to_sigctx (&mctx, sigctx);
826 return TRUE;
827 #endif
830 void
831 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
833 mono_sigctx_to_monoctx (sigctx, mctx);
836 void
837 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
839 mono_monoctx_to_sigctx (mctx, sigctx);
842 gpointer
843 mono_arch_ip_from_context (void *sigctx)
845 #if defined(MONO_ARCH_USE_SIGACTION)
846 ucontext_t *ctx = (ucontext_t*)sigctx;
848 return (gpointer)UCONTEXT_REG_RIP (ctx);
849 #else
850 MonoContext *ctx = sigctx;
851 return (gpointer)ctx->rip;
852 #endif
855 static void
856 restore_soft_guard_pages (void)
858 MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
859 if (jit_tls->stack_ovf_guard_base)
860 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
 864 * This function modifies mctx so that, when it is restored, execution
 865 * does not resume at mctx.eip but in a function that
 866 * restores the protection on the soft-guard pages and then returns
 867 * to continue at mctx.eip.
869 static void
870 prepare_for_guard_pages (MonoContext *mctx)
872 gpointer *sp;
873 sp = (gpointer)(mctx->rsp);
874 sp -= 1;
875 /* the return addr */
876 sp [0] = (gpointer)(mctx->rip);
877 mctx->rip = (guint64)restore_soft_guard_pages;
878 mctx->rsp = (guint64)sp;
881 static void
882 altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
884 MonoContext mctx;
886 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
888 mono_handle_exception (&mctx, obj);
889 if (stack_ovf)
890 prepare_for_guard_pages (&mctx);
891 mono_restore_context (&mctx);
894 void
895 mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
897 #if defined(MONO_ARCH_USE_SIGACTION)
898 MonoException *exc = NULL;
899 ucontext_t *ctx = (ucontext_t*)sigctx;
900 MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)UCONTEXT_REG_RIP (sigctx), NULL);
901 gpointer *sp;
902 int frame_size;
903 ucontext_t *copied_ctx;
905 if (stack_ovf)
906 exc = mono_domain_get ()->stack_overflow_ex;
907 if (!ji)
908 mono_handle_native_sigsegv (SIGSEGV, sigctx);
 910 /* set up a call frame on the real stack so that control is returned there
 911 * and exception handling can continue.
 912 * The frame looks like:
 913 * ucontext struct
 914 * ...
 915 * return ip
 916 * (128 below is the size of the red zone)
918 frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
919 #ifdef __APPLE__
920 frame_size += sizeof (*ctx->uc_mcontext);
921 #endif
922 frame_size += 15;
923 frame_size &= ~15;
924 sp = (gpointer)(UCONTEXT_REG_RSP (sigctx) & ~15);
925 sp = (gpointer)((char*)sp - frame_size);
926 copied_ctx = (ucontext_t*)(sp + 4);
927 /* the arguments must be aligned */
928 sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
929 /* may need to adjust pointers in the new struct copy, depending on the OS */
930 memcpy (copied_ctx, ctx, sizeof (ucontext_t));
931 #ifdef __APPLE__
933 guint8 * copied_mcontext = (guint8*)copied_ctx + sizeof (ucontext_t);
934 /* uc_mcontext is a pointer, so make a copy which is stored after the ctx */
935 memcpy (copied_mcontext, ctx->uc_mcontext, sizeof (*ctx->uc_mcontext));
936 copied_ctx->uc_mcontext = (void*)copied_mcontext;
938 #endif
 939 /* on return from the signal handler, execution starts in altstack_handle_and_restore () */
940 UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
941 UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
942 UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
943 UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
944 UCONTEXT_REG_RDX (sigctx) = stack_ovf;
945 #endif
948 guint64
949 mono_amd64_get_original_ip (void)
951 MonoLMF *lmf = mono_get_lmf ();
953 g_assert (lmf);
955 /* Reset the change to previous_lmf */
956 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
958 return lmf->rip;
961 gpointer
962 mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot)
964 guint8 *code, *start;
965 guint8 *br[1];
966 gpointer throw_trampoline;
967 MonoJumpInfo *ji = NULL;
968 GSList *unwind_ops = NULL;
969 const guint kMaxCodeSize = NACL_SIZE (128, 256);
971 start = code = mono_global_codeman_reserve (kMaxCodeSize);
973 /* We are in the frame of a managed method after a call */
975 * We would like to throw the pending exception in such a way that it looks to
976 * be thrown from the managed method.
979 /* Save registers which might contain the return value of the call */
980 amd64_push_reg (code, AMD64_RAX);
981 amd64_push_reg (code, AMD64_RDX);
983 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
984 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
986 /* Align stack */
987 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
989 /* Obtain the pending exception */
990 if (aot) {
991 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
992 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
993 } else {
994 amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
996 amd64_call_reg (code, AMD64_R11);
998 /* Check if it is NULL, and branch */
999 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
1000 br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
1002 /* exc != NULL branch */
1004 /* Save the exc on the stack */
1005 amd64_push_reg (code, AMD64_RAX);
1006 /* Align stack */
1007 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
1009 /* Obtain the original ip and clear the flag in previous_lmf */
1010 if (aot) {
1011 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
1012 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
1013 } else {
1014 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
1016 amd64_call_reg (code, AMD64_R11);
1018 /* Load exc */
1019 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
1021 /* Pop saved stuff from the stack */
1022 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
1024 /* Setup arguments for the throw trampoline */
1025 /* Exception */
1026 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
1027 /* The trampoline expects the caller ip to be pushed on the stack */
1028 amd64_push_reg (code, AMD64_RAX);
1030 /* Call the throw trampoline */
1031 if (aot) {
1032 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
1033 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
1034 } else {
1035 throw_trampoline = mono_get_throw_exception ();
1036 amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
1038 /* We use a jump instead of a call so we can push the original ip on the stack */
1039 amd64_jump_reg (code, AMD64_R11);
1041 /* ex == NULL branch */
1042 mono_amd64_patch (br [0], code);
1044 /* Obtain the original ip and clear the flag in previous_lmf */
1045 if (aot) {
1046 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
1047 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
1048 } else {
1049 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
1051 amd64_call_reg (code, AMD64_R11);
1052 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
1054 /* Restore registers */
1055 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1056 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
1057 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1058 amd64_pop_reg (code, AMD64_RDX);
1059 amd64_pop_reg (code, AMD64_RAX);
1061 /* Return to original code */
1062 amd64_jump_reg (code, AMD64_R11);
1064 g_assert ((code - start) < kMaxCodeSize);
1066 nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
1068 if (info)
1069 *info = mono_tramp_info_create ("throw_pending_exception", start, code - start, ji, unwind_ops);
1071 return start;
1074 static gpointer throw_pending_exception;
1077 * Called when a thread receives an async exception while executing unmanaged code.
1078 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
1079 * the return address on the stack to point to a helper routine which throws the
1080 * exception.
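 * Concretely (a reading of the code below, not new behaviour): the word at
 * lmf->rsp - 8, i.e. the return address pushed by the call that entered native
 * code, is replaced with throw_pending_exception, and the original value is
 * stashed in lmf->rip.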
1082 void
1083 mono_arch_notify_pending_exc (void)
1085 MonoLMF *lmf = mono_get_lmf ();
1087 if (!lmf)
1088 /* Not yet started */
1089 return;
1091 if (lmf->rsp == 0)
1092 /* Initial LMF */
1093 return;
1095 if ((guint64)lmf->previous_lmf & 1)
1096 /* Already hijacked or trampoline LMF entry */
1097 return;
1099 /* lmf->rsp is set just before making the call which transitions to unmanaged code */
1100 lmf->rip = *(guint64*)(lmf->rsp - 8);
1101 /* Signal that lmf->rip is set */
1102 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
1104 *(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
1107 GSList*
1108 mono_amd64_get_exception_trampolines (gboolean aot)
1110 MonoTrampInfo *info;
1111 GSList *tramps = NULL;
1113 mono_arch_get_throw_pending_exception (&info, aot);
1114 tramps = g_slist_prepend (tramps, info);
1116 /* LLVM needs different throw trampolines */
1117 get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
1118 tramps = g_slist_prepend (tramps, info);
1120 get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
1121 tramps = g_slist_prepend (tramps, info);
1123 get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", FALSE);
1124 tramps = g_slist_prepend (tramps, info);
1126 return tramps;
1129 void
1130 mono_arch_exceptions_init (void)
1132 GSList *tramps, *l;
1133 gpointer tramp;
1135 if (mono_aot_only) {
1136 throw_pending_exception = mono_aot_get_trampoline ("throw_pending_exception");
1137 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
1138 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
1139 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
1140 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
1141 tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
1142 mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
1143 } else {
1144 /* Call this to avoid initialization races */
1145 throw_pending_exception = mono_arch_get_throw_pending_exception (NULL, FALSE);
1147 tramps = mono_amd64_get_exception_trampolines (FALSE);
1148 for (l = tramps; l; l = l->next) {
1149 MonoTrampInfo *info = l->data;
1151 mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
1152 mono_tramp_info_register (info);
1154 g_slist_free (tramps);
1158 #ifdef TARGET_WIN32
 1161 * The mono_arch_unwindinfo* methods are used to build and register
 1162 * function table info for each method emitted by mono. On Win64
 1163 * the SEH handler will not be called if mono's methods are not
 1164 * added to the function table.
 1166 * We should not need to add non-volatile register info to the
 1167 * table, since mono stores that info elsewhere (except for the register
 1168 * used for the frame pointer).
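 *
 * A minimal usage sketch (illustrative only; the specific registers and the
 * 0x40 frame size are assumptions, not values taken from this file):
 *
 *   gpointer ui = NULL;
 *   guint8 *start = code;
 *   amd64_push_reg (code, AMD64_RBP);
 *   mono_arch_unwindinfo_add_push_nonvol (&ui, start, code, AMD64_RBP);
 *   amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
 *   mono_arch_unwindinfo_add_set_fpreg (&ui, start, code, AMD64_RBP);
 *   amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x40);
 *   mono_arch_unwindinfo_add_alloc_stack (&ui, start, code, 0x40);
 *   ...
 *   mono_arch_unwindinfo_install_unwind_info (&ui, start, code - start);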
1171 #define MONO_MAX_UNWIND_CODES 22
1173 typedef union _UNWIND_CODE {
1174 struct {
1175 guchar CodeOffset;
1176 guchar UnwindOp : 4;
1177 guchar OpInfo : 4;
1179 gushort FrameOffset;
1180 } UNWIND_CODE, *PUNWIND_CODE;
1182 typedef struct _UNWIND_INFO {
1183 guchar Version : 3;
1184 guchar Flags : 5;
1185 guchar SizeOfProlog;
1186 guchar CountOfCodes;
1187 guchar FrameRegister : 4;
1188 guchar FrameOffset : 4;
 1189 /* custom size for mono, allowing for: */
1190 /*UWOP_PUSH_NONVOL ebp offset = 21*/
1191 /*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
1192 /*UWOP_SET_FPREG : requires 2 offset = 17*/
1193 /*UWOP_PUSH_NONVOL offset = 15-0*/
1194 UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
1196 /* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
1197 * union {
1198 * OPTIONAL ULONG ExceptionHandler;
1199 * OPTIONAL ULONG FunctionEntry;
1200 * };
1201 * OPTIONAL ULONG ExceptionData[]; */
1202 } UNWIND_INFO, *PUNWIND_INFO;
1204 typedef struct
1206 RUNTIME_FUNCTION runtimeFunction;
1207 UNWIND_INFO unwindInfo;
1208 } MonoUnwindInfo, *PMonoUnwindInfo;
1210 static void
1211 mono_arch_unwindinfo_create (gpointer* monoui)
1213 PMonoUnwindInfo newunwindinfo;
1214 *monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
1215 newunwindinfo->unwindInfo.Version = 1;
1218 void
1219 mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1221 PMonoUnwindInfo unwindinfo;
1222 PUNWIND_CODE unwindcode;
1223 guchar codeindex;
1224 if (!*monoui)
1225 mono_arch_unwindinfo_create (monoui);
1227 unwindinfo = (MonoUnwindInfo*)*monoui;
1229 if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
1230 g_error ("Larger allocation needed for the unwind information.");
1232 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
1233 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1234 unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
1235 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1236 unwindcode->OpInfo = reg;
1238 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1239 g_error ("Adding unwind info in wrong order.");
1241 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
1244 void
1245 mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1247 PMonoUnwindInfo unwindinfo;
1248 PUNWIND_CODE unwindcode;
1249 guchar codeindex;
1250 if (!*monoui)
1251 mono_arch_unwindinfo_create (monoui);
1253 unwindinfo = (MonoUnwindInfo*)*monoui;
1255 if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1256 g_error ("Larger allocation needed for the unwind information.");
1258 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
1259 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1260 unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
1261 unwindcode++;
1262 unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
1263 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1264 unwindcode->OpInfo = reg;
1266 unwindinfo->unwindInfo.FrameRegister = reg;
1268 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1269 g_error ("Adding unwind info in wrong order.");
1271 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
1274 void
1275 mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
1277 PMonoUnwindInfo unwindinfo;
1278 PUNWIND_CODE unwindcode;
1279 guchar codeindex;
1280 guchar codesneeded;
1281 if (!*monoui)
1282 mono_arch_unwindinfo_create (monoui);
1284 unwindinfo = (MonoUnwindInfo*)*monoui;
1286 if (size < 0x8)
1287 g_error ("Stack allocation must be equal to or greater than 0x8.");
1289 if (size <= 0x80)
1290 codesneeded = 1;
1291 else if (size <= 0x7FFF8)
1292 codesneeded = 2;
1293 else
1294 codesneeded = 3;
1296 if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1297 g_error ("Larger allocation needed for the unwind information.");
1299 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
1300 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1302 if (codesneeded == 1) {
 1303 /* The size of the allocation is
 1304 * (the number in the OpInfo member) times 8, plus 8. */
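 /* e.g. an illustrative size of 0x28 bytes is encoded as OpInfo == (0x28 - 8) / 8 == 4 */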
1305 unwindcode->OpInfo = (size - 8)/8;
1306 unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
1308 else {
1309 if (codesneeded == 3) {
1310 /*the unscaled size of the allocation is recorded
1311 in the next two slots in little-endian format*/
1312 *((unsigned int*)(&unwindcode->FrameOffset)) = size;
1313 unwindcode += 2;
1314 unwindcode->OpInfo = 1;
1316 else {
1317 /*the size of the allocation divided by 8
1318 is recorded in the next slot*/
1319 unwindcode->FrameOffset = size/8;
1320 unwindcode++;
1321 unwindcode->OpInfo = 0;
1324 unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
1327 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1329 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1330 g_error ("Adding unwind info in wrong order.");
1332 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
1335 guint
1336 mono_arch_unwindinfo_get_size (gpointer monoui)
1338 PMonoUnwindInfo unwindinfo;
1339 if (!monoui)
1340 return 0;
1342 unwindinfo = (MonoUnwindInfo*)monoui;
1343 return (8 + sizeof (MonoUnwindInfo)) -
1344 (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
1347 PRUNTIME_FUNCTION
1348 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1350 MonoJitInfo *ji;
1351 guint64 pos;
1352 PMonoUnwindInfo targetinfo;
1353 MonoDomain *domain = mono_domain_get ();
1355 ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL);
1356 if (!ji)
1357 return 0;
1359 pos = (guint64)(((char*)ji->code_start) + ji->code_size);
1361 targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);
1363 targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);
1365 return &targetinfo->runtimeFunction;
1368 void
1369 mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
1371 PMonoUnwindInfo unwindinfo, targetinfo;
1372 guchar codecount;
1373 guint64 targetlocation;
1374 if (!*monoui)
1375 return;
1377 unwindinfo = (MonoUnwindInfo*)*monoui;
1378 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1379 targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);
1381 unwindinfo->runtimeFunction.EndAddress = code_size;
1382 unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);
1384 memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1386 codecount = unwindinfo->unwindInfo.CountOfCodes;
1387 if (codecount) {
1388 memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
1389 sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
1392 g_free (unwindinfo);
1393 *monoui = 0;
1395 RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
1398 #endif
1400 #if MONO_SUPPORT_TASKLETS
1401 MonoContinuationRestore
1402 mono_tasklets_arch_restore (void)
1404 static guint8* saved = NULL;
1405 guint8 *code, *start;
1406 int cont_reg = AMD64_R9; /* register usable on both call conventions */
1407 const guint kMaxCodeSize = NACL_SIZE (64, 128);
1410 if (saved)
1411 return (MonoContinuationRestore)saved;
1412 code = start = mono_global_codeman_reserve (kMaxCodeSize);
1413 /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
1414 /* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
1415 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
1416 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
 1417 * We move cont to cont_reg since we need both rcx and rdi for the copy;
 1418 * state is moved to $rax so it's set up as the return value and we can overwrite $rsi.
1420 amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
1421 amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
1422 /* setup the copy of the stack */
1423 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
1424 amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
1425 x86_cld (code);
1426 amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
1427 amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
1428 amd64_prefix (code, X86_REP_PREFIX);
1429 amd64_movsl (code);
1431 /* now restore the registers from the LMF */
1432 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, lmf), 8);
1433 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbx), 8);
1434 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbp), 8);
1435 amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8);
1436 amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8);
1437 amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8);
1438 #if !defined(__native_client_codegen__)
1439 amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8);
1440 #endif
1441 #ifdef TARGET_WIN32
1442 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8);
1443 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8);
1444 #endif
1445 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsp), 8);
1447 /* restore the lmf chain */
1448 /*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
1449 x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/
1451 /* state is already in rax */
1452 amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip));
1453 g_assert ((code - start) <= kMaxCodeSize);
1455 nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
1457 saved = start;
1458 return (MonoContinuationRestore)saved;
1460 #endif
1463 * mono_arch_setup_resume_sighandler_ctx:
1465 * Setup CTX so execution continues at FUNC.
1467 void
1468 mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
1471 * When resuming from a signal handler, the stack should be misaligned, just like right after
1472 * a call.
1474 if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
1475 MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
1476 MONO_CONTEXT_SET_IP (ctx, func);