/*
 * exceptions-amd64.c: exception support for AMD64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */

#include <config.h>
#include <glib.h>
#include <signal.h>
#include <string.h>
#ifdef HAVE_UCONTEXT_H
#include <ucontext.h>
#endif

#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini.h"
#include "mini-amd64.h"
#include "tasklets.h"
#include "debug-mini.h"

#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
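/* For example, ALIGN_TO (13, 8) == 16, while ALIGN_TO (16, 8) stays 16. */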

#ifdef TARGET_WIN32
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;

#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler(0, er, sctx)
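
/*
 * For instance, W32_SEH_HANDLE_EX(segv) expands to
 *
 *   if (segv_handler) segv_handler(0, er, sctx);
 *
 * where er and sctx are locals of the enclosing filter below.
 */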

/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
 */
LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	MonoContext* sctx;
	LONG res;

	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;
	sctx = g_malloc(sizeof(MonoContext));

	/* Copy Win32 context to UNIX style context */
	sctx->rax = ctx->Rax;
	sctx->rbx = ctx->Rbx;
	sctx->rcx = ctx->Rcx;
	sctx->rdx = ctx->Rdx;
	sctx->rbp = ctx->Rbp;
	sctx->rsp = ctx->Rsp;
	sctx->rsi = ctx->Rsi;
	sctx->rdi = ctx->Rdi;
	sctx->rip = ctx->Rip;
	sctx->r12 = ctx->R12;
	sctx->r13 = ctx->R13;
	sctx->r14 = ctx->R14;
	sctx->r15 = ctx->R15;

	switch (er->ExceptionCode) {
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		break;
	}

	/* Copy context back */
	/* Nonvolatile registers */
	ctx->Rsp = sctx->rsp;
	ctx->Rdi = sctx->rdi;
	ctx->Rsi = sctx->rsi;
	ctx->Rbx = sctx->rbx;
	ctx->Rbp = sctx->rbp;
	ctx->R12 = sctx->r12;
	ctx->R13 = sctx->r13;
	ctx->R14 = sctx->r14;
	ctx->R15 = sctx->r15;
	ctx->Rip = sctx->rip;

	/* Volatile registers; restoring these should not matter here */
	ctx->Rax = sctx->rax;
	ctx->Rcx = sctx->rcx;
	ctx->Rdx = sctx->rdx;

	g_free (sctx);

	return res;
}

void win32_seh_init()
{
	old_handler = SetUnhandledExceptionFilter(seh_handler);
}

void win32_seh_cleanup()
{
	if (old_handler) SetUnhandledExceptionFilter(old_handler);
}

void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* TARGET_WIN32 */

/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;

	/* restore_context (MonoContext *ctx) */

	*ji = NULL;

	start = code = mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	//amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
	//amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
	//amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);

	if (mono_running_on_valgrind ()) {
		/* Prevent 'Address 0x... is just below the stack ptr.' errors */
		amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
		amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
	} else {
		amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
		/* get return address */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
	}

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	mono_arch_flush_icache (start, code - start);

	*code_size = code - start;

	return start;
}
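
/*
 * Usage sketch (hypothetical caller): the stub emitted above behaves like a
 * void (*) (MonoContext *) that never returns:
 *
 *   void (*restore) (MonoContext *) = mono_get_restore_context ();
 *   restore (&ctx); // resumes at ctx.rip with the saved register state
 */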

/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *start;
	int i;
	guint8 *code;
	guint32 pos;

	*ji = NULL;

	start = code = mono_global_codeman_reserve (128);

	/* call_filter (MonoContext *ctx, unsigned long eip) */
	code = start;

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	pos = 0;
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
			pos += 8;
		}

	/* Save RBP */
	pos += 8;
	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* set new RBP */
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	/* load callee saved regs */
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
#ifdef TARGET_WIN32
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
#endif

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	/* restore RBP */
	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

	amd64_leave (code);
	amd64_ret (code);

	g_assert ((code - start) < 128);

	mono_arch_flush_icache (start, code - start);

	*code_size = code - start;

	return start;
}
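
/*
 * Usage sketch (hypothetical caller): the returned stub is invoked roughly as
 *
 *   int (*call_filter) (MonoContext *ctx, unsigned long eip) = ...;
 *   res = call_filter (ctx, (unsigned long)handler_start);
 */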

/*
 * The first few arguments are dummy, to force the other arguments to be passed on
 * the stack, this avoids overwriting the argument registers in the throw trampoline.
 */
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
			    guint64 dummy5, guint64 dummy6,
			    MonoObject *exc, guint64 rip, guint64 rsp,
			    guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
			    guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
			    guint64 rax, guint64 rcx, guint64 rdx,
			    guint64 rethrow)
{
	static void (*restore_context) (MonoContext *);
	MonoContext ctx;

	if (!restore_context)
		restore_context = mono_get_restore_context ();

	ctx.rsp = rsp;
	ctx.rip = rip;
	ctx.rbx = rbx;
	ctx.rbp = rbp;
	ctx.r12 = r12;
	ctx.r13 = r13;
	ctx.r14 = r14;
	ctx.r15 = r15;
	ctx.rdi = rdi;
	ctx.rsi = rsi;
	ctx.rax = rax;
	ctx.rcx = rcx;
	ctx.rdx = rdx;

	if (mono_object_isinst (exc, mono_defaults.exception_class)) {
		MonoException *mono_ex = (MonoException*)exc;
		if (!rethrow)
			mono_ex->stack_trace = NULL;
	}

	if (mono_debug_using_mono_debugger ()) {
		guint8 buf [16], *code;

		mono_breakpoint_clean_code (NULL, (gpointer)rip, 8, buf, sizeof (buf));
		code = buf + 8;

		if (buf [3] == 0xe8) {
			MonoContext ctx_cp = ctx;
			ctx_cp.rip = rip - 5;

			if (mono_debugger_handle_exception (&ctx_cp, exc)) {
				restore_context (&ctx_cp);
				g_assert_not_reached ();
			}
		}
	}

	/* adjust the IP so that it points into the call instruction */
	ctx.rip -= 1;

	mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
	restore_context (&ctx);

	g_assert_not_reached ();
}

static gpointer
get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8* start;
	guint8 *code;

	start = code = mono_global_codeman_reserve (64);

	code = start;

	*ji = NULL;

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
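
	/*
	 * The pushes below build the stack-based arguments of
	 * mono_amd64_throw_exception (); after them, the values at increasing
	 * stack addresses are exc, rip, rsp, rbx, rbp, r12, r13, r14, r15, rdi,
	 * rsi, rax, rcx, rdx, rethrow, i.e. the declared order after the dummies.
	 */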
	/* reverse order */
	amd64_push_imm (code, rethrow);
	amd64_push_reg (code, AMD64_RDX);
	amd64_push_reg (code, AMD64_RCX);
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RSI);
	amd64_push_reg (code, AMD64_RDI);
	amd64_push_reg (code, AMD64_R15);
	amd64_push_reg (code, AMD64_R14);
	amd64_push_reg (code, AMD64_R13);
	amd64_push_reg (code, AMD64_R12);
	amd64_push_reg (code, AMD64_RBP);
	amd64_push_reg (code, AMD64_RBX);

	/* SP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
	amd64_push_reg (code, AMD64_RAX);

	/* IP */
	amd64_push_membase (code, AMD64_R11, 0);

	/* Exception */
	amd64_push_reg (code, AMD64_ARG_REG1);

#ifdef TARGET_WIN32
	/* align stack */
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
#endif

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_throw_exception);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < 64);

	*code_size = code - start;

	return start;
}

/**
 * mono_arch_get_throw_exception:
 *
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 */
gpointer
mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	return get_throw_trampoline (FALSE, code_size, ji, aot);
}

gpointer
mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	return get_throw_trampoline (TRUE, code_size, ji, aot);
}

gpointer
mono_arch_get_throw_exception_by_name_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8* start;
	guint8 *code;

	start = code = mono_global_codeman_reserve (64);

	*ji = NULL;

	/* Not used on amd64 */
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	*code_size = code - start;

	return start;
}

/**
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
gpointer
mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	static guint8* start;
	guint8 *code;
	guint64 throw_ex;

	start = code = mono_global_codeman_reserve (64);

	*ji = NULL;

	/* Push throw_ip */
	amd64_push_reg (code, AMD64_ARG_REG2);

	/* Call exception_from_token */
	amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_IMAGE, mono_defaults.exception_class->image);
		amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RIP, 0, 8);
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
		amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
	}
#ifdef TARGET_WIN32
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
#endif
	amd64_call_reg (code, AMD64_R11);
#ifdef TARGET_WIN32
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32);
#endif

	/* Compute throw_ip */
	amd64_pop_reg (code, AMD64_ARG_REG2);
	/* return addr */
	amd64_pop_reg (code, AMD64_ARG_REG3);
	amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);

	/* Put the throw_ip at the top of the misaligned stack */
	amd64_push_reg (code, AMD64_ARG_REG3);

	throw_ex = (guint64)mono_get_throw_exception ();

	/* Call throw_exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
	}
	/* The original IP is on the stack */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < 64);

	mono_arch_flush_icache (start, code - start);

	*code_size = code - start;

	return start;
}
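
/*
 * Usage sketch: JITted code raises a corlib exception roughly as
 *
 *   func (exc_class_token, caller_ip - throw_ip);
 *
 * so the stub can recover the throw IP from its own return address without
 * needing a relocation in the caller.
 */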

/*
 * mono_arch_find_jit_info_ext:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 * This function is a version of mono_arch_find_jit_info () where all the results are
 * returned in a StackFrameInfo structure.
 */
542 mono_arch_find_jit_info_ext (MonoDomain *domain, MonoJitTlsData *jit_tls,
543 MonoJitInfo *ji, MonoContext *ctx,
544 MonoContext *new_ctx, MonoLMF **lmf,
545 StackFrameInfo *frame)
547 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
549 memset (frame, 0, sizeof (StackFrameInfo));
550 frame->ji = ji;
551 frame->managed = FALSE;
553 *new_ctx = *ctx;
555 if (ji != NULL) {
556 gssize regs [MONO_MAX_IREGS + 1];
557 guint8 *cfa;
558 guint32 unwind_info_len;
559 guint8 *unwind_info;
561 frame->type = FRAME_TYPE_MANAGED;
563 if (!ji->method->wrapper_type || ji->method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD)
564 frame->managed = TRUE;
566 if (ji->from_aot)
567 unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
568 else
569 unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);
571 regs [AMD64_RAX] = new_ctx->rax;
572 regs [AMD64_RBX] = new_ctx->rbx;
573 regs [AMD64_RCX] = new_ctx->rcx;
574 regs [AMD64_RDX] = new_ctx->rdx;
575 regs [AMD64_RBP] = new_ctx->rbp;
576 regs [AMD64_RSP] = new_ctx->rsp;
577 regs [AMD64_RSI] = new_ctx->rsi;
578 regs [AMD64_RDI] = new_ctx->rdi;
579 regs [AMD64_RIP] = new_ctx->rip;
580 regs [AMD64_R12] = new_ctx->r12;
581 regs [AMD64_R13] = new_ctx->r13;
582 regs [AMD64_R14] = new_ctx->r14;
583 regs [AMD64_R15] = new_ctx->r15;
585 mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
586 (guint8*)ji->code_start + ji->code_size,
587 ip, regs, MONO_MAX_IREGS + 1, &cfa);
589 new_ctx->rax = regs [AMD64_RAX];
590 new_ctx->rbx = regs [AMD64_RBX];
591 new_ctx->rcx = regs [AMD64_RCX];
592 new_ctx->rdx = regs [AMD64_RDX];
593 new_ctx->rbp = regs [AMD64_RBP];
594 new_ctx->rsp = regs [AMD64_RSP];
595 new_ctx->rsi = regs [AMD64_RSI];
596 new_ctx->rdi = regs [AMD64_RDI];
597 new_ctx->rip = regs [AMD64_RIP];
598 new_ctx->r12 = regs [AMD64_R12];
599 new_ctx->r13 = regs [AMD64_R13];
600 new_ctx->r14 = regs [AMD64_R14];
601 new_ctx->r15 = regs [AMD64_R15];
603 /* The CFA becomes the new SP value */
604 new_ctx->rsp = (gssize)cfa;
606 /* Adjust IP */
607 new_ctx->rip --;
609 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
610 /* remove any unused lmf */
611 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
614 #ifndef MONO_AMD64_NO_PUSHES
615 /* Pop arguments off the stack */
617 MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
619 guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
620 new_ctx->rsp += stack_to_pop;
622 #endif
624 return TRUE;
625 } else if (*lmf) {
626 guint64 rip;
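
		/*
		 * Note: the low two bits of lmf->previous_lmf are used as tags here:
		 * bit 0 means lmf->rip is valid, bit 1 marks a debugger-invoke entry
		 * (MonoLMFExt); masking with ~3 recovers the real pointer.
		 */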
		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return FALSE;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
		}

		ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
		if (!ji) {
			// FIXME: This can happen with multiple appdomains (bug #444383)
			return FALSE;
		}

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		new_ctx->rip = rip;
		new_ctx->rbp = (*lmf)->rbp;
		new_ctx->rsp = (*lmf)->rsp;

		new_ctx->rbx = (*lmf)->rbx;
		new_ctx->r12 = (*lmf)->r12;
		new_ctx->r13 = (*lmf)->r13;
		new_ctx->r14 = (*lmf)->r14;
		new_ctx->r15 = (*lmf)->r15;
#ifdef TARGET_WIN32
		new_ctx->rdi = (*lmf)->rdi;
		new_ctx->rsi = (*lmf)->rsi;
#endif

		*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}

/**
 * mono_arch_handle_exception:
 *
 * @ctx: saved processor state
 * @obj: the exception object
 */
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
{
	MonoContext mctx;

	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj))
		return TRUE;

	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);

	mono_arch_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
}

#ifdef MONO_ARCH_USE_SIGACTION
static inline guint64*
gregs_from_ucontext (ucontext_t *ctx)
{
	return (guint64 *) UCONTEXT_GREGS (ctx);
}
#endif

void
mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
{
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	mctx->rax = gregs [REG_RAX];
	mctx->rbx = gregs [REG_RBX];
	mctx->rcx = gregs [REG_RCX];
	mctx->rdx = gregs [REG_RDX];
	mctx->rbp = gregs [REG_RBP];
	mctx->rsp = gregs [REG_RSP];
	mctx->rsi = gregs [REG_RSI];
	mctx->rdi = gregs [REG_RDI];
	mctx->rip = gregs [REG_RIP];
	mctx->r12 = gregs [REG_R12];
	mctx->r13 = gregs [REG_R13];
	mctx->r14 = gregs [REG_R14];
	mctx->r15 = gregs [REG_R15];
#else
	MonoContext *ctx = (MonoContext *)sigctx;

	mctx->rax = ctx->rax;
	mctx->rbx = ctx->rbx;
	mctx->rcx = ctx->rcx;
	mctx->rdx = ctx->rdx;
	mctx->rbp = ctx->rbp;
	mctx->rsp = ctx->rsp;
	mctx->rsi = ctx->rsi;
	mctx->rdi = ctx->rdi;
	mctx->rip = ctx->rip;
	mctx->r12 = ctx->r12;
	mctx->r13 = ctx->r13;
	mctx->r14 = ctx->r14;
	mctx->r15 = ctx->r15;
#endif
}

void
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
{
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	gregs [REG_RAX] = mctx->rax;
	gregs [REG_RBX] = mctx->rbx;
	gregs [REG_RCX] = mctx->rcx;
	gregs [REG_RDX] = mctx->rdx;
	gregs [REG_RBP] = mctx->rbp;
	gregs [REG_RSP] = mctx->rsp;
	gregs [REG_RSI] = mctx->rsi;
	gregs [REG_RDI] = mctx->rdi;
	gregs [REG_RIP] = mctx->rip;
	gregs [REG_R12] = mctx->r12;
	gregs [REG_R13] = mctx->r13;
	gregs [REG_R14] = mctx->r14;
	gregs [REG_R15] = mctx->r15;
#else
	MonoContext *ctx = (MonoContext *)sigctx;

	ctx->rax = mctx->rax;
	ctx->rbx = mctx->rbx;
	ctx->rcx = mctx->rcx;
	ctx->rdx = mctx->rdx;
	ctx->rbp = mctx->rbp;
	ctx->rsp = mctx->rsp;
	ctx->rsi = mctx->rsi;
	ctx->rdi = mctx->rdi;
	ctx->rip = mctx->rip;
	ctx->r12 = mctx->r12;
	ctx->r13 = mctx->r13;
	ctx->r14 = mctx->r14;
	ctx->r15 = mctx->r15;
#endif
}

gpointer
mono_arch_ip_from_context (void *sigctx)
{
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	return (gpointer)gregs [REG_RIP];
#else
	MonoContext *ctx = sigctx;
	return (gpointer)ctx->rip;
#endif
}

static void
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
}

/*
 * this function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.rip, but in a function that
 * will restore the protection on the soft-guard pages and return back to
 * continue at mctx.rip.
 */
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;
	sp = (gpointer)(mctx->rsp);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->rip);
	mctx->rip = (guint64)restore_soft_guard_pages;
	mctx->rsp = (guint64)sp;
}
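
/*
 * In effect this fakes a call frame: when the context is restored, execution
 * starts in restore_soft_guard_pages (), whose 'ret' then resumes at the
 * original mctx.rip with the guard pages re-protected.
 */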

static void
altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
{
	void (*restore_context) (MonoContext *);
	MonoContext mctx;

	restore_context = mono_get_restore_context ();
	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) {
		if (stack_ovf)
			prepare_for_guard_pages (&mctx);
		restore_context (&mctx);
	}

	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
	if (stack_ovf)
		prepare_for_guard_pages (&mctx);
	restore_context (&mctx);
}

void
mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
{
#ifdef MONO_ARCH_USE_SIGACTION
	MonoException *exc = NULL;
	ucontext_t *ctx = (ucontext_t*)sigctx;
	guint64 *gregs = gregs_from_ucontext (ctx);
	MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP], NULL);
	gpointer *sp;
	int frame_size;

	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;
	if (!ji)
		mono_handle_native_sigsegv (SIGSEGV, sigctx);

	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like:
	 *   ucontext struct
	 *   ...
	 *   return ip
	 * 128 is the size of the red zone
	 */
	frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
	frame_size += 15;
	frame_size &= ~15;
	sp = (gpointer)(gregs [REG_RSP] & ~15);
	sp = (gpointer)((char*)sp - frame_size);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)gregs [REG_RIP];
	/* may need to adjust pointers in the new struct copy, depending on the OS */
	memcpy (sp + 4, ctx, sizeof (ucontext_t));
	/* at the return from the signal handler, execution starts in altstack_handle_and_restore() */
	gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
	gregs [REG_RSP] = (unsigned long)(sp - 1);
	gregs [REG_RDI] = (unsigned long)(sp + 4);
	gregs [REG_RSI] = (guint64)exc;
	gregs [REG_RDX] = stack_ovf;
#endif
}

guint64
mono_amd64_get_original_ip (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	g_assert (lmf);

	/* Reset the change to previous_lmf */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);

	return lmf->rip;
}

gpointer
mono_arch_get_throw_pending_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *code, *start;
	guint8 *br[1];
	gpointer throw_trampoline;

	*ji = NULL;

	start = code = mono_global_codeman_reserve (128);

	/* We are in the frame of a managed method after a call */
	/*
	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.
	 */

	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the pending exception */
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Load exc */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop saved stuff from the stack */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	/* Exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		throw_trampoline = mono_get_throw_exception ();
		amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	}
	/* We use a jump instead of a call so we can push the original ip on the stack */
	amd64_jump_reg (code, AMD64_R11);

	/* ex == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < 128);

	*code_size = code - start;

	return start;
}

static gpointer throw_pending_exception;

/*
 * Called when a thread receives an async exception while executing unmanaged code.
 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
 * the return address on the stack to point to a helper routine which throws the
 * exception.
 */
void
mono_arch_notify_pending_exc (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	if (lmf->rsp == 0)
		/* Initial LMF */
		return;

	if ((guint64)lmf->previous_lmf & 1)
		/* Already hijacked or trampoline LMF entry */
		return;

	/* lmf->rsp is set just before making the call which transitions to unmanaged code */
	lmf->rip = *(guint64*)(lmf->rsp - 8);
	/* Signal that lmf->rip is set */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);

	*(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
}
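
/*
 * Sketch of the hijack: if the transition happened via
 *
 *   call native_func   ; lmf->rsp points just above the return address
 *
 * then after mono_arch_notify_pending_exc () runs, native_func returns into
 * throw_pending_exception instead of the managed caller, and lmf->rip
 * remembers the original return target.
 */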

void
mono_arch_exceptions_init (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	if (mono_aot_only) {
		throw_pending_exception = mono_aot_get_named_code ("throw_pending_exception");
	} else {
		/* Call this to avoid initialization races */
		throw_pending_exception = mono_arch_get_throw_pending_exception_full (&code_size, &ji, FALSE);
	}
}

#ifdef TARGET_WIN32

/*
 * The mono_arch_unwindinfo* methods are used to build and add
 * function table info for each emitted method from mono. On Winx64
 * the seh handler will not be called if the mono methods are not
 * added to the function table.
 *
 * We should not need to add non-volatile register info to the
 * table since mono stores that info elsewhere. (Except for the register
 * used for the fp.)
 */

#define MONO_MAX_UNWIND_CODES 22

typedef union _UNWIND_CODE {
	struct {
		guchar CodeOffset;
		guchar UnwindOp : 4;
		guchar OpInfo   : 4;
	};
	gushort FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;

typedef struct _UNWIND_INFO {
	guchar Version       : 3;
	guchar Flags         : 5;
	guchar SizeOfProlog;
	guchar CountOfCodes;
	guchar FrameRegister : 4;
	guchar FrameOffset   : 4;
	/* custom size for mono, allowing for: */
	/*   UWOP_PUSH_NONVOL ebp                 offset = 21 */
	/*   UWOP_ALLOC_LARGE : requires 2 or 3   offset = 20 */
	/*   UWOP_SET_FPREG : requires 2          offset = 17 */
	/*   UWOP_PUSH_NONVOL                     offset = 15-0 */
	UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];

/*	UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
 *	union {
 *		OPTIONAL ULONG ExceptionHandler;
 *		OPTIONAL ULONG FunctionEntry;
 *	};
 *	OPTIONAL ULONG ExceptionData[];
 */
} UNWIND_INFO, *PUNWIND_INFO;

typedef struct
{
	RUNTIME_FUNCTION runtimeFunction;
	UNWIND_INFO unwindInfo;
} MonoUnwindInfo, *PMonoUnwindInfo;
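
/*
 * Typical use while emitting a prolog (hypothetical sketch; 'ui', 'start' and
 * the emitted prolog are illustrative, not code from this file):
 *
 *   gpointer ui = NULL;
 *   amd64_push_reg (code, AMD64_RBP);
 *   mono_arch_unwindinfo_add_push_nonvol (&ui, start, code, AMD64_RBP);
 *   amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
 *   mono_arch_unwindinfo_add_set_fpreg (&ui, start, code, AMD64_RBP);
 *   amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x20);
 *   mono_arch_unwindinfo_add_alloc_stack (&ui, start, code, 0x20);
 *   ...
 *   mono_arch_unwindinfo_install_unwind_info (&ui, start, code - start);
 */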

static void
mono_arch_unwindinfo_create (gpointer* monoui)
{
	PMonoUnwindInfo newunwindinfo;
	*monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
	newunwindinfo->unwindInfo.Version = 1;
}

void
mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}

void
mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
	unwindcode++;
	unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	unwindinfo->unwindInfo.FrameRegister = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}

void
mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	guchar codesneeded;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (size < 0x8)
		g_error ("Stack allocation must be equal to or greater than 0x8.");

	if (size <= 0x80)
		codesneeded = 1;
	else if (size <= 0x7FFF8)
		codesneeded = 2;
	else
		codesneeded = 3;

	if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];

	if (codesneeded == 1) {
		/*The size of the allocation is
		  (the number in the OpInfo member) times 8 plus 8*/
		unwindcode->OpInfo = (size - 8)/8;
		unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
	}
	else {
		if (codesneeded == 3) {
			/*the unscaled size of the allocation is recorded
			  in the next two slots in little-endian format*/
			*((unsigned int*)(&unwindcode->FrameOffset)) = size;
			unwindcode += 2;
			unwindcode->OpInfo = 1;
		}
		else {
			/*the size of the allocation divided by 8
			  is recorded in the next slot*/
			unwindcode->FrameOffset = size/8;
			unwindcode++;
			unwindcode->OpInfo = 0;
		}
		unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
	}

	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}

guint
mono_arch_unwindinfo_get_size (gpointer monoui)
{
	PMonoUnwindInfo unwindinfo;
	if (!monoui)
		return 0;

	unwindinfo = (MonoUnwindInfo*)monoui;
	return (8 + sizeof (MonoUnwindInfo)) -
		(sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
}

PRUNTIME_FUNCTION
MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
{
	MonoJitInfo *ji;
	guint64 pos;
	PMonoUnwindInfo targetinfo;
	MonoDomain *domain = mono_domain_get ();

	ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL);
	if (!ji)
		return 0;

	pos = (guint64)(((char*)ji->code_start) + ji->code_size);

	targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);

	targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);

	return &targetinfo->runtimeFunction;
}

void
mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
{
	PMonoUnwindInfo unwindinfo, targetinfo;
	guchar codecount;
	guint64 targetlocation;
	if (!*monoui)
		return;

	unwindinfo = (MonoUnwindInfo*)*monoui;
	targetlocation = (guint64)&(((guchar*)code)[code_size]);
	targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);

	unwindinfo->runtimeFunction.EndAddress = code_size;
	unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);

	memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));

	codecount = unwindinfo->unwindInfo.CountOfCodes;
	if (codecount) {
		memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
			sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
	}

	g_free (unwindinfo);
	*monoui = 0;
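
	/*
	 * Note: RtlInstallFunctionTableCallback requires the two low-order bits
	 * of the table identifier to be set, hence ((DWORD64)code) | 0x3 below.
	 */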
	RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
}

#endif

#if MONO_SUPPORT_TASKLETS
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both call conventions */

	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = mono_global_codeman_reserve (64);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy;
	 * state is moved to $rax so it is set up as the return value and we can overwrite $rsi
	 */
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* setup the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	x86_cld (code);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);
	amd64_movsl (code);

	/* now restore the registers from the LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8);
#ifdef TARGET_WIN32
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8);
#endif
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsp), 8);

	/* restore the lmf chain */
	/*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= 64);
	saved = start;
	return (MonoContinuationRestore)saved;
}
#endif