/*
 * exceptions-amd64.c: exception support for AMD64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */

#include <config.h>
#include <glib.h>
#include <signal.h>
#include <string.h>
#ifdef HAVE_UCONTEXT_H
#include <ucontext.h>
#endif

#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini.h"
#include "mini-amd64.h"
#include "tasklets.h"
#include "debug-mini.h"

#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
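/* Worked example (illustrative): ALIGN_TO (13, 8) == 16 and ALIGN_TO (16, 8) == 16;
 * align must be a power of two for the mask trick to work. */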
#ifdef TARGET_WIN32
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;

#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler(0, er, sctx)
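/*
 * Token pasting selects the matching handler variable above, e.g.
 * W32_SEH_HANDLE_EX(segv) expands to: if (segv_handler) segv_handler(0, er, sctx)
 */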
/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
 */
LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	MonoContext* sctx;
	LONG res;

	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;
	sctx = g_malloc(sizeof(MonoContext));

	/* Copy Win32 context to UNIX style context */
	sctx->rax = ctx->Rax;
	sctx->rbx = ctx->Rbx;
	sctx->rcx = ctx->Rcx;
	sctx->rdx = ctx->Rdx;
	sctx->rbp = ctx->Rbp;
	sctx->rsp = ctx->Rsp;
	sctx->rsi = ctx->Rsi;
	sctx->rdi = ctx->Rdi;
	sctx->rip = ctx->Rip;
	sctx->r12 = ctx->R12;
	sctx->r13 = ctx->R13;
	sctx->r14 = ctx->R14;
	sctx->r15 = ctx->R15;

	switch (er->ExceptionCode) {
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		break;
	}

	/* Copy context back */
	/* Nonvolatile */
	ctx->Rsp = sctx->rsp;
	ctx->Rdi = sctx->rdi;
	ctx->Rsi = sctx->rsi;
	ctx->Rbx = sctx->rbx;
	ctx->Rbp = sctx->rbp;
	ctx->R12 = sctx->r12;
	ctx->R13 = sctx->r13;
	ctx->R14 = sctx->r14;
	ctx->R15 = sctx->r15;
	ctx->Rip = sctx->rip;

	/* Volatile, but restoring them should not matter */
	ctx->Rax = sctx->rax;
	ctx->Rcx = sctx->rcx;
	ctx->Rdx = sctx->rdx;

	g_free (sctx);

	return res;
}
void win32_seh_init()
{
	old_handler = SetUnhandledExceptionFilter(seh_handler);
}

void win32_seh_cleanup()
{
	if (old_handler) SetUnhandledExceptionFilter(old_handler);
}

void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* TARGET_WIN32 */
/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;

	/* restore_context (MonoContext *ctx) */

	*ji = NULL;

	start = code = mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	//amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
	//amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
	//amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);

	if (mono_running_on_valgrind ()) {
		/* Prevent 'Address 0x... is just below the stack ptr.' errors */
		amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
		amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
	} else {
		amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
		/* get return address */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
	}

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	mono_arch_flush_icache (start, code - start);

	*code_size = code - start;

	return start;
}
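/*
 * Illustrative use (sketch): callers obtain and invoke the generated stub as
 *
 *     void (*restore_context) (MonoContext *) = mono_get_restore_context ();
 *     restore_context (&ctx);    -- never returns; execution resumes at ctx.rip
 *
 * mono_amd64_throw_exception () below does exactly this.
 */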
/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *start;
	int i;
	guint8 *code;
	guint32 pos;

	*ji = NULL;

	start = code = mono_global_codeman_reserve (128);

	/* call_filter (MonoContext *ctx, unsigned long eip) */
	code = start;

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	pos = 0;
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
			pos += 8;
		}

	/* Save EBP */
	pos += 8;
	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* set new EBP */
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	/* load callee saved regs */
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
#ifdef TARGET_WIN32
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
#endif

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	/* restore RBP */
	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

	amd64_leave (code);
	amd64_ret (code);

	g_assert ((code - start) < 128);

	mono_arch_flush_icache (start, code - start);

	*code_size = code - start;

	return start;
}
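/*
 * Illustrative use (sketch, per the signature comment above): the generated
 * stub behaves like
 *
 *     int (*call_filter) (MonoContext *ctx, gpointer eip);
 *     res = call_filter (ctx, filter_or_finally_start);
 *
 * It runs the handler with the callee-saved registers loaded from @ctx and
 * returns whatever the handler leaves in RAX (the filter's verdict).
 */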
/*
 * The first few arguments are dummy: they force the remaining arguments to be
 * passed on the stack, which avoids overwriting the argument registers in the
 * throw trampoline.
 */
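/*
 * Background note (not from the original source): on SysV AMD64 the first six
 * integer arguments travel in registers, so six dummies are enough to push
 * everything that follows onto the stack; the trampoline built by
 * get_throw_trampoline () below fills that stack area with plain pushes.
 */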
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
							guint64 dummy5, guint64 dummy6,
							MonoObject *exc, guint64 rip, guint64 rsp,
							guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
							guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
							guint64 rax, guint64 rcx, guint64 rdx,
							guint64 rethrow)
{
	static void (*restore_context) (MonoContext *);
	MonoContext ctx;

	if (!restore_context)
		restore_context = mono_get_restore_context ();

	ctx.rsp = rsp;
	ctx.rip = rip;
	ctx.rbx = rbx;
	ctx.rbp = rbp;
	ctx.r12 = r12;
	ctx.r13 = r13;
	ctx.r14 = r14;
	ctx.r15 = r15;
	ctx.rdi = rdi;
	ctx.rsi = rsi;
	ctx.rax = rax;
	ctx.rcx = rcx;
	ctx.rdx = rdx;

	if (mono_object_isinst (exc, mono_defaults.exception_class)) {
		MonoException *mono_ex = (MonoException*)exc;
		if (!rethrow)
			mono_ex->stack_trace = NULL;
	}

	if (mono_debug_using_mono_debugger ()) {
		guint8 buf [16], *code;

		mono_breakpoint_clean_code (NULL, (gpointer)rip, 8, buf, sizeof (buf));
		code = buf + 8;

		if (buf [3] == 0xe8) {
			MonoContext ctx_cp = ctx;
			ctx_cp.rip = rip - 5;

			if (mono_debugger_handle_exception (&ctx_cp, exc)) {
				restore_context (&ctx_cp);
				g_assert_not_reached ();
			}
		}
	}

	/* adjust the IP so that it points into the call instruction */
	ctx.rip -= 1;

	mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
	restore_context (&ctx);

	g_assert_not_reached ();
}
static gpointer
get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8* start;
	guint8 *code;
	GSList *unwind_ops;

	start = code = mono_global_codeman_reserve (64);

	code = start;

	*ji = NULL;

	unwind_ops = mono_arch_get_cie_program ();

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);

	/* reverse order */
	amd64_push_imm (code, rethrow);
	amd64_push_reg (code, AMD64_RDX);
	amd64_push_reg (code, AMD64_RCX);
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RSI);
	amd64_push_reg (code, AMD64_RDI);
	amd64_push_reg (code, AMD64_R15);
	amd64_push_reg (code, AMD64_R14);
	amd64_push_reg (code, AMD64_R13);
	amd64_push_reg (code, AMD64_R12);
	amd64_push_reg (code, AMD64_RBP);
	amd64_push_reg (code, AMD64_RBX);

	/* SP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
	amd64_push_reg (code, AMD64_RAX);

	/* IP */
	amd64_push_membase (code, AMD64_R11, 0);

	/* Exception */
	amd64_push_reg (code, AMD64_ARG_REG1);

	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, (15 + 1) * sizeof (gpointer));

#ifdef TARGET_WIN32
	/* align stack */
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
#endif

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_throw_exception);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < 64);

	*code_size = code - start;

	mono_save_trampoline_xdebug_info ("throw_exception_trampoline", start, code - start, unwind_ops);

	return start;
}
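/*
 * Sketch of the stack argument area the trampoline builds (lowest address
 * first), matching the stack parameters of mono_amd64_throw_exception ()
 * after the six register dummies:
 *
 *     exc, rip, rsp, rbx, rbp, r12, r13, r14, r15, rdi, rsi, rax, rcx, rdx, rethrow
 */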
/*
 * mono_arch_get_throw_exception:
 *
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 */
gpointer
mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	return get_throw_trampoline (FALSE, code_size, ji, aot);
}

gpointer
mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	return get_throw_trampoline (TRUE, code_size, ji, aot);
}
/*
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
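/*
 * Worked example (illustrative): if the caller's return address is R and the
 * throwing instruction starts at T, the JIT passes offset = R - T, so the stub
 * below recovers throw_ip = R - offset = T without any relocation in the caller.
 */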
gpointer
mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	static guint8* start;
	guint8 *code;
	guint64 throw_ex;

	start = code = mono_global_codeman_reserve (64);

	*ji = NULL;

	/* Push throw_ip */
	amd64_push_reg (code, AMD64_ARG_REG2);

	/* Call exception_from_token */
	amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_IMAGE, mono_defaults.exception_class->image);
		amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RIP, 0, 8);
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
		amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
	}
#ifdef TARGET_WIN32
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
#endif
	amd64_call_reg (code, AMD64_R11);
#ifdef TARGET_WIN32
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32);
#endif

	/* Compute throw_ip */
	amd64_pop_reg (code, AMD64_ARG_REG2);
	/* return addr */
	amd64_pop_reg (code, AMD64_ARG_REG3);
	amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);

	/* Put the throw_ip at the top of the misaligned stack */
	amd64_push_reg (code, AMD64_ARG_REG3);

	throw_ex = (guint64)mono_get_throw_exception ();

	/* Call throw_exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
	}
	/* The original IP is on the stack */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < 64);

	mono_arch_flush_icache (start, code - start);

	*code_size = code - start;

	return start;
}
/*
 * mono_arch_find_jit_info_ext:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 * This function is a version of mono_arch_find_jit_info () where all the results are
 * returned in a StackFrameInfo structure.
 */
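/*
 * Illustrative stack walk (sketch; error handling and the per-frame JIT-info
 * lookup are elided):
 *
 *     MonoContext ctx = ..., new_ctx;
 *     StackFrameInfo frame;
 *     while (mono_arch_find_jit_info_ext (domain, jit_tls, ji, &ctx, &new_ctx, &lmf, &frame)) {
 *         ... inspect frame ...
 *         ctx = new_ctx;    -- walk one frame at a time
 *     }
 */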
gboolean
mono_arch_find_jit_info_ext (MonoDomain *domain, MonoJitTlsData *jit_tls,
							 MonoJitInfo *ji, MonoContext *ctx,
							 MonoContext *new_ctx, MonoLMF **lmf,
							 StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;
	frame->managed = FALSE;

	*new_ctx = *ctx;

	if (ji != NULL) {
		gssize regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		frame->type = FRAME_TYPE_MANAGED;

		if (!ji->method->wrapper_type || ji->method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD)
			frame->managed = TRUE;

		if (ji->from_aot)
			unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
		else
			unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);

		regs [AMD64_RAX] = new_ctx->rax;
		regs [AMD64_RBX] = new_ctx->rbx;
		regs [AMD64_RCX] = new_ctx->rcx;
		regs [AMD64_RDX] = new_ctx->rdx;
		regs [AMD64_RBP] = new_ctx->rbp;
		regs [AMD64_RSP] = new_ctx->rsp;
		regs [AMD64_RSI] = new_ctx->rsi;
		regs [AMD64_RDI] = new_ctx->rdi;
		regs [AMD64_RIP] = new_ctx->rip;
		regs [AMD64_R12] = new_ctx->r12;
		regs [AMD64_R13] = new_ctx->r13;
		regs [AMD64_R14] = new_ctx->r14;
		regs [AMD64_R15] = new_ctx->r15;

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   ip, regs, MONO_MAX_IREGS + 1, &cfa);

		new_ctx->rax = regs [AMD64_RAX];
		new_ctx->rbx = regs [AMD64_RBX];
		new_ctx->rcx = regs [AMD64_RCX];
		new_ctx->rdx = regs [AMD64_RDX];
		new_ctx->rbp = regs [AMD64_RBP];
		new_ctx->rsp = regs [AMD64_RSP];
		new_ctx->rsi = regs [AMD64_RSI];
		new_ctx->rdi = regs [AMD64_RDI];
		new_ctx->rip = regs [AMD64_RIP];
		new_ctx->r12 = regs [AMD64_R12];
		new_ctx->r13 = regs [AMD64_R13];
		new_ctx->r14 = regs [AMD64_R14];
		new_ctx->r15 = regs [AMD64_R15];

		/* The CFA becomes the new SP value */
		new_ctx->rsp = (gssize)cfa;

		/* Adjust IP */
		new_ctx->rip --;

		if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
		}

#ifndef MONO_AMD64_NO_PUSHES
		/* Pop arguments off the stack */
		{
			MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);

			guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
			new_ctx->rsp += stack_to_pop;
		}
#endif

		return TRUE;
	} else if (*lmf) {
		guint64 rip;

		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return FALSE;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
		}

		ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
		if (!ji) {
			// FIXME: This can happen with multiple appdomains (bug #444383)
			return FALSE;
		}

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		new_ctx->rip = rip;
		new_ctx->rbp = (*lmf)->rbp;
		new_ctx->rsp = (*lmf)->rsp;

		new_ctx->rbx = (*lmf)->rbx;
		new_ctx->r12 = (*lmf)->r12;
		new_ctx->r13 = (*lmf)->r13;
		new_ctx->r14 = (*lmf)->r14;
		new_ctx->r15 = (*lmf)->r15;
#ifdef TARGET_WIN32
		new_ctx->rdi = (*lmf)->rdi;
		new_ctx->rsi = (*lmf)->rsi;
#endif

		*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}
/*
 * mono_arch_handle_exception:
 *
 * @ctx: saved processor state
 * @obj: the exception object
 */
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
{
	MonoContext mctx;

	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj))
		return TRUE;

	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);

	mono_arch_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
}
#if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
static inline guint64*
gregs_from_ucontext (ucontext_t *ctx)
{
	return (guint64 *) UCONTEXT_GREGS (ctx);
}
#endif
void
mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
{
#if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	mctx->rax = gregs [REG_RAX];
	mctx->rbx = gregs [REG_RBX];
	mctx->rcx = gregs [REG_RCX];
	mctx->rdx = gregs [REG_RDX];
	mctx->rbp = gregs [REG_RBP];
	mctx->rsp = gregs [REG_RSP];
	mctx->rsi = gregs [REG_RSI];
	mctx->rdi = gregs [REG_RDI];
	mctx->rip = gregs [REG_RIP];
	mctx->r12 = gregs [REG_R12];
	mctx->r13 = gregs [REG_R13];
	mctx->r14 = gregs [REG_R14];
	mctx->r15 = gregs [REG_R15];
#elif defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	mctx->rax = UCONTEXT_REG_RAX (ctx);
	mctx->rbx = UCONTEXT_REG_RBX (ctx);
	mctx->rcx = UCONTEXT_REG_RCX (ctx);
	mctx->rdx = UCONTEXT_REG_RDX (ctx);
	mctx->rbp = UCONTEXT_REG_RBP (ctx);
	mctx->rsp = UCONTEXT_REG_RSP (ctx);
	mctx->rsi = UCONTEXT_REG_RSI (ctx);
	mctx->rdi = UCONTEXT_REG_RDI (ctx);
	mctx->rip = UCONTEXT_REG_RIP (ctx);
	mctx->r12 = UCONTEXT_REG_R12 (ctx);
	mctx->r13 = UCONTEXT_REG_R13 (ctx);
	mctx->r14 = UCONTEXT_REG_R14 (ctx);
	mctx->r15 = UCONTEXT_REG_R15 (ctx);
#else
	MonoContext *ctx = (MonoContext *)sigctx;

	mctx->rax = ctx->rax;
	mctx->rbx = ctx->rbx;
	mctx->rcx = ctx->rcx;
	mctx->rdx = ctx->rdx;
	mctx->rbp = ctx->rbp;
	mctx->rsp = ctx->rsp;
	mctx->rsi = ctx->rsi;
	mctx->rdi = ctx->rdi;
	mctx->rip = ctx->rip;
	mctx->r12 = ctx->r12;
	mctx->r13 = ctx->r13;
	mctx->r14 = ctx->r14;
	mctx->r15 = ctx->r15;
#endif
}
void
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
{
#if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	gregs [REG_RAX] = mctx->rax;
	gregs [REG_RBX] = mctx->rbx;
	gregs [REG_RCX] = mctx->rcx;
	gregs [REG_RDX] = mctx->rdx;
	gregs [REG_RBP] = mctx->rbp;
	gregs [REG_RSP] = mctx->rsp;
	gregs [REG_RSI] = mctx->rsi;
	gregs [REG_RDI] = mctx->rdi;
	gregs [REG_RIP] = mctx->rip;
	gregs [REG_R12] = mctx->r12;
	gregs [REG_R13] = mctx->r13;
	gregs [REG_R14] = mctx->r14;
	gregs [REG_R15] = mctx->r15;
#elif defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	UCONTEXT_REG_RAX (ctx) = mctx->rax;
	UCONTEXT_REG_RBX (ctx) = mctx->rbx;
	UCONTEXT_REG_RCX (ctx) = mctx->rcx;
	UCONTEXT_REG_RDX (ctx) = mctx->rdx;
	UCONTEXT_REG_RBP (ctx) = mctx->rbp;
	UCONTEXT_REG_RSP (ctx) = mctx->rsp;
	UCONTEXT_REG_RSI (ctx) = mctx->rsi;
	UCONTEXT_REG_RDI (ctx) = mctx->rdi;
	UCONTEXT_REG_RIP (ctx) = mctx->rip;
	UCONTEXT_REG_R12 (ctx) = mctx->r12;
	UCONTEXT_REG_R13 (ctx) = mctx->r13;
	UCONTEXT_REG_R14 (ctx) = mctx->r14;
	UCONTEXT_REG_R15 (ctx) = mctx->r15;
#else
	MonoContext *ctx = (MonoContext *)sigctx;

	ctx->rax = mctx->rax;
	ctx->rbx = mctx->rbx;
	ctx->rcx = mctx->rcx;
	ctx->rdx = mctx->rdx;
	ctx->rbp = mctx->rbp;
	ctx->rsp = mctx->rsp;
	ctx->rsi = mctx->rsi;
	ctx->rdi = mctx->rdi;
	ctx->rip = mctx->rip;
	ctx->r12 = mctx->r12;
	ctx->r13 = mctx->r13;
	ctx->r14 = mctx->r14;
	ctx->r15 = mctx->r15;
#endif
}
gpointer
mono_arch_ip_from_context (void *sigctx)
{
#if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	return (gpointer)gregs [REG_RIP];
#elif defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	return (gpointer)UCONTEXT_REG_RIP (ctx);
#else
	MonoContext *ctx = sigctx;
	return (gpointer)ctx->rip;
#endif
}
static void
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
}
/*
 * This function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.rip, but in a function that
 * will restore the protection on the soft-guard pages and return
 * to continue at mctx.rip.
 */
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;
	sp = (gpointer)(mctx->rsp);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->rip);
	mctx->rip = (guint64)restore_soft_guard_pages;
	mctx->rsp = (guint64)sp;
}
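/*
 * Resulting control flow (sketch): restoring the modified context runs
 * restore_soft_guard_pages () on the faulting thread's stack; its `ret` then
 * pops the saved address and resumes at the original mctx.rip.
 */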
static void
altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
{
	void (*restore_context) (MonoContext *);
	MonoContext mctx;

	restore_context = mono_get_restore_context ();
	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) {
		if (stack_ovf)
			prepare_for_guard_pages (&mctx);
		restore_context (&mctx);
	}

	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
	if (stack_ovf)
		prepare_for_guard_pages (&mctx);
	restore_context (&mctx);
}
void
mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
{
#if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
	MonoException *exc = NULL;
	ucontext_t *ctx = (ucontext_t*)sigctx;
	guint64 *gregs = gregs_from_ucontext (ctx);
	MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP], NULL);
	gpointer *sp;
	int frame_size;

	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;
	if (!ji)
		mono_handle_native_sigsegv (SIGSEGV, sigctx);

	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like:
	 *   ucontext struct
	 *   ...
	 *   return ip
	 * 128 is the size of the red zone
	 */
	frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
	frame_size += 15;
	frame_size &= ~15;
	sp = (gpointer)(gregs [REG_RSP] & ~15);
	sp = (gpointer)((char*)sp - frame_size);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)gregs [REG_RIP];
	/* may need to adjust pointers in the new struct copy, depending on the OS */
	memcpy (sp + 4, ctx, sizeof (ucontext_t));
	/* on return from the signal handler, execution starts in altstack_handle_and_restore() */
	gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
	gregs [REG_RSP] = (unsigned long)(sp - 1);
	gregs [REG_RDI] = (unsigned long)(sp + 4);
	gregs [REG_RSI] = (guint64)exc;
	gregs [REG_RDX] = stack_ovf;
#endif
}
guint64
mono_amd64_get_original_ip (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	g_assert (lmf);

	/* Reset the change to previous_lmf */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);

	return lmf->rip;
}
gpointer
mono_arch_get_throw_pending_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *code, *start;
	guint8 *br[1];
	gpointer throw_trampoline;

	*ji = NULL;

	start = code = mono_global_codeman_reserve (128);

	/* We are in the frame of a managed method after a call */
	/*
	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.
	 */

	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the pending exception */
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Load exc */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop saved stuff from the stack */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	/* Exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		throw_trampoline = mono_get_throw_exception ();
		amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	}
	/* We use a jump instead of a call so we can push the original ip on the stack */
	amd64_jump_reg (code, AMD64_R11);

	/* ex == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < 128);

	*code_size = code - start;

	return start;
}
static gpointer throw_pending_exception;

/*
 * Called when a thread receives an async exception while executing unmanaged code.
 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
 * the return address on the stack to point to a helper routine which throws the
 * exception.
 */
void
mono_arch_notify_pending_exc (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	if (!lmf)
		/* Not yet started */
		return;

	if (lmf->rsp == 0)
		/* Initial LMF */
		return;

	if ((guint64)lmf->previous_lmf & 1)
		/* Already hijacked or trampoline LMF entry */
		return;

	/* lmf->rsp is set just before making the call which transitions to unmanaged code */
	lmf->rip = *(guint64*)(lmf->rsp - 8);
	/* Signal that lmf->rip is set */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);

	*(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
}
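/*
 * Hijack sketch: the word at lmf->rsp - 8 is the return address of the
 * managed-to-native call. It is stashed in lmf->rip and the slot overwritten,
 * so when the native call returns it "returns" into throw_pending_exception,
 * which raises the exception and eventually resumes at the saved lmf->rip.
 */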
void
mono_arch_exceptions_init (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	if (mono_aot_only) {
		throw_pending_exception = mono_aot_get_named_code ("throw_pending_exception");
	} else {
		/* Call this to avoid initialization races */
		throw_pending_exception = mono_arch_get_throw_pending_exception_full (&code_size, &ji, FALSE);
	}
}
#ifdef TARGET_WIN32

/*
 * The mono_arch_unwindinfo* methods are used to build and add
 * function table info for each emitted method from mono. On Winx64
 * the seh handler will not be called if the mono methods are not
 * added to the function table.
 *
 * We should not need to add non-volatile register info to the
 * table since mono stores that info elsewhere. (Except for the register
 * used for the fp.)
 */

#define MONO_MAX_UNWIND_CODES 22

typedef union _UNWIND_CODE {
	struct {
		guchar CodeOffset;
		guchar UnwindOp : 4;
		guchar OpInfo   : 4;
	};
	gushort FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;

typedef struct _UNWIND_INFO {
	guchar Version       : 3;
	guchar Flags         : 5;
	guchar SizeOfProlog;
	guchar CountOfCodes;
	guchar FrameRegister : 4;
	guchar FrameOffset   : 4;
	/* custom size for mono, allowing for: */
	/*   UWOP_PUSH_NONVOL ebp                  offset = 21 */
	/*   UWOP_ALLOC_LARGE (requires 2 or 3)    offset = 20 */
	/*   UWOP_SET_FPREG (requires 2)           offset = 17 */
	/*   UWOP_PUSH_NONVOL                      offset = 15-0 */
	UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];

/*	UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
 *	union {
 *		OPTIONAL ULONG ExceptionHandler;
 *		OPTIONAL ULONG FunctionEntry;
 *	};
 *	OPTIONAL ULONG ExceptionData[]; */
} UNWIND_INFO, *PUNWIND_INFO;

typedef struct
{
	RUNTIME_FUNCTION runtimeFunction;
	UNWIND_INFO unwindInfo;
} MonoUnwindInfo, *PMonoUnwindInfo;
static void
mono_arch_unwindinfo_create (gpointer* monoui)
{
	PMonoUnwindInfo newunwindinfo;
	*monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
	newunwindinfo->unwindInfo.Version = 1;
}
void
mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
void
mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
	unwindcode++;
	unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	unwindinfo->unwindInfo.FrameRegister = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
void
mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	guchar codesneeded;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (size < 0x8)
		g_error ("Stack allocation must be equal to or greater than 0x8.");

	if (size <= 0x80)
		codesneeded = 1;
	else if (size <= 0x7FFF8)
		codesneeded = 2;
	else
		codesneeded = 3;

	if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];

	if (codesneeded == 1) {
		/* The size of the allocation is
		   (the number in the OpInfo member) times 8 plus 8 */
		unwindcode->OpInfo = (size - 8)/8;
		unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
	}
	else {
		if (codesneeded == 3) {
			/* the unscaled size of the allocation is recorded
			   in the next two slots in little-endian format */
			*((unsigned int*)(&unwindcode->FrameOffset)) = size;
			unwindcode += 2;
			unwindcode->OpInfo = 1;
		}
		else {
			/* the size of the allocation divided by 8
			   is recorded in the next slot */
			unwindcode->FrameOffset = size/8;
			unwindcode++;
			unwindcode->OpInfo = 0;
		}
		unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
	}

	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
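/*
 * Worked example (illustrative): a 0x40-byte frame fits UWOP_ALLOC_SMALL with
 * OpInfo = (0x40 - 8) / 8 = 7; a 0x100-byte frame needs UWOP_ALLOC_LARGE with
 * the next slot holding 0x100 / 8 = 0x20.
 */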
guint
mono_arch_unwindinfo_get_size (gpointer monoui)
{
	PMonoUnwindInfo unwindinfo;
	if (!monoui)
		return 0;

	unwindinfo = (MonoUnwindInfo*)monoui;
	return (8 + sizeof (MonoUnwindInfo)) -
		(sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
}
PRUNTIME_FUNCTION
MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
{
	MonoJitInfo *ji;
	guint64 pos;
	PMonoUnwindInfo targetinfo;
	MonoDomain *domain = mono_domain_get ();

	ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL);
	if (!ji)
		return 0;

	pos = (guint64)(((char*)ji->code_start) + ji->code_size);

	targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);

	targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);

	return &targetinfo->runtimeFunction;
}
void
mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
{
	PMonoUnwindInfo unwindinfo, targetinfo;
	guchar codecount;
	guint64 targetlocation;
	if (!*monoui)
		return;

	unwindinfo = (MonoUnwindInfo*)*monoui;
	targetlocation = (guint64)&(((guchar*)code)[code_size]);
	targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);

	unwindinfo->runtimeFunction.EndAddress = code_size;
	unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);

	memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));

	codecount = unwindinfo->unwindInfo.CountOfCodes;
	if (codecount) {
		memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
			sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
	}

	g_free (unwindinfo);
	*monoui = 0;

	RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
}

#endif
#if MONO_SUPPORT_TASKLETS
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both call conventions */

	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = mono_global_codeman_reserve (64);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy
	 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
	 */
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* setup the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	x86_cld (code);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);
	amd64_movsl (code);

	/* now restore the registers from the LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8);
#ifdef TARGET_WIN32
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8);
#endif
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsp), 8);

	/* restore the lmf chain */
	/*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= 64);
	saved = start;
	return (MonoContinuationRestore)saved;
}
#endif