Treat CEE_BREAK the same as Debugger:Break (), i.e. route it through sdb.
mono-project/dkf.git: mono/mini/exceptions-amd64.c
/*
 * exceptions-amd64.c: exception support for AMD64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */

#include <config.h>
#include <glib.h>
#include <signal.h>
#include <string.h>
#ifdef HAVE_UCONTEXT_H
#include <ucontext.h>
#endif

#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini.h"
#include "mini-amd64.h"
#include "tasklets.h"
#include "debug-mini.h"

#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
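
/*
 * For example, ALIGN_TO (13, 8) == 16 and ALIGN_TO (16, 8) == 16: adding
 * (align - 1) and masking off the low bits rounds val up to the next
 * multiple of align (align must be a power of two).
 */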

#ifdef TARGET_WIN32
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
guint64 mono_win_chained_exception_filter_result;
gboolean mono_win_chained_exception_filter_didrun;

#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler(0, ep, sctx)

/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
 */
LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	MonoContext* sctx;
	LONG res;

	mono_win_chained_exception_filter_didrun = FALSE;
	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;
	sctx = g_malloc(sizeof(MonoContext));

	/* Copy Win32 context to UNIX style context */
	sctx->rax = ctx->Rax;
	sctx->rbx = ctx->Rbx;
	sctx->rcx = ctx->Rcx;
	sctx->rdx = ctx->Rdx;
	sctx->rbp = ctx->Rbp;
	sctx->rsp = ctx->Rsp;
	sctx->rsi = ctx->Rsi;
	sctx->rdi = ctx->Rdi;
	sctx->rip = ctx->Rip;
	sctx->r12 = ctx->R12;
	sctx->r13 = ctx->R13;
	sctx->r14 = ctx->R14;
	sctx->r15 = ctx->R15;

	switch (er->ExceptionCode) {
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		break;
	}

	/* Copy context back */
	/* Nonvolatile */
	ctx->Rsp = sctx->rsp;
	ctx->Rdi = sctx->rdi;
	ctx->Rsi = sctx->rsi;
	ctx->Rbx = sctx->rbx;
	ctx->Rbp = sctx->rbp;
	ctx->R12 = sctx->r12;
	ctx->R13 = sctx->r13;
	ctx->R14 = sctx->r14;
	ctx->R15 = sctx->r15;
	ctx->Rip = sctx->rip;

	/* Volatile, but should not matter? */
	ctx->Rax = sctx->rax;
	ctx->Rcx = sctx->rcx;
	ctx->Rdx = sctx->rdx;

	g_free (sctx);

	if (mono_win_chained_exception_filter_didrun)
		res = mono_win_chained_exception_filter_result;

	return res;
}

void win32_seh_init()
{
	mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_handler);
}

void win32_seh_cleanup()
{
	if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);
}

void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* TARGET_WIN32 */

/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	/* restore_context (MonoContext *ctx) */

	start = code = mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	//amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
	//amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
	//amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
#if !defined(__native_client_codegen__)
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
#endif

	if (mono_running_on_valgrind ()) {
		/* Prevent 'Address 0x... is just below the stack ptr.' errors */
		amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
		amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
	} else {
		amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
		/* get return address */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
	}

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	nacl_global_codeman_validate(&start, 256, &code);

	mono_arch_flush_icache (start, code - start);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("restore_context"), start, code - start, ji, unwind_ops);

	return start;
}
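
/*
 * A minimal usage sketch (illustrative only; mono_amd64_throw_exception ()
 * below is a real consumer): fill in a MonoContext and jump to it. The call
 * never returns, since the generated stub ends with a jump to ctx->rip.
 *
 *   static void (*restore_context) (MonoContext *);
 *
 *   if (!restore_context)
 *       restore_context = mono_get_restore_context ();
 *   restore_context (&ctx);
 *   g_assert_not_reached ();
 */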

/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start;
	int i;
	guint8 *code;
	guint32 pos;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	/* call_filter (MonoContext *ctx, unsigned long eip) */
	code = start;

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	pos = 0;
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
			pos += 8;
		}

	/* Save EBP */
	pos += 8;
	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* set new EBP */
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	/* load callee saved regs */
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
#if !defined(__native_client_codegen__)
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
#endif
#ifdef TARGET_WIN32
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
#endif

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	/* restore RBP */
	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

	amd64_leave (code);
	amd64_ret (code);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	mono_arch_flush_icache (start, code - start);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("call_filter"), start, code - start, ji, unwind_ops);

	return start;
}
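
/*
 * Illustrative sketch of how the generated stub is consumed (the real
 * callers live in the generic exception handling code, e.g.
 * mini-exceptions.c): it is invoked with the context of the throwing frame
 * and the address of a filter or finally clause, and returns the filter's
 * result (the return value is unused for finally handlers).
 *
 *   static int (*call_filter) (MonoContext *, gpointer);
 *
 *   if (!call_filter)
 *       call_filter = mono_get_call_filter ();
 *   filter_res = call_filter (ctx, handler_start);
 */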

/*
 * The first few arguments are dummy, to force the other arguments to be passed on
 * the stack; this avoids overwriting the argument registers in the throw trampoline.
 */
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
							guint64 dummy5, guint64 dummy6,
							mgreg_t *regs, mgreg_t rip,
							MonoObject *exc, gboolean rethrow)
{
	static void (*restore_context) (MonoContext *);
	MonoContext ctx;

	if (!restore_context)
		restore_context = mono_get_restore_context ();

	ctx.rsp = regs [AMD64_RSP];
	ctx.rip = rip;
	ctx.rbx = regs [AMD64_RBX];
	ctx.rbp = regs [AMD64_RBP];
	ctx.r12 = regs [AMD64_R12];
	ctx.r13 = regs [AMD64_R13];
	ctx.r14 = regs [AMD64_R14];
	ctx.r15 = regs [AMD64_R15];
	ctx.rdi = regs [AMD64_RDI];
	ctx.rsi = regs [AMD64_RSI];
	ctx.rax = regs [AMD64_RAX];
	ctx.rcx = regs [AMD64_RCX];
	ctx.rdx = regs [AMD64_RDX];

	if (mono_object_isinst (exc, mono_defaults.exception_class)) {
		MonoException *mono_ex = (MonoException*)exc;
		if (!rethrow)
			mono_ex->stack_trace = NULL;
	}

	if (mono_debug_using_mono_debugger ()) {
		guint8 buf [16];

		mono_breakpoint_clean_code (NULL, (gpointer)rip, 8, buf, sizeof (buf));

		if (buf [3] == 0xe8) {
			MonoContext ctx_cp = ctx;
			ctx_cp.rip = rip - 5;

			if (mono_debugger_handle_exception (&ctx_cp, exc)) {
				restore_context (&ctx_cp);
				g_assert_not_reached ();
			}
		}
	}

	/* adjust eip so that it points into the call instruction */
	ctx.rip -= 1;

	mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
	restore_context (&ctx);

	g_assert_not_reached ();
}

void
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
								   guint64 dummy5, guint64 dummy6,
								   mgreg_t *regs, mgreg_t rip,
								   guint32 ex_token_index, gint64 pc_offset)
{
	guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
	MonoException *ex;

	ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);

	rip -= pc_offset;

	/* Negate the ip adjustment done in mono_amd64_throw_exception () */
	rip += 1;

	mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, regs, rip, (MonoObject*)ex, FALSE);
}

static void
mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
						  guint64 dummy5, guint64 dummy6,
						  mgreg_t *regs, mgreg_t rip,
						  guint32 dummy7, gint64 dummy8)
{
	/* Only the register parameters are valid */
	MonoContext ctx;

	ctx.rsp = regs [AMD64_RSP];
	ctx.rip = rip;
	ctx.rbx = regs [AMD64_RBX];
	ctx.rbp = regs [AMD64_RBP];
	ctx.r12 = regs [AMD64_R12];
	ctx.r13 = regs [AMD64_R13];
	ctx.r14 = regs [AMD64_R14];
	ctx.r15 = regs [AMD64_R15];
	ctx.rdi = regs [AMD64_RDI];
	ctx.rsi = regs [AMD64_RSI];
	ctx.rax = regs [AMD64_RAX];
	ctx.rcx = regs [AMD64_RCX];
	ctx.rdx = regs [AMD64_RDX];

	mono_resume_unwind (&ctx);
}

/*
 * get_throw_trampoline:
 *
 * Generate a call to mono_amd64_throw_exception/
 * mono_amd64_throw_corlib_exception.
 */
static gpointer
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
{
	guint8* start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, stack_size, arg_offsets [16], regs_offset, dummy_stack_space;
	const guint kMaxCodeSize = NACL_SIZE (256, 512);

#ifdef TARGET_WIN32
	dummy_stack_space = 6 * sizeof(mgreg_t);	/* Windows expects stack space allocated for all 6 dummy args. */
#else
	dummy_stack_space = 0;
#endif

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	/* The stack is unaligned on entry */
	stack_size = 192 + 8 + dummy_stack_space;

	code = start;

	if (info)
		unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
	if (info)
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);

	/*
	 * To hide linux/windows calling convention differences, we pass all arguments on
	 * the stack by passing 6 dummy values in registers.
	 */

	arg_offsets [0] = dummy_stack_space + 0;
	arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
	arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
	arg_offsets [3] = dummy_stack_space + sizeof(mgreg_t) * 3;
	regs_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
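
	/*
	 * Resulting frame layout, from RSP upwards (offsets assume the
	 * non-Windows case where dummy_stack_space == 0):
	 *
	 *   [rsp + 0]          arg1 (regs)
	 *   [rsp + 8]          arg2 (rip)
	 *   [rsp + 16]         arg3 (exc/ex_token_index)
	 *   [rsp + 24]         arg4 (rethrow/pc offset)
	 *   [rsp + 32]         saved register array (AMD64_NREG slots)
	 *   ...
	 *   [rsp + stack_size] return address pushed by the throwing call
	 */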

	/* Save registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (i != AMD64_RSP)
			amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
	/* Save RSP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
	/* Set arg1 == regs */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, regs_offset);
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg2 == eip */
	if (llvm_abs)
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
	else
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg3 == exc/ex_token_index */
	if (resume_unwind)
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
	else
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG1, sizeof(mgreg_t));
	/* Set arg4 == rethrow/pc offset */
	if (resume_unwind) {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], 0, sizeof(mgreg_t));
	} else if (corlib) {
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [3], AMD64_ARG_REG2, sizeof(mgreg_t));
		if (llvm_abs)
			/*
			 * The caller is LLVM code which passes the absolute address not a pc offset,
			 * so compensate by passing 0 as 'rip' and passing the negated abs address as
			 * the pc offset.
			 */
			amd64_neg_membase (code, AMD64_RSP, arg_offsets [3]);
	} else {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], rethrow, sizeof(mgreg_t));
	}

	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_amd64_throw_corlib_exception" : "mono_amd64_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	if (info)
		*info = mono_tramp_info_create (g_strdup (tramp_name), start, code - start, ji, unwind_ops);

	return start;
}

/**
 * mono_arch_get_throw_exception:
 *
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 */
gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
}

gpointer
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
}

/**
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
}
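
/*
 * A worked example of the offset scheme, using hypothetical addresses: if
 * the caller IP (the return address of the throw call) is 0x1050 and the
 * throw sequence started at 0x1040, the JIT passes offset 0x10, and
 * mono_amd64_throw_corlib_exception () reconstructs rip = 0x1050 - 0x10 =
 * 0x1040 without the caller needing a relocation for an absolute address.
 */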

/*
 * mono_arch_find_jit_info:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 */
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
						 MonoJitInfo *ji, MonoContext *ctx,
						 MonoContext *new_ctx, MonoLMF **lmf,
						 mgreg_t **save_locations,
						 StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		mgreg_t regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		frame->type = FRAME_TYPE_MANAGED;

		if (ji->from_aot)
			unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
		else
			unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);

		frame->unwind_info = unwind_info;
		frame->unwind_info_len = unwind_info_len;

		regs [AMD64_RAX] = new_ctx->rax;
		regs [AMD64_RBX] = new_ctx->rbx;
		regs [AMD64_RCX] = new_ctx->rcx;
		regs [AMD64_RDX] = new_ctx->rdx;
		regs [AMD64_RBP] = new_ctx->rbp;
		regs [AMD64_RSP] = new_ctx->rsp;
		regs [AMD64_RSI] = new_ctx->rsi;
		regs [AMD64_RDI] = new_ctx->rdi;
		regs [AMD64_RIP] = new_ctx->rip;
		regs [AMD64_R12] = new_ctx->r12;
		regs [AMD64_R13] = new_ctx->r13;
		regs [AMD64_R14] = new_ctx->r14;
		regs [AMD64_R15] = new_ctx->r15;

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   ip, regs, MONO_MAX_IREGS + 1,
						   save_locations, MONO_MAX_IREGS, &cfa);

		new_ctx->rax = regs [AMD64_RAX];
		new_ctx->rbx = regs [AMD64_RBX];
		new_ctx->rcx = regs [AMD64_RCX];
		new_ctx->rdx = regs [AMD64_RDX];
		new_ctx->rbp = regs [AMD64_RBP];
		new_ctx->rsp = regs [AMD64_RSP];
		new_ctx->rsi = regs [AMD64_RSI];
		new_ctx->rdi = regs [AMD64_RDI];
		new_ctx->rip = regs [AMD64_RIP];
		new_ctx->r12 = regs [AMD64_R12];
		new_ctx->r13 = regs [AMD64_R13];
		new_ctx->r14 = regs [AMD64_R14];
		new_ctx->r15 = regs [AMD64_R15];

		/* The CFA becomes the new SP value */
		new_ctx->rsp = (mgreg_t)cfa;

		/* Adjust IP */
		new_ctx->rip --;

		if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
		}

#ifndef MONO_AMD64_NO_PUSHES
		/* Pop arguments off the stack */
		{
			MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);

			guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
			new_ctx->rsp += stack_to_pop;
		}
#endif

		return TRUE;
	} else if (*lmf) {
		guint64 rip;

		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return FALSE;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
		}

		ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
		/*
		 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
		 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
		 * return address.
		 */
		//g_assert (ji);
		if (!ji)
			return FALSE;

		/* Adjust IP */
		rip --;

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		new_ctx->rip = rip;
		new_ctx->rbp = (*lmf)->rbp;
		new_ctx->rsp = (*lmf)->rsp;

		new_ctx->rbx = (*lmf)->rbx;
		new_ctx->r12 = (*lmf)->r12;
		new_ctx->r13 = (*lmf)->r13;
		new_ctx->r14 = (*lmf)->r14;
		new_ctx->r15 = (*lmf)->r15;
#ifdef TARGET_WIN32
		new_ctx->rdi = (*lmf)->rdi;
		new_ctx->rsi = (*lmf)->rsi;
#endif

		*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}
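
/*
 * Illustrative only: a full stack walk drives this function in a loop until
 * it fails, roughly like the generic walker in mini-exceptions.c (error
 * handling and the save_locations machinery omitted):
 *
 *   MonoContext ctx = start_ctx, new_ctx;
 *
 *   while (TRUE) {
 *       MonoJitInfo *ji = mini_jit_info_table_find (domain, MONO_CONTEXT_GET_IP (&ctx), NULL);
 *       if (!mono_arch_find_jit_info (domain, jit_tls, ji, &ctx, &new_ctx, &lmf, NULL, &frame))
 *           break;
 *       ctx = new_ctx;
 *   }
 */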

/*
 * handle_signal_exception:
 *
 * Called when resuming from a signal handler.
 */
static void
handle_signal_exception (gpointer obj, gboolean test_only)
{
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	MonoContext ctx;
	static void (*restore_context) (MonoContext *);

	if (!restore_context)
		restore_context = mono_get_restore_context ();

	memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));

	if (mono_debugger_handle_exception (&ctx, (MonoObject *)obj))
		return;

	mono_handle_exception (&ctx, obj, MONO_CONTEXT_GET_IP (&ctx), test_only);

	restore_context (&ctx);
}

/**
 * mono_arch_handle_exception:
 *
 * @ctx: saved processor state
 * @obj: the exception object
 */
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	/*
	 * Handling the exception in the signal handler is problematic, since the original
	 * signal is disabled, and we could run arbitrary code through the debugger. So
	 * resume into the normal stack and do most work there if possible.
	 */
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	guint64 sp = UCONTEXT_REG_RSP (ctx);

	/* Pass the ctx parameter in TLS */
	mono_arch_sigctx_to_monoctx (ctx, &jit_tls->ex_ctx);
	/* The others in registers */
	UCONTEXT_REG_RDI (ctx) = (guint64)obj;
	UCONTEXT_REG_RSI (ctx) = test_only;

	/* Allocate a stack frame below the red zone */
	sp -= 128;
	/* The stack should be unaligned */
	if (sp % 8 == 0)
		sp -= 8;
	UCONTEXT_REG_RSP (ctx) = sp;

	UCONTEXT_REG_RIP (ctx) = (guint64)handle_signal_exception;

	return TRUE;
#else
	MonoContext mctx;

	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj))
		return TRUE;

	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);

	mono_arch_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#endif
}

void
mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
{
#if defined(__native_client_codegen__) || defined(__native_client__)
	printf("WARNING: mono_arch_sigctx_to_monoctx() called!\n");
#endif

#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	mctx->rax = UCONTEXT_REG_RAX (ctx);
	mctx->rbx = UCONTEXT_REG_RBX (ctx);
	mctx->rcx = UCONTEXT_REG_RCX (ctx);
	mctx->rdx = UCONTEXT_REG_RDX (ctx);
	mctx->rbp = UCONTEXT_REG_RBP (ctx);
	mctx->rsp = UCONTEXT_REG_RSP (ctx);
	mctx->rsi = UCONTEXT_REG_RSI (ctx);
	mctx->rdi = UCONTEXT_REG_RDI (ctx);
	mctx->rip = UCONTEXT_REG_RIP (ctx);
	mctx->r12 = UCONTEXT_REG_R12 (ctx);
	mctx->r13 = UCONTEXT_REG_R13 (ctx);
	mctx->r14 = UCONTEXT_REG_R14 (ctx);
	mctx->r15 = UCONTEXT_REG_R15 (ctx);
#else
	MonoContext *ctx = (MonoContext *)sigctx;

	mctx->rax = ctx->rax;
	mctx->rbx = ctx->rbx;
	mctx->rcx = ctx->rcx;
	mctx->rdx = ctx->rdx;
	mctx->rbp = ctx->rbp;
	mctx->rsp = ctx->rsp;
	mctx->rsi = ctx->rsi;
	mctx->rdi = ctx->rdi;
	mctx->rip = ctx->rip;
	mctx->r12 = ctx->r12;
	mctx->r13 = ctx->r13;
	mctx->r14 = ctx->r14;
	mctx->r15 = ctx->r15;
#endif
}

void
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
{
#if defined(__native_client__) || defined(__native_client_codegen__)
	printf("WARNING: mono_arch_monoctx_to_sigctx() called!\n");
#endif

#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	UCONTEXT_REG_RAX (ctx) = mctx->rax;
	UCONTEXT_REG_RBX (ctx) = mctx->rbx;
	UCONTEXT_REG_RCX (ctx) = mctx->rcx;
	UCONTEXT_REG_RDX (ctx) = mctx->rdx;
	UCONTEXT_REG_RBP (ctx) = mctx->rbp;
	UCONTEXT_REG_RSP (ctx) = mctx->rsp;
	UCONTEXT_REG_RSI (ctx) = mctx->rsi;
	UCONTEXT_REG_RDI (ctx) = mctx->rdi;
	UCONTEXT_REG_RIP (ctx) = mctx->rip;
	UCONTEXT_REG_R12 (ctx) = mctx->r12;
	UCONTEXT_REG_R13 (ctx) = mctx->r13;
	UCONTEXT_REG_R14 (ctx) = mctx->r14;
	UCONTEXT_REG_R15 (ctx) = mctx->r15;
#else
	MonoContext *ctx = (MonoContext *)sigctx;

	ctx->rax = mctx->rax;
	ctx->rbx = mctx->rbx;
	ctx->rcx = mctx->rcx;
	ctx->rdx = mctx->rdx;
	ctx->rbp = mctx->rbp;
	ctx->rsp = mctx->rsp;
	ctx->rsi = mctx->rsi;
	ctx->rdi = mctx->rdi;
	ctx->rip = mctx->rip;
	ctx->r12 = mctx->r12;
	ctx->r13 = mctx->r13;
	ctx->r14 = mctx->r14;
	ctx->r15 = mctx->r15;
#endif
}

gpointer
mono_arch_ip_from_context (void *sigctx)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	return (gpointer)UCONTEXT_REG_RIP (ctx);
#else
	MonoContext *ctx = sigctx;
	return (gpointer)ctx->rip;
#endif
}

static void
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
}

/*
 * this function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.eip, but in a function that
 * will restore the protection on the soft-guard pages and return back to
 * continue at mctx.eip.
 */
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;
	sp = (gpointer)(mctx->rsp);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->rip);
	mctx->rip = (guint64)restore_soft_guard_pages;
	mctx->rsp = (guint64)sp;
}

static void
altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
{
	void (*restore_context) (MonoContext *);
	MonoContext mctx;

	restore_context = mono_get_restore_context ();
	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) {
		if (stack_ovf)
			prepare_for_guard_pages (&mctx);
		restore_context (&mctx);
	}

	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
	if (stack_ovf)
		prepare_for_guard_pages (&mctx);
	restore_context (&mctx);
}

void
mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
{
#if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
	MonoException *exc = NULL;
	ucontext_t *ctx = (ucontext_t*)sigctx;
	MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)UCONTEXT_REG_RIP (sigctx), NULL);
	gpointer *sp;
	int frame_size;

	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;
	if (!ji)
		mono_handle_native_sigsegv (SIGSEGV, sigctx);

	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like:
	 *   ucontext struct
	 *   ...
	 *   return ip
	 * 128 is the size of the red zone
	 */
	frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
	frame_size += 15;
	frame_size &= ~15;
	sp = (gpointer)(UCONTEXT_REG_RSP (sigctx) & ~15);
	sp = (gpointer)((char*)sp - frame_size);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
	/* may need to adjust pointers in the new struct copy, depending on the OS */
	memcpy (sp + 4, ctx, sizeof (ucontext_t));
	/* at the return from the signal handler execution starts in altstack_handle_and_restore() */
	UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
	UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
	UCONTEXT_REG_RDI (sigctx) = (unsigned long)(sp + 4);
	UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
	UCONTEXT_REG_RDX (sigctx) = stack_ovf;
#endif
}

guint64
mono_amd64_get_original_ip (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	g_assert (lmf);

	/* Reset the change to previous_lmf */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);

	return lmf->rip;
}

gpointer
mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *start;
	guint8 *br[1];
	gpointer throw_trampoline;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	/* We are in the frame of a managed method after a call */
	/*
	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.
	 */

	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the pending exception */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Load exc */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop saved stuff from the stack */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	/* Exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		throw_trampoline = mono_get_throw_exception ();
		amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	}
	/* We use a jump instead of a call so we can push the original ip on the stack */
	amd64_jump_reg (code, AMD64_R11);

	/* ex == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("throw_pending_exception"), start, code - start, ji, unwind_ops);

	return start;
}

static gpointer throw_pending_exception;

/*
 * Called when a thread receives an async exception while executing unmanaged code.
 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
 * the return address on the stack to point to a helper routine which throws the
 * exception.
 */
void
mono_arch_notify_pending_exc (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	if (!lmf)
		/* Not yet started */
		return;

	if (lmf->rsp == 0)
		/* Initial LMF */
		return;

	if ((guint64)lmf->previous_lmf & 1)
		/* Already hijacked or trampoline LMF entry */
		return;

	/* lmf->rsp is set just before making the call which transitions to unmanaged code */
	lmf->rip = *(guint64*)(lmf->rsp - 8);
	/* Signal that lmf->rip is set */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);

	*(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
}
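
/*
 * Before/after sketch of the hijack performed above:
 *
 *   before:  [lmf->rsp - 8] = return address into managed code
 *   after:   [lmf->rsp - 8] = throw_pending_exception
 *            lmf->rip       = original return address
 *            previous_lmf  |= 1  (marks lmf->rip as valid)
 *
 * so when the native call returns, it lands in the helper, which throws the
 * pending exception and otherwise resumes at the address saved in lmf->rip.
 */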

GSList*
mono_amd64_get_exception_trampolines (gboolean aot)
{
	MonoTrampInfo *info;
	GSList *tramps = NULL;

	mono_arch_get_throw_pending_exception (&info, aot);
	tramps = g_slist_prepend (tramps, info);

	/* LLVM needs different throw trampolines */
	get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", FALSE);
	tramps = g_slist_prepend (tramps, info);

	return tramps;
}

void
mono_arch_exceptions_init (void)
{
	GSList *tramps, *l;
	gpointer tramp;

	if (mono_aot_only) {
		throw_pending_exception = mono_aot_get_trampoline ("throw_pending_exception");
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
		mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
	} else {
		/* Call this to avoid initialization races */
		throw_pending_exception = mono_arch_get_throw_pending_exception (NULL, FALSE);

		tramps = mono_amd64_get_exception_trampolines (FALSE);
		for (l = tramps; l; l = l->next) {
			MonoTrampInfo *info = l->data;

			mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
			mono_save_trampoline_xdebug_info (info);
			mono_tramp_info_free (info);
		}
		g_slist_free (tramps);
	}
}

#ifdef TARGET_WIN32

/*
 * The mono_arch_unwindinfo* methods are used to build and add
 * function table info for each emitted method from mono. On Winx64
 * the seh handler will not be called if the mono methods are not
 * added to the function table.
 *
 * We should not need to add non-volatile register info to the
 * table since mono stores that info elsewhere. (Except for the register
 * used for the fp.)
 */

#define MONO_MAX_UNWIND_CODES 22

typedef union _UNWIND_CODE {
	struct {
		guchar CodeOffset;
		guchar UnwindOp : 4;
		guchar OpInfo   : 4;
	};
	gushort FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;

typedef struct _UNWIND_INFO {
	guchar Version       : 3;
	guchar Flags         : 5;
	guchar SizeOfProlog;
	guchar CountOfCodes;
	guchar FrameRegister : 4;
	guchar FrameOffset   : 4;
	/* custom size for mono, allowing for: */
	/*UWOP_PUSH_NONVOL ebp		offset = 21*/
	/*UWOP_ALLOC_LARGE : requires 2 or 3	offset = 20*/
	/*UWOP_SET_FPREG : requires 2		offset = 17*/
	/*UWOP_PUSH_NONVOL			offset = 15-0*/
	UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];

/*	UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
 *	union {
 *	    OPTIONAL ULONG ExceptionHandler;
 *	    OPTIONAL ULONG FunctionEntry;
 *	};
 *	OPTIONAL ULONG ExceptionData[]; */
} UNWIND_INFO, *PUNWIND_INFO;

typedef struct
{
	RUNTIME_FUNCTION runtimeFunction;
	UNWIND_INFO unwindInfo;
} MonoUnwindInfo, *PMonoUnwindInfo;

static void
mono_arch_unwindinfo_create (gpointer* monoui)
{
	PMonoUnwindInfo newunwindinfo;
	*monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
	newunwindinfo->unwindInfo.Version = 1;
}

void
mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}

void
mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
	unwindcode++;
	unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	unwindinfo->unwindInfo.FrameRegister = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}

void
mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	guchar codesneeded;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (size < 0x8)
		g_error ("Stack allocation must be equal to or greater than 0x8.");

	if (size <= 0x80)
		codesneeded = 1;
	else if (size <= 0x7FFF8)
		codesneeded = 2;
	else
		codesneeded = 3;

	if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];

	if (codesneeded == 1) {
		/*The size of the allocation is
		  (the number in the OpInfo member) times 8 plus 8*/
		unwindcode->OpInfo = (size - 8)/8;
		unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
	}
	else {
		if (codesneeded == 3) {
			/*the unscaled size of the allocation is recorded
			  in the next two slots in little-endian format*/
			*((unsigned int*)(&unwindcode->FrameOffset)) = size;
			unwindcode += 2;
			unwindcode->OpInfo = 1;
		}
		else {
			/*the size of the allocation divided by 8
			  is recorded in the next slot*/
			unwindcode->FrameOffset = size/8;
			unwindcode++;
			unwindcode->OpInfo = 0;
		}
		unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
	}

	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
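
/*
 * A sketch of how these helpers are meant to be driven while emitting a
 * typical prologue (register choice and frame size are illustrative; 'ui'
 * is a gpointer initialized to NULL):
 *
 *   amd64_push_reg (code, AMD64_RBP);
 *   mono_arch_unwindinfo_add_push_nonvol (&ui, start, code, AMD64_RBP);
 *   amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
 *   mono_arch_unwindinfo_add_set_fpreg (&ui, start, code, AMD64_RBP);
 *   amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x20);
 *   mono_arch_unwindinfo_add_alloc_stack (&ui, start, code, 0x20);
 *
 * Each call records the offset of the end of the instruction just emitted
 * (nextip - codebegin), which is why the codes must be added in prologue
 * order; adding them out of order trips the SizeOfProlog check above.
 */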

guint
mono_arch_unwindinfo_get_size (gpointer monoui)
{
	PMonoUnwindInfo unwindinfo;
	if (!monoui)
		return 0;

	unwindinfo = (MonoUnwindInfo*)monoui;
	return (8 + sizeof (MonoUnwindInfo)) -
		(sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
}

PRUNTIME_FUNCTION
MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
{
	MonoJitInfo *ji;
	guint64 pos;
	PMonoUnwindInfo targetinfo;
	MonoDomain *domain = mono_domain_get ();

	ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL);
	if (!ji)
		return 0;

	pos = (guint64)(((char*)ji->code_start) + ji->code_size);

	targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);

	targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);

	return &targetinfo->runtimeFunction;
}

void
mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
{
	PMonoUnwindInfo unwindinfo, targetinfo;
	guchar codecount;
	guint64 targetlocation;
	if (!*monoui)
		return;

	unwindinfo = (MonoUnwindInfo*)*monoui;
	targetlocation = (guint64)&(((guchar*)code)[code_size]);
	targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);

	unwindinfo->runtimeFunction.EndAddress = code_size;
	unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);

	memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));

	codecount = unwindinfo->unwindInfo.CountOfCodes;
	if (codecount) {
		memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
				sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
	}

	g_free (unwindinfo);
	*monoui = 0;

	RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
}

#endif

#if MONO_SUPPORT_TASKLETS
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both call conventions */
	const guint kMaxCodeSize = NACL_SIZE (64, 128);

	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = mono_global_codeman_reserve (kMaxCodeSize);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy
	 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
	 */
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* setup the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	x86_cld (code);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);
	amd64_movsl (code);

	/* now restore the registers from the LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8);
#if !defined(__native_client_codegen__)
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8);
#endif
#ifdef TARGET_WIN32
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8);
#endif
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsp), 8);

	/* restore the lmf chain */
	/*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	saved = start;
	return (MonoContinuationRestore)saved;
}
#endif

/*
 * mono_arch_setup_resume_sighandler_ctx:
 *
 * Setup CTX so execution continues at FUNC.
 */
void
mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
{
	/*
	 * When resuming from a signal handler, the stack should be misaligned, just like right after
	 * a call.
	 */
	if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
		MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
	MONO_CONTEXT_SET_IP (ctx, func);
}
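
/*
 * E.g. an SP of 0x7fff00001000 (16-byte aligned) becomes 0x7fff00000ff8,
 * which matches the alignment state the AMD64 ABI guarantees right after a
 * call instruction has pushed its return address.
 */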