/*
 * exceptions-x86.c: exception support for x86
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */
#include <config.h>
#include <glib.h>
#include <signal.h>
#include <string.h>

#include <mono/arch/x86/x86-codegen.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini.h"
#include "mini-x86.h"
#include "tasklets.h"
#include "debug-mini.h"
static gpointer signal_exception_trampoline;

gpointer
mono_x86_get_signal_exception_trampoline (MonoTrampInfo **info, gboolean aot) MONO_INTERNAL;
#ifdef TARGET_WIN32
static void (*restore_stack) (void *);

static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
guint64 mono_win_chained_exception_filter_result;
gboolean mono_win_chained_exception_filter_didrun;

#ifndef PROCESS_CALLBACK_FILTER_ENABLED
# define PROCESS_CALLBACK_FILTER_ENABLED 1
#endif

#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler(0, ep, sctx)
/*
 * mono_win32_get_handle_stackoverflow (void):
 *
 * Returns a pointer to a helper which switches to the stack recorded in the
 * sigcontext, calls mono_arch_handle_exception () with the domain's stack
 * overflow exception, and then restores the original stack.
 */
static gpointer
mono_win32_get_handle_stackoverflow (void)
{
	static guint8 *start = NULL;
	guint8 *code;

	if (start)
		return start;

	/* restore_context (void *sigctx) */
	start = code = mono_global_codeman_reserve (128);

	/* load context into ebx */
	x86_mov_reg_membase (code, X86_EBX, X86_ESP, 4, 4);

	/* move current stack into edi for later restore */
	x86_mov_reg_reg (code, X86_EDI, X86_ESP, 4);

	/* use the new freed stack from sigcontext */
	x86_mov_reg_membase (code, X86_ESP, X86_EBX, G_STRUCT_OFFSET (struct sigcontext, esp), 4);

	/* get the current domain */
	x86_call_code (code, mono_domain_get);

	/* get stack overflow exception from domain object */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, G_STRUCT_OFFSET (MonoDomain, stack_overflow_ex), 4);

	/* call mono_arch_handle_exception (sctx, stack_overflow_exception_obj) */
	x86_push_reg (code, X86_EAX);
	x86_push_reg (code, X86_EBX);
	x86_call_code (code, mono_arch_handle_exception);

	/* restore the SEH handler stack */
	x86_mov_reg_reg (code, X86_ESP, X86_EDI, 4);

	/* return */
	x86_ret (code);

	return start;
}
/* Special hack to work around the fact that when the SEH handler is
 * called the stack is too small to recover.
 *
 * Stack walking part of this method is from mono_handle_exception
 *
 * The idea is simple:
 *  - walk the stack to free some space (64k)
 *  - set esp to the new stack location
 *  - call mono_arch_handle_exception with the stack overflow exception
 *  - set esp back to the SEH handler's stack
 *  - done
 */
static void
win32_handle_stack_overflow (EXCEPTION_POINTERS* ep, struct sigcontext *sctx)
{
	SYSTEM_INFO si;
	DWORD page_size;
	MonoDomain *domain = mono_domain_get ();
	MonoJitInfo rji;
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
	MonoLMF *lmf = jit_tls->lmf;
	MonoContext initial_ctx;
	MonoContext ctx;
	guint32 free_stack = 0;
	StackFrameInfo frame;

	/* convert sigcontext to MonoContext (due to reuse of stack walking helpers) */
	mono_arch_sigctx_to_monoctx (sctx, &ctx);

	/* get our os page size */
	GetSystemInfo(&si);
	page_size = si.dwPageSize;

	/* Let's walk the stack to recover
	 * the needed stack space (if possible)
	 */
	memset (&rji, 0, sizeof (rji));

	initial_ctx = ctx;
	free_stack = (guint8*)(MONO_CONTEXT_GET_BP (&ctx)) - (guint8*)(MONO_CONTEXT_GET_BP (&initial_ctx));

	/* try to free 64kb from our stack */
	do {
		MonoContext new_ctx;

		mono_arch_find_jit_info (domain, jit_tls, &rji, &ctx, &new_ctx, &lmf, NULL, &frame);
		if (!frame.ji) {
			g_warning ("Exception inside function without unwind info");
			g_assert_not_reached ();
		}

		if (frame.ji != (gpointer)-1) {
			free_stack = (guint8*)(MONO_CONTEXT_GET_BP (&ctx)) - (guint8*)(MONO_CONTEXT_GET_BP (&initial_ctx));
		}

		/* todo: we should call abort if ji is -1 */
		ctx = new_ctx;
	} while (free_stack < 64 * 1024 && frame.ji != (gpointer) -1);

	/* convert into sigcontext to be used in mono_arch_handle_exception */
	mono_arch_monoctx_to_sigctx (&ctx, sctx);

	/* todo: install new stack-guard page */

	/* use the new stack and call mono_arch_handle_exception () */
	restore_stack (sctx);
}
/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
 */
LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	struct sigcontext* sctx;
	LONG res;

	mono_win_chained_exception_filter_didrun = FALSE;
	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;
	sctx = g_malloc(sizeof(struct sigcontext));

	/* Copy Win32 context to UNIX style context */
	sctx->eax = ctx->Eax;
	sctx->ebx = ctx->Ebx;
	sctx->ecx = ctx->Ecx;
	sctx->edx = ctx->Edx;
	sctx->ebp = ctx->Ebp;
	sctx->esp = ctx->Esp;
	sctx->esi = ctx->Esi;
	sctx->edi = ctx->Edi;
	sctx->eip = ctx->Eip;

	switch (er->ExceptionCode) {
	case EXCEPTION_STACK_OVERFLOW:
		win32_handle_stack_overflow (ep, sctx);
		break;
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		break;
	}

	/* Copy context back */
	ctx->Eax = sctx->eax;
	ctx->Ebx = sctx->ebx;
	ctx->Ecx = sctx->ecx;
	ctx->Edx = sctx->edx;
	ctx->Ebp = sctx->ebp;
	ctx->Esp = sctx->esp;
	ctx->Esi = sctx->esi;
	ctx->Edi = sctx->edi;
	ctx->Eip = sctx->eip;

	g_free (sctx);

	if (mono_win_chained_exception_filter_didrun)
		res = mono_win_chained_exception_filter_result;

	return res;
}
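
/*
 * win32_seh_init:
 *
 *   Create the stack overflow helper and install seh_handler as the top-level
 * unhandled exception filter, remembering the previous filter so that
 * win32_seh_cleanup () can restore it.
 */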
void win32_seh_init()
{
	/* install restore stack helper */
	if (!restore_stack)
		restore_stack = mono_win32_get_handle_stackoverflow ();

	mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_handler);
}

void win32_seh_cleanup()
{
	if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);
}
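
/*
 * win32_seh_set_handler:
 *
 *   Register the runtime callback invoked by seh_handler for the given signal
 * type (SIGFPE, SIGILL or SIGSEGV).
 */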
void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* TARGET_WIN32 */
/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	/* restore_context (MonoContext *ctx) */

	start = code = mono_global_codeman_reserve (128);

	/* load ctx */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);

	/* get return address, stored in ECX */
	x86_mov_reg_membase (code, X86_ECX, X86_EAX, G_STRUCT_OFFSET (MonoContext, eip), 4);
	/* restore EBX */
	x86_mov_reg_membase (code, X86_EBX, X86_EAX, G_STRUCT_OFFSET (MonoContext, ebx), 4);
	/* restore EDI */
	x86_mov_reg_membase (code, X86_EDI, X86_EAX, G_STRUCT_OFFSET (MonoContext, edi), 4);
	/* restore ESI */
	x86_mov_reg_membase (code, X86_ESI, X86_EAX, G_STRUCT_OFFSET (MonoContext, esi), 4);
	/* restore ESP */
	x86_mov_reg_membase (code, X86_ESP, X86_EAX, G_STRUCT_OFFSET (MonoContext, esp), 4);
	/* save the return addr to the restored stack */
	x86_push_reg (code, X86_ECX);
	/* restore EBP */
	x86_mov_reg_membase (code, X86_EBP, X86_EAX, G_STRUCT_OFFSET (MonoContext, ebp), 4);
	/* restore ECX */
	x86_mov_reg_membase (code, X86_ECX, X86_EAX, G_STRUCT_OFFSET (MonoContext, ecx), 4);
	/* restore EDX */
	x86_mov_reg_membase (code, X86_EDX, X86_EAX, G_STRUCT_OFFSET (MonoContext, edx), 4);
	/* restore EAX */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, G_STRUCT_OFFSET (MonoContext, eax), 4);

	/* jump to the saved IP */
	x86_ret (code);

	nacl_global_codeman_validate(&start, 128, &code);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("restore_context"), start, code - start, ji, unwind_ops);
	else {
		GSList *l;

		for (l = unwind_ops; l; l = l->next)
			g_free (l->data);
		g_slist_free (unwind_ops);
	}

	return start;
}
/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	guint8* start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	guint kMaxCodeSize = NACL_SIZE (64, 128);

	/* call_filter (MonoContext *ctx, unsigned long eip) */
	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	x86_push_reg (code, X86_EBP);
	x86_mov_reg_reg (code, X86_EBP, X86_ESP, 4);
	x86_push_reg (code, X86_EBX);
	x86_push_reg (code, X86_EDI);
	x86_push_reg (code, X86_ESI);

	/* load ctx */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, 8, 4);
	/* load eip */
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, 12, 4);
	/* save EBP */
	x86_push_reg (code, X86_EBP);

	/* set new EBP */
	x86_mov_reg_membase (code, X86_EBP, X86_EAX, G_STRUCT_OFFSET (MonoContext, ebp), 4);
	/* restore registers used by global register allocation (EBX & ESI) */
	x86_mov_reg_membase (code, X86_EBX, X86_EAX, G_STRUCT_OFFSET (MonoContext, ebx), 4);
	x86_mov_reg_membase (code, X86_ESI, X86_EAX, G_STRUCT_OFFSET (MonoContext, esi), 4);
	x86_mov_reg_membase (code, X86_EDI, X86_EAX, G_STRUCT_OFFSET (MonoContext, edi), 4);

	/* align stack and save ESP */
	x86_mov_reg_reg (code, X86_EDX, X86_ESP, 4);
	x86_alu_reg_imm (code, X86_AND, X86_ESP, -MONO_ARCH_FRAME_ALIGNMENT);
	g_assert (MONO_ARCH_FRAME_ALIGNMENT >= 8);
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 8);
	x86_push_reg (code, X86_EDX);

	/* call the handler */
	x86_call_reg (code, X86_ECX);

	/* restore ESP */
	x86_pop_reg (code, X86_ESP);

	/* restore EBP */
	x86_pop_reg (code, X86_EBP);

	/* restore saved regs */
	x86_pop_reg (code, X86_ESI);
	x86_pop_reg (code, X86_EDI);
	x86_pop_reg (code, X86_EBX);
	x86_leave (code);
	x86_ret (code);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("call_filter"), start, code - start, ji, unwind_ops);
	else {
		GSList *l;

		for (l = unwind_ops; l; l = l->next)
			g_free (l->data);
		g_slist_free (unwind_ops);
	}

	g_assert ((code - start) < kMaxCodeSize);
	return start;
}
/*
 * mono_x86_throw_exception:
 *
 * C function called from the throw trampolines.
 */
void
mono_x86_throw_exception (mgreg_t *regs, MonoObject *exc,
						  mgreg_t eip, gboolean rethrow)
{
	static void (*restore_context) (MonoContext *);
	MonoContext ctx;

	if (!restore_context)
		restore_context = mono_get_restore_context ();

	ctx.esp = regs [X86_ESP];
	ctx.eip = eip;
	ctx.ebp = regs [X86_EBP];
	ctx.edi = regs [X86_EDI];
	ctx.esi = regs [X86_ESI];
	ctx.ebx = regs [X86_EBX];
	ctx.edx = regs [X86_EDX];
	ctx.ecx = regs [X86_ECX];
	ctx.eax = regs [X86_EAX];

#ifdef __APPLE__
	/* The OSX ABI specifies 16 byte alignment at call sites */
	g_assert ((ctx.esp % MONO_ARCH_FRAME_ALIGNMENT) == 0);
#endif

	if (mono_object_isinst (exc, mono_defaults.exception_class)) {
		MonoException *mono_ex = (MonoException*)exc;
		if (!rethrow)
			mono_ex->stack_trace = NULL;
	}

	if (mono_debug_using_mono_debugger ()) {
		guint8 buf [16], *code;

		mono_breakpoint_clean_code (NULL, (gpointer)eip, 8, buf, sizeof (buf));
		code = buf + 8;

		if (buf [3] == 0xe8) {
			MonoContext ctx_cp = ctx;
			ctx_cp.eip = eip - 5;

			if (mono_debugger_handle_exception (&ctx_cp, exc)) {
				restore_context (&ctx_cp);
				g_assert_not_reached ();
			}
		}
	}

	/* adjust eip so that it points into the call instruction */
	ctx.eip -= 1;

	mono_handle_exception (&ctx, exc);

	restore_context (&ctx);

	g_assert_not_reached ();
}
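
/*
 * mono_x86_throw_corlib_exception:
 *
 *   C function called from the throw trampolines. Creates the corlib exception
 * identified by EX_TOKEN_INDEX, adjusts EIP by PC_OFFSET so it points at the
 * throwing call site, and hands off to mono_x86_throw_exception ().
 */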
void
mono_x86_throw_corlib_exception (mgreg_t *regs, guint32 ex_token_index,
								 mgreg_t eip, gint32 pc_offset)
{
	guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
	MonoException *ex;

	ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);

	eip -= pc_offset;

	/* Negate the ip adjustment done in mono_x86_throw_exception () */
	eip += 1;

	mono_x86_throw_exception (regs, (MonoObject*)ex, eip, FALSE);
}
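
/*
 * mono_x86_resume_unwind:
 *
 *   C function called from the resume_unwind trampoline. Rebuilds a MonoContext
 * from the saved register array and resumes unwinding by calling
 * mono_resume_unwind ().
 */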
static void
mono_x86_resume_unwind (mgreg_t *regs, MonoObject *exc,
						mgreg_t eip, gboolean rethrow)
{
	MonoContext ctx;

	ctx.esp = regs [X86_ESP];
	ctx.eip = eip;
	ctx.ebp = regs [X86_EBP];
	ctx.edi = regs [X86_EDI];
	ctx.esi = regs [X86_ESI];
	ctx.ebx = regs [X86_EBX];
	ctx.edx = regs [X86_EDX];
	ctx.ecx = regs [X86_ECX];
	ctx.eax = regs [X86_EAX];

	mono_resume_unwind (&ctx);
}

/*
 * get_throw_trampoline:
 *
 * Generate a call to mono_x86_throw_exception/
 * mono_x86_throw_corlib_exception.
 * If LLVM is true, generate code which assumes the caller is LLVM generated code,
 * which doesn't push the arguments.
 */
static guint8*
get_throw_trampoline (const char *name, gboolean rethrow, gboolean llvm, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, MonoTrampInfo **info, gboolean aot)
{
	guint8 *start, *code;
	int i, stack_size, stack_offset, arg_offsets [5], regs_offset;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	stack_size = 128;

	/*
	 * On apple, the stack is misaligned by the pushing of the return address.
	 */
	if (!llvm && corlib)
		/* On OSX, we don't generate alignment code to save space */
		stack_size += 4;
	else
		stack_size += MONO_ARCH_FRAME_ALIGNMENT - 4;

	/*
	 * The stack looks like this:
	 * <pc offset> (only if corlib is TRUE)
	 * <exception object>/<type token>
	 * <return addr> <- esp (unaligned on apple)
	 */

	mono_add_unwind_op_def_cfa (unwind_ops, (guint8*)NULL, (guint8*)NULL, X86_ESP, 4);
	mono_add_unwind_op_offset (unwind_ops, (guint8*)NULL, (guint8*)NULL, X86_NREG, -4);

	/* Alloc frame */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, stack_size);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 4);

	arg_offsets [0] = 0;
	arg_offsets [1] = 4;
	arg_offsets [2] = 8;
	arg_offsets [3] = 12;
	regs_offset = 16;

	/* Save registers */
	for (i = 0; i < X86_NREG; ++i)
		if (i != X86_ESP)
			x86_mov_membase_reg (code, X86_ESP, regs_offset + (i * 4), i, 4);
	/* Calculate the offset between the current sp and the sp of the caller */
	if (llvm) {
		/* LLVM doesn't push the arguments */
		stack_offset = stack_size + 4;
	} else {
		if (corlib) {
			/* Two arguments */
			stack_offset = stack_size + 4 + 8;
#ifdef __APPLE__
			/* We don't generate stack alignment code on osx to save space */
#endif
		} else {
			/* One argument + stack alignment */
			stack_offset = stack_size + 4 + 4;
#ifdef __APPLE__
			/* Pop the alignment added by OP_THROW too */
			stack_offset += MONO_ARCH_FRAME_ALIGNMENT - 4;
#else
			if (mono_do_x86_stack_align)
				stack_offset += MONO_ARCH_FRAME_ALIGNMENT - 4;
#endif
		}
	}
	/* Save ESP */
	x86_lea_membase (code, X86_EAX, X86_ESP, stack_offset);
	x86_mov_membase_reg (code, X86_ESP, regs_offset + (X86_ESP * 4), X86_EAX, 4);

	/* Set arg1 == regs */
	x86_lea_membase (code, X86_EAX, X86_ESP, regs_offset);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [0], X86_EAX, 4);
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		x86_mov_reg_imm (code, X86_EAX, 0);
	else
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size + 4, 4);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [1], X86_EAX, 4);
	/* Set arg3 == eip */
	if (llvm_abs)
		x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
	else
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size, 4);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [2], X86_EAX, 4);
	/* Set arg4 == rethrow/pc_offset */
	if (resume_unwind) {
		x86_mov_membase_imm (code, X86_ESP, arg_offsets [3], 0, 4);
	} else if (corlib) {
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size + 8, 4);
		if (llvm_abs) {
			/*
			 * The caller is LLVM code which passes the absolute address not a pc offset,
			 * so compensate by passing 0 as 'ip' and passing the negated abs address as
			 * the pc offset.
			 */
			x86_neg_reg (code, X86_EAX);
		}
		x86_mov_membase_reg (code, X86_ESP, arg_offsets [3], X86_EAX, 4);
	} else {
		x86_mov_membase_imm (code, X86_ESP, arg_offsets [3], rethrow, 4);
	}
	/* Make the call */
	if (aot) {
		// This can be called from runtime code, which can't guarantee that
		// ebx contains the got address.
		// So emit the got address loading code too
		code = mono_arch_emit_load_got_addr (start, code, NULL, &ji);
		code = mono_arch_emit_load_aotconst (start, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_x86_throw_corlib_exception" : "mono_x86_throw_exception");
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, resume_unwind ? (gpointer)(mono_x86_resume_unwind) : (corlib ? (gpointer)mono_x86_throw_corlib_exception : (gpointer)mono_x86_throw_exception));
	}
	x86_breakpoint (code);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	g_assert ((code - start) < kMaxCodeSize);

	if (info)
		*info = mono_tramp_info_create (g_strdup (name), start, code - start, ji, unwind_ops);
	else {
		GSList *l;

		for (l = unwind_ops; l; l = l->next)
			g_free (l->data);
		g_slist_free (unwind_ops);
	}

	return start;
}
/**
 * mono_arch_get_throw_exception:
 *
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 * For example to raise an arithmetic exception you can use:
 *
 * x86_push_imm (code, mono_get_exception_arithmetic ());
 * x86_call_code (code, arch_get_throw_exception ());
 *
 */
gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline ("throw_exception", FALSE, FALSE, FALSE, FALSE, FALSE, info, aot);
}
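
/*
 * mono_arch_get_rethrow_exception:
 *
 *   Same as mono_arch_get_throw_exception (), but the returned function
 * rethrows the exception, preserving the stack trace already captured in it.
 */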
gpointer
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline ("rethrow_exception", TRUE, FALSE, FALSE, FALSE, FALSE, info, aot);
}
/**
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline ("throw_corlib_exception", FALSE, FALSE, TRUE, FALSE, FALSE, info, aot);
}
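
/*
 * mono_arch_exceptions_init:
 *
 *   Perform architecture specific exception handling initialization: tweak the
 * Windows usermode exception policy when running under WoW64, and create (or,
 * in AOT-only mode, load) the signal exception and LLVM throw trampolines.
 */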
void
mono_arch_exceptions_init (void)
{
	guint8 *tramp;

/*
 * If we're running WoW64, we need to set the usermode exception policy
 * for SEHs to behave. This requires hotfix http://support.microsoft.com/kb/976038
 * or (eventually) Windows 7 SP1.
 */
#ifdef HOST_WIN32
	DWORD flags;
	FARPROC getter;
	FARPROC setter;
	HMODULE kernel32 = LoadLibraryW (L"kernel32.dll");

	if (kernel32) {
		getter = GetProcAddress (kernel32, "GetProcessUserModeExceptionPolicy");
		setter = GetProcAddress (kernel32, "SetProcessUserModeExceptionPolicy");
		if (getter && setter) {
			if (getter (&flags))
				setter (flags & ~PROCESS_CALLBACK_FILTER_ENABLED);
		}
	}
#endif

	if (mono_aot_only) {
		signal_exception_trampoline = mono_aot_get_trampoline ("x86_signal_exception_trampoline");
		return;
	}

	/* LLVM needs different throw trampolines */
	tramp = get_throw_trampoline ("llvm_throw_exception_trampoline", FALSE, TRUE, FALSE, FALSE, FALSE, NULL, FALSE);
	mono_register_jit_icall (tramp, "llvm_throw_exception_trampoline", NULL, TRUE);

	tramp = get_throw_trampoline ("llvm_rethrow_exception_trampoline", FALSE, TRUE, FALSE, FALSE, FALSE, NULL, FALSE);
	mono_register_jit_icall (tramp, "llvm_rethrow_exception_trampoline", NULL, TRUE);

	tramp = get_throw_trampoline ("llvm_throw_corlib_exception_trampoline", FALSE, TRUE, TRUE, FALSE, FALSE, NULL, FALSE);
	mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);

	tramp = get_throw_trampoline ("llvm_throw_corlib_exception_abs_trampoline", FALSE, TRUE, TRUE, TRUE, FALSE, NULL, FALSE);
	mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);

	tramp = get_throw_trampoline ("llvm_resume_unwind_trampoline", FALSE, FALSE, FALSE, FALSE, TRUE, NULL, FALSE);
	mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);

	signal_exception_trampoline = mono_x86_get_signal_exception_trampoline (NULL, FALSE);
}
/*
 * mono_arch_find_jit_info:
 *
 * See exceptions-amd64.c for docs.
 */
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
						 MonoJitInfo *ji, MonoContext *ctx,
						 MonoContext *new_ctx, MonoLMF **lmf,
						 mgreg_t **save_locations,
						 StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		gssize regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		frame->type = FRAME_TYPE_MANAGED;

		if (ji->from_aot)
			unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
		else
			unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);

		regs [X86_EAX] = new_ctx->eax;
		regs [X86_EBX] = new_ctx->ebx;
		regs [X86_ECX] = new_ctx->ecx;
		regs [X86_EDX] = new_ctx->edx;
		regs [X86_ESP] = new_ctx->esp;
		regs [X86_EBP] = new_ctx->ebp;
		regs [X86_ESI] = new_ctx->esi;
		regs [X86_EDI] = new_ctx->edi;
		regs [X86_NREG] = new_ctx->eip;

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   ip, regs, MONO_MAX_IREGS + 1,
						   save_locations, MONO_MAX_IREGS, &cfa);

		new_ctx->eax = regs [X86_EAX];
		new_ctx->ebx = regs [X86_EBX];
		new_ctx->ecx = regs [X86_ECX];
		new_ctx->edx = regs [X86_EDX];
		new_ctx->esp = regs [X86_ESP];
		new_ctx->ebp = regs [X86_EBP];
		new_ctx->esi = regs [X86_ESI];
		new_ctx->edi = regs [X86_EDI];
		new_ctx->eip = regs [X86_NREG];

		/* The CFA becomes the new SP value */
		new_ctx->esp = (gssize)cfa;

		/* Adjust IP */
		new_ctx->eip --;

		if (*lmf && (MONO_CONTEXT_GET_BP (ctx) >= (gpointer)(*lmf)->ebp)) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);
		}

		/* Pop arguments off the stack */
		/*
		 * FIXME: LLVM doesn't push these, we can't use ji->from_llvm as it describes
		 * the callee.
		 */
#ifndef ENABLE_LLVM
		if (ji->has_arch_eh_info)
			new_ctx->esp += mono_jit_info_get_arch_eh_info (ji)->stack_size;
#endif

		return TRUE;
	} else if (*lmf) {

		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		if ((ji = mini_jit_info_table_find (domain, (gpointer)(*lmf)->eip, NULL))) {
		} else {
			if (!((guint32)((*lmf)->previous_lmf) & 1))
				/* Top LMF entry */
				return FALSE;
			g_assert_not_reached ();
			/* Trampoline lmf frame */
			frame->method = (*lmf)->method;
		}

		new_ctx->esi = (*lmf)->esi;
		new_ctx->edi = (*lmf)->edi;
		new_ctx->ebx = (*lmf)->ebx;
		new_ctx->ebp = (*lmf)->ebp;
		new_ctx->eip = (*lmf)->eip;

		/* Adjust IP */
		new_ctx->eip --;

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		/* Check if we are in a trampoline LMF frame */
		if ((guint32)((*lmf)->previous_lmf) & 1) {
			/* lmf->esp is set by the trampoline code */
			new_ctx->esp = (*lmf)->esp;

			/* Pop arguments off the stack */
			/* FIXME: Handle the delegate case too ((*lmf)->method == NULL) */
			/* FIXME: Handle the IMT/vtable case too */
#if 0
#ifndef ENABLE_LLVM
			if ((*lmf)->method) {
				MonoMethod *method = (*lmf)->method;
				MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (method)->param_count + 1);

				guint32 stack_to_pop = mono_arch_get_argument_info (NULL, mono_method_signature (method), mono_method_signature (method)->param_count, arg_info);
				new_ctx->esp += stack_to_pop;
			}
#endif
#endif
		}
		else
			/* the lmf is always stored on the stack, so the following
			 * expression points to a stack location which can be used as ESP */
			new_ctx->esp = (unsigned long)&((*lmf)->eip);

		*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}
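
/* Thin wrappers converting between the platform sigcontext and MonoContext */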
void
mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
{
	mono_sigctx_to_monoctx (sigctx, mctx);
}

void
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
{
	mono_monoctx_to_sigctx (mctx, sigctx);
}
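
/*
 * mono_arch_ip_from_context:
 *
 *   Return the instruction pointer stored in the signal context SIGCTX.
 */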
gpointer
mono_arch_ip_from_context (void *sigctx)
{
#if defined(__native_client__)
	printf("WARNING: mono_arch_ip_from_context() called!\n");
	return (NULL);
#else
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;
	return (gpointer)UCONTEXT_REG_EIP (ctx);
#else
	struct sigcontext *ctx = sigctx;
	return (gpointer)ctx->SC_EIP;
#endif
#endif /* __native_client__ */
}
/*
 * handle_signal_exception:
 *
 *   Called when execution resumes from a signal handler.
 */
static void
handle_signal_exception (gpointer obj)
{
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
	MonoContext ctx;
	static void (*restore_context) (MonoContext *);

	if (!restore_context)
		restore_context = mono_get_restore_context ();

	memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));

	if (mono_debugger_handle_exception (&ctx, (MonoObject *)obj))
		return;

	mono_handle_exception (&ctx, obj);

	restore_context (&ctx);
}
/*
 * mono_x86_get_signal_exception_trampoline:
 *
 *   This x86 specific trampoline is used to call handle_signal_exception.
 */
gpointer
mono_x86_get_signal_exception_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start, *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int stack_size;

	start = code = mono_global_codeman_reserve (128);

	/* Caller ip */
	x86_push_reg (code, X86_ECX);

	mono_add_unwind_op_def_cfa (unwind_ops, (guint8*)NULL, (guint8*)NULL, X86_ESP, 4);
	mono_add_unwind_op_offset (unwind_ops, (guint8*)NULL, (guint8*)NULL, X86_NREG, -4);

	/* Fix the alignment to be what apple expects */
	stack_size = 12;

	x86_alu_reg_imm (code, X86_SUB, X86_ESP, stack_size);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 4);

	/* Arg1 */
	x86_mov_membase_reg (code, X86_ESP, 0, X86_EAX, 4);
	/* Branch to target */
	x86_call_reg (code, X86_EDX);

	g_assert ((code - start) < 128);

	if (info)
		*info = mono_tramp_info_create (g_strdup ("x86_signal_exception_trampoline"), start, code - start, ji, unwind_ops);
	else {
		GSList *l;

		for (l = unwind_ops; l; l = l->next)
			g_free (l->data);
		g_slist_free (unwind_ops);
	}

	return start;
}
void
mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
{
	/*
	 * Can't pass the obj on the stack, since we are executing on the
	 * same stack. Can't save it into MonoJitTlsData, since it needs GC tracking.
	 * So put it into a register, and branch to a trampoline which
	 * pushes it.
	 */
	ctx->eax = (mgreg_t)user_data;
	ctx->ecx = ctx->eip;
	ctx->edx = (mgreg_t)async_cb;

	/* align the stack */
	ctx->esp = (ctx->esp - 16) & ~15;
	ctx->eip = (mgreg_t)signal_exception_trampoline;
}
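
/*
 * mono_arch_handle_exception:
 *
 *   Called from the signal handler to start handling OBJ. Where possible the
 * work is deferred: the signal context is saved in TLS and the context is
 * rewritten so that execution resumes in handle_signal_exception () on the
 * normal stack.
 */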
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoContext mctx;
	ucontext_t *ctx = (ucontext_t*)sigctx;

	/*
	 * Handling the exception in the signal handler is problematic, since the original
	 * signal is disabled, and we could run arbitrary code through the debugger. So
	 * resume into the normal stack and do most work there if possible.
	 */
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);

	/* Pass the ctx parameter in TLS */
	mono_arch_sigctx_to_monoctx (ctx, &jit_tls->ex_ctx);

	mctx = jit_tls->ex_ctx;
	mono_setup_async_callback (&mctx, handle_signal_exception, obj);
	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#elif defined (TARGET_WIN32)
	MonoContext mctx;
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
	struct sigcontext *ctx = (struct sigcontext *)sigctx;

	mono_arch_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);

	mctx = jit_tls->ex_ctx;
	mono_setup_async_callback (&mctx, handle_signal_exception, obj);
	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#else
	MonoContext mctx;

	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj))
		return TRUE;

	mono_handle_exception (&mctx, obj);

	mono_arch_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#endif
}
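
/*
 * restore_soft_guard_pages:
 *
 *   Re-protect the soft stack guard area after a stack overflow has been
 * handled. Reached through the return address set up by
 * prepare_for_guard_pages ().
 */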
static void
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
}

/*
 * this function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.eip, but in a function that
 * will restore the protection on the soft-guard pages and return back to
 * continue at mctx.eip.
 */
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;
	sp = (gpointer)(mctx->esp);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->eip);
	mctx->eip = (unsigned long)restore_soft_guard_pages;
	mctx->esp = (unsigned long)sp;
}
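
/*
 * altstack_handle_and_restore:
 *
 *   Entry point on the original stack after an altstack signal: handles OBJ,
 * optionally re-arms the stack guard pages, and restores CTX.
 */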
static void
altstack_handle_and_restore (MonoContext *ctx, gpointer obj, gboolean stack_ovf)
{
	void (*restore_context) (MonoContext *);
	MonoContext mctx;

	restore_context = mono_get_restore_context ();
	mctx = *ctx;

	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) {
		if (stack_ovf)
			prepare_for_guard_pages (&mctx);
		restore_context (&mctx);
	}

	mono_handle_exception (&mctx, obj);
	if (stack_ovf)
		prepare_for_guard_pages (&mctx);
	restore_context (&mctx);
}
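
/*
 * mono_arch_handle_altstack_exception:
 *
 *   Called from the SIGSEGV signal handler running on the alternate signal
 * stack. Builds a call frame on the original stack and redirects the signal
 * context so that execution resumes in altstack_handle_and_restore ().
 */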
void
mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
{
#ifdef MONO_ARCH_USE_SIGACTION
	MonoException *exc = NULL;
	ucontext_t *ctx = (ucontext_t*)sigctx;
	MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)UCONTEXT_REG_EIP (ctx), NULL);
	gpointer *sp;
	int frame_size;

	/* if we didn't find a managed method for the ip address and it matches the fault
	 * address, we assume we followed a broken pointer during an indirect call, so
	 * we try the lookup again with the return address pushed on the stack
	 */
	if (!ji && fault_addr == (gpointer)UCONTEXT_REG_EIP (ctx)) {
		glong *sp = (gpointer)UCONTEXT_REG_ESP (ctx);
		ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)sp [0], NULL);
		if (ji)
			UCONTEXT_REG_EIP (ctx) = sp [0];
	}
	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;
	if (!ji)
		mono_handle_native_sigsegv (SIGSEGV, sigctx);
	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * If this was a stack overflow the caller already ensured the stack pages
	 * needed have been unprotected.
	 * The frame looks like:
	 *   ucontext struct
	 *   test_only arg
	 *   exception arg
	 *   ctx arg
	 *   return ip
	 */
	// FIXME: test_only is no more.
	frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4;
	frame_size += 15;
	frame_size &= ~15;
	sp = (gpointer)(UCONTEXT_REG_ESP (ctx) & ~15);
	sp = (gpointer)((char*)sp - frame_size);
	/* the incoming arguments are aligned to 16 byte boundaries, so the return address IP
	 * goes at sp [-1]
	 */
	sp [-1] = (gpointer)UCONTEXT_REG_EIP (ctx);
	sp [0] = sp + 4;
	sp [1] = exc;
	sp [2] = (gpointer)stack_ovf;
	mono_sigctx_to_monoctx (sigctx, (MonoContext*)(sp + 4));
	/* at the return from the signal handler execution starts in altstack_handle_and_restore() */
	UCONTEXT_REG_EIP (ctx) = (unsigned long)altstack_handle_and_restore;
	UCONTEXT_REG_ESP (ctx) = (unsigned long)(sp - 1);
#endif
}
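
/*
 * mono_tasklets_arch_restore:
 *
 *   (Only built with MONO_SUPPORT_TASKLETS.) Returns a generated helper with the
 * signature restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr)
 * which copies the continuation's saved stack back into place, restores the
 * callee saved registers from the saved LMF and jumps to the continuation's
 * return address with STATE as the return value.
 */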
#if MONO_SUPPORT_TASKLETS
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;

#ifdef __native_client_codegen__
	g_print("mono_tasklets_arch_restore needs to be aligned for Native Client\n");
#endif
	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = mono_global_codeman_reserve (48);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* put cont in edx */
	x86_mov_reg_membase (code, X86_EDX, X86_ESP, 4, 4);
	/* state in eax, so it's setup as the return value */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, 8, 4);

	/* setup the copy of the stack */
	x86_mov_reg_membase (code, X86_ECX, X86_EDX, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), 4);
	x86_shift_reg_imm (code, X86_SHR, X86_ECX, 2);
	x86_cld (code);
	x86_mov_reg_membase (code, X86_ESI, X86_EDX, G_STRUCT_OFFSET (MonoContinuation, saved_stack), 4);
	x86_mov_reg_membase (code, X86_EDI, X86_EDX, G_STRUCT_OFFSET (MonoContinuation, return_sp), 4);
	x86_prefix (code, X86_REP_PREFIX);
	x86_movsl (code);

	/* now restore the registers from the LMF */
	x86_mov_reg_membase (code, X86_ECX, X86_EDX, G_STRUCT_OFFSET (MonoContinuation, lmf), 4);
	x86_mov_reg_membase (code, X86_EBX, X86_ECX, G_STRUCT_OFFSET (MonoLMF, ebx), 4);
	x86_mov_reg_membase (code, X86_EBP, X86_ECX, G_STRUCT_OFFSET (MonoLMF, ebp), 4);
	x86_mov_reg_membase (code, X86_ESI, X86_ECX, G_STRUCT_OFFSET (MonoLMF, esi), 4);
	x86_mov_reg_membase (code, X86_EDI, X86_ECX, G_STRUCT_OFFSET (MonoLMF, edi), 4);

	/* restore the lmf chain */
	/*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/

	x86_jump_membase (code, X86_EDX, G_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= 48);
	saved = start;
	return (MonoContinuationRestore)saved;
}
#endif
/*
 * mono_arch_setup_resume_sighandler_ctx:
 *
 *   Setup CTX so execution continues at FUNC.
 */
void
mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
{
	int align = (((gint32)MONO_CONTEXT_GET_SP (ctx)) % MONO_ARCH_FRAME_ALIGNMENT + 4);

	if (align != 0)
		MONO_CONTEXT_SET_SP (ctx, (gsize)MONO_CONTEXT_GET_SP (ctx) - align);

	MONO_CONTEXT_SET_IP (ctx, func);
}