* TextControl.cs: Make PageUp and PageDown more like the
[mono-project.git] / mono / mini / exceptions-amd64.c
blob4123acd071d1e1473b8797ada5824900154eb760
/*
 * exceptions-amd64.c: exception support for AMD64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */
10 #include <config.h>
11 #include <glib.h>
12 #include <signal.h>
13 #include <string.h>
14 #include <sys/ucontext.h>
16 #include <mono/arch/amd64/amd64-codegen.h>
17 #include <mono/metadata/appdomain.h>
18 #include <mono/metadata/tabledefs.h>
19 #include <mono/metadata/threads.h>
20 #include <mono/metadata/debug-helpers.h>
21 #include <mono/metadata/exception.h>
22 #include <mono/metadata/gc-internal.h>
23 #include <mono/metadata/mono-debug.h>
25 #include "mini.h"
26 #include "mini-amd64.h"
28 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
#ifdef PLATFORM_WIN32

/* Per-signal handlers installed by win32_seh_set_handler () */
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

/* Previously installed top-level filter, restored on cleanup */
static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;

#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler((int)sctx)

/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler: translates the Win32
 * EXCEPTION_RECORD into a MonoContext, dispatches to the matching
 * mono handler, then copies the (possibly modified) context back.
 */
LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	MonoContext* sctx;
	LONG res;

	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;
	sctx = g_malloc(sizeof(MonoContext));

	/* Copy Win32 context to UNIX style context */
	sctx->eax = ctx->Eax;
	sctx->ebx = ctx->Ebx;
	sctx->ecx = ctx->Ecx;
	sctx->edx = ctx->Edx;
	sctx->ebp = ctx->Ebp;
	sctx->esp = ctx->Esp;
	sctx->esi = ctx->Esi;
	sctx->edi = ctx->Edi;
	sctx->eip = ctx->Eip;

	switch (er->ExceptionCode) {
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		break;
	}

	/* Copy context back */
	ctx->Eax = sctx->eax;
	ctx->Ebx = sctx->ebx;
	ctx->Ecx = sctx->ecx;
	ctx->Edx = sctx->edx;
	ctx->Ebp = sctx->ebp;
	ctx->Esp = sctx->esp;
	ctx->Esi = sctx->esi;
	ctx->Edi = sctx->edi;
	ctx->Eip = sctx->eip;

	/* FIX: sctx was leaked on every handled exception */
	g_free (sctx);

	return res;
}

/* Install seh_handler as the process-wide unhandled exception filter. */
void win32_seh_init()
{
	old_handler = SetUnhandledExceptionFilter(seh_handler);
}

/* Restore the filter that was active before win32_seh_init (). */
void win32_seh_cleanup()
{
	if (old_handler) SetUnhandledExceptionFilter(old_handler);
}

/* Register @handler for the given signal type (SIGFPE/SIGILL/SIGSEGV). */
void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* PLATFORM_WIN32 */
131 * mono_arch_get_restore_context:
133 * Returns a pointer to a method which restores a previously saved sigcontext.
135 gpointer
136 mono_arch_get_restore_context (void)
138 static guint8 *start = NULL;
139 static gboolean inited = FALSE;
140 guint8 *code;
142 if (inited)
143 return start;
145 /* restore_contect (MonoContext *ctx) */
147 start = code = mono_global_codeman_reserve (256);
149 /* get return address */
150 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rip), 8);
152 /* Restore registers */
153 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbp), 8);
154 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbx), 8);
155 amd64_mov_reg_membase (code, AMD64_R12, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r12), 8);
156 amd64_mov_reg_membase (code, AMD64_R13, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r13), 8);
157 amd64_mov_reg_membase (code, AMD64_R14, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r14), 8);
158 amd64_mov_reg_membase (code, AMD64_R15, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r15), 8);
160 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rsp), 8);
162 /* jump to the saved IP */
163 amd64_jump_reg (code, AMD64_RAX);
165 inited = TRUE;
167 return start;
171 * mono_arch_get_call_filter:
173 * Returns a pointer to a method which calls an exception filter. We
174 * also use this function to call finally handlers (we pass NULL as
175 * @exc object in this case).
177 gpointer
178 mono_arch_get_call_filter (void)
180 static guint8 *start;
181 static gboolean inited = FALSE;
182 int i;
183 guint8 *code;
184 guint32 pos;
186 if (inited)
187 return start;
189 start = code = mono_global_codeman_reserve (64);
191 /* call_filter (MonoContext *ctx, unsigned long eip) */
192 code = start;
194 /* Alloc new frame */
195 amd64_push_reg (code, AMD64_RBP);
196 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
198 /* Save callee saved regs */
199 pos = 0;
200 for (i = 0; i < AMD64_NREG; ++i)
201 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
202 amd64_push_reg (code, i);
203 pos += 8;
206 /* Save EBP */
207 pos += 8;
208 amd64_push_reg (code, AMD64_RBP);
210 /* Make stack misaligned, the call will make it aligned again */
211 if (! (pos & 8))
212 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
214 /* set new EBP */
215 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbp), 8);
216 /* load callee saved regs */
217 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbx), 8);
218 amd64_mov_reg_membase (code, AMD64_R12, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r12), 8);
219 amd64_mov_reg_membase (code, AMD64_R13, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r13), 8);
220 amd64_mov_reg_membase (code, AMD64_R14, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r14), 8);
221 amd64_mov_reg_membase (code, AMD64_R15, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r15), 8);
223 /* call the handler */
224 amd64_call_reg (code, AMD64_RSI);
226 if (! (pos & 8))
227 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
229 /* restore RBP */
230 amd64_pop_reg (code, AMD64_RBP);
232 /* Restore callee saved regs */
233 for (i = AMD64_NREG; i >= 0; --i)
234 if (AMD64_IS_CALLEE_SAVED_REG (i))
235 amd64_pop_reg (code, i);
237 amd64_leave (code);
238 amd64_ret (code);
240 g_assert ((code - start) < 64);
242 inited = TRUE;
244 return start;
247 static void
248 throw_exception (MonoObject *exc, guint64 rip, guint64 rsp,
249 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
250 guint64 r14, guint64 r15, guint64 rethrow)
252 static void (*restore_context) (MonoContext *);
253 MonoContext ctx;
255 if (!restore_context)
256 restore_context = mono_arch_get_restore_context ();
258 ctx.rsp = rsp;
259 ctx.rip = rip;
260 ctx.rbx = rbx;
261 ctx.rbp = rbp;
262 ctx.r12 = r12;
263 ctx.r13 = r13;
264 ctx.r14 = r14;
265 ctx.r15 = r15;
267 if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
269 * The debugger wants us to stop on the `throw' instruction.
270 * By the time we get here, it already inserted a breakpoint on
271 * eip - 8 (which is the address of the `mov %r15,%rdi ; callq throw').
274 /* FIXME FIXME
276 * In case of a rethrow, the JIT is emitting code like this:
278 * mov 0xffffffffffffffd0(%rbp),%rax'
279 * mov %rax,%rdi
280 * callq throw
282 * Here, restore_context() wouldn't restore the %rax register correctly.
284 ctx.rip = rip - 8;
285 ctx.rsp = rsp + 8;
286 restore_context (&ctx);
287 g_assert_not_reached ();
290 /* adjust eip so that it point into the call instruction */
291 ctx.rip -= 1;
293 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
294 MonoException *mono_ex = (MonoException*)exc;
295 if (!rethrow)
296 mono_ex->stack_trace = NULL;
298 mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
299 restore_context (&ctx);
301 g_assert_not_reached ();
304 static gpointer
305 get_throw_trampoline (gboolean rethrow)
307 guint8* start;
308 guint8 *code;
310 start = code = mono_global_codeman_reserve (64);
312 code = start;
314 /* Exception */
315 amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RDI, 8);
316 /* IP */
317 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RSP, 0, 8);
318 /* SP */
319 amd64_lea_membase (code, AMD64_RDX, AMD64_RSP, 8);
320 /* Callee saved regs */
321 amd64_mov_reg_reg (code, AMD64_RCX, AMD64_RBX, 8);
322 amd64_mov_reg_reg (code, AMD64_R8, AMD64_RBP, 8);
323 amd64_mov_reg_reg (code, AMD64_R9, AMD64_R12, 8);
324 /* align stack */
325 amd64_push_imm (code, 0);
326 /* reverse order */
327 amd64_push_imm (code, rethrow);
328 amd64_push_reg (code, AMD64_R15);
329 amd64_push_reg (code, AMD64_R14);
330 amd64_push_reg (code, AMD64_R13);
332 amd64_mov_reg_imm (code, AMD64_R11, throw_exception);
333 amd64_call_reg (code, AMD64_R11);
334 amd64_breakpoint (code);
336 g_assert ((code - start) < 64);
338 return start;
342 * mono_arch_get_throw_exception:
344 * Returns a function pointer which can be used to raise
345 * exceptions. The returned function has the following
346 * signature: void (*func) (MonoException *exc);
349 gpointer
350 mono_arch_get_throw_exception (void)
352 static guint8* start;
353 static gboolean inited = FALSE;
355 if (inited)
356 return start;
358 start = get_throw_trampoline (FALSE);
360 inited = TRUE;
362 return start;
365 gpointer
366 mono_arch_get_rethrow_exception (void)
368 static guint8* start;
369 static gboolean inited = FALSE;
371 if (inited)
372 return start;
374 start = get_throw_trampoline (TRUE);
376 inited = TRUE;
378 return start;
381 gpointer
382 mono_arch_get_throw_exception_by_name (void)
384 static guint8* start;
385 static gboolean inited = FALSE;
386 guint8 *code;
388 if (inited)
389 return start;
391 start = code = mono_global_codeman_reserve (64);
393 /* Not used on amd64 */
394 amd64_breakpoint (code);
396 return start;
400 * mono_arch_get_throw_corlib_exception:
402 * Returns a function pointer which can be used to raise
403 * corlib exceptions. The returned function has the following
404 * signature: void (*func) (guint32 ex_token, guint32 offset);
405 * Here, offset is the offset which needs to be substracted from the caller IP
406 * to get the IP of the throw. Passing the offset has the advantage that it
407 * needs no relocations in the caller.
409 gpointer
410 mono_arch_get_throw_corlib_exception (void)
412 static guint8* start;
413 static gboolean inited = FALSE;
414 guint8 *code;
415 guint64 throw_ex;
417 if (inited)
418 return start;
420 start = code = mono_global_codeman_reserve (64);
422 /* Push throw_ip */
423 amd64_push_reg (code, AMD64_RSI);
425 /* Call exception_from_token */
426 amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RDI, 8);
427 amd64_mov_reg_imm (code, AMD64_RDI, mono_defaults.exception_class->image);
428 amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
429 amd64_call_reg (code, AMD64_R11);
431 /* Compute throw_ip */
432 amd64_pop_reg (code, AMD64_RSI);
433 /* return addr */
434 amd64_pop_reg (code, AMD64_RDX);
435 amd64_alu_reg_reg (code, X86_SUB, AMD64_RDX, AMD64_RSI);
437 /* Put the throw_ip at the top of the misaligned stack */
438 amd64_push_reg (code, AMD64_RDX);
440 throw_ex = (guint64)mono_arch_get_throw_exception ();
442 /* Call throw_exception */
443 amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RAX, 8);
444 amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
445 /* The original IP is on the stack */
446 amd64_jump_reg (code, AMD64_R11);
448 g_assert ((code - start) < 64);
450 inited = TRUE;
452 return start;
455 /* mono_arch_find_jit_info:
457 * This function is used to gather information from @ctx. It return the
458 * MonoJitInfo of the corresponding function, unwinds one stack frame and
459 * stores the resulting context into @new_ctx. It also stores a string
460 * describing the stack location into @trace (if not NULL), and modifies
461 * the @lmf if necessary. @native_offset return the IP offset from the
462 * start of the function or -1 if that info is not available.
464 MonoJitInfo *
465 mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
466 MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset,
467 gboolean *managed)
469 MonoJitInfo *ji;
470 int i;
471 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
473 /* Avoid costly table lookup during stack overflow */
474 if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
475 ji = prev_ji;
476 else
477 ji = mono_jit_info_table_find (domain, ip);
479 if (managed)
480 *managed = FALSE;
482 if (ji != NULL) {
483 int offset;
484 gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0;
486 *new_ctx = *ctx;
488 if (managed)
489 if (!ji->method->wrapper_type)
490 *managed = TRUE;
493 * Some managed methods like pinvoke wrappers might have save_lmf set.
494 * In this case, register save/restore code is not generated by the
495 * JIT, so we have to restore callee saved registers from the lmf.
497 if (ji->method->save_lmf) {
499 * We only need to do this if the exception was raised in managed
500 * code, since otherwise the lmf was already popped of the stack.
502 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
503 new_ctx->rbp = (*lmf)->ebp;
504 new_ctx->rbx = (*lmf)->rbx;
505 new_ctx->rsp = (*lmf)->rsp;
506 new_ctx->r12 = (*lmf)->r12;
507 new_ctx->r13 = (*lmf)->r13;
508 new_ctx->r14 = (*lmf)->r14;
509 new_ctx->r15 = (*lmf)->r15;
512 else {
513 offset = omit_fp ? 0 : -1;
514 /* restore caller saved registers */
515 for (i = 0; i < AMD64_NREG; i ++)
516 if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) {
517 guint64 reg;
519 if (omit_fp) {
520 reg = *((guint64*)ctx->rsp + offset);
521 offset ++;
523 else {
524 reg = *((guint64 *)ctx->rbp + offset);
525 offset --;
528 switch (i) {
529 case AMD64_RBX:
530 new_ctx->rbx = reg;
531 break;
532 case AMD64_R12:
533 new_ctx->r12 = reg;
534 break;
535 case AMD64_R13:
536 new_ctx->r13 = reg;
537 break;
538 case AMD64_R14:
539 new_ctx->r14 = reg;
540 break;
541 case AMD64_R15:
542 new_ctx->r15 = reg;
543 break;
544 case AMD64_RBP:
545 new_ctx->rbp = reg;
546 break;
547 default:
548 g_assert_not_reached ();
553 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
554 /* remove any unused lmf */
555 *lmf = (*lmf)->previous_lmf;
558 if (omit_fp) {
559 /* Pop frame */
560 new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);
561 new_ctx->rip = *((guint64 *)new_ctx->rsp) - 1;
562 /* Pop return address */
563 new_ctx->rsp += 8;
565 else {
566 /* Pop EBP and the return address */
567 new_ctx->rsp = ctx->rbp + (2 * sizeof (gpointer));
568 /* we substract 1, so that the IP points into the call instruction */
569 new_ctx->rip = *((guint64 *)ctx->rbp + 1) - 1;
570 new_ctx->rbp = *((guint64 *)ctx->rbp);
573 /* Pop arguments off the stack */
575 MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
577 guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
578 new_ctx->rsp += stack_to_pop;
581 return ji;
582 } else if (*lmf) {
584 *new_ctx = *ctx;
586 if ((ji = mono_jit_info_table_find (domain, (gpointer)(*lmf)->rip))) {
587 } else {
588 if (!(*lmf)->method)
589 /* Top LMF entry */
590 return (gpointer)-1;
591 /* Trampoline lmf frame */
592 memset (res, 0, sizeof (MonoJitInfo));
593 res->method = (*lmf)->method;
596 new_ctx->rip = (*lmf)->rip;
597 new_ctx->rbp = (*lmf)->ebp;
598 new_ctx->rsp = (*lmf)->rsp;
600 new_ctx->rbx = (*lmf)->rbx;
601 new_ctx->r12 = (*lmf)->r12;
602 new_ctx->r13 = (*lmf)->r13;
603 new_ctx->r14 = (*lmf)->r14;
604 new_ctx->r15 = (*lmf)->r15;
606 *lmf = (*lmf)->previous_lmf;
608 return ji ? ji : res;
611 return NULL;
615 * mono_arch_handle_exception:
617 * @ctx: saved processor state
618 * @obj: the exception object
620 gboolean
621 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
623 MonoContext mctx;
625 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
627 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
629 mono_arch_monoctx_to_sigctx (&mctx, sigctx);
631 return TRUE;
634 static inline guint64*
635 gregs_from_ucontext (ucontext_t *ctx)
637 #ifdef __FreeBSD__
638 guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
639 #else
640 guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
641 #endif
643 return gregs;
646 void
647 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
649 ucontext_t *ctx = (ucontext_t*)sigctx;
651 guint64 *gregs = gregs_from_ucontext (ctx);
653 mctx->rax = gregs [REG_RAX];
654 mctx->rbx = gregs [REG_RBX];
655 mctx->rcx = gregs [REG_RCX];
656 mctx->rdx = gregs [REG_RDX];
657 mctx->rbp = gregs [REG_RBP];
658 mctx->rsp = gregs [REG_RSP];
659 mctx->rsi = gregs [REG_RSI];
660 mctx->rdi = gregs [REG_RDI];
661 mctx->rip = gregs [REG_RIP];
662 mctx->r12 = gregs [REG_R12];
663 mctx->r13 = gregs [REG_R13];
664 mctx->r14 = gregs [REG_R14];
665 mctx->r15 = gregs [REG_R15];
668 void
669 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
671 ucontext_t *ctx = (ucontext_t*)sigctx;
673 guint64 *gregs = gregs_from_ucontext (ctx);
675 gregs [REG_RAX] = mctx->rax;
676 gregs [REG_RBX] = mctx->rbx;
677 gregs [REG_RCX] = mctx->rcx;
678 gregs [REG_RDX] = mctx->rdx;
679 gregs [REG_RBP] = mctx->rbp;
680 gregs [REG_RSP] = mctx->rsp;
681 gregs [REG_RSI] = mctx->rsi;
682 gregs [REG_RDI] = mctx->rdi;
683 gregs [REG_RIP] = mctx->rip;
684 gregs [REG_R12] = mctx->r12;
685 gregs [REG_R13] = mctx->r13;
686 gregs [REG_R14] = mctx->r14;
687 gregs [REG_R15] = mctx->r15;
690 gpointer
691 mono_arch_ip_from_context (void *sigctx)
693 ucontext_t *ctx = (ucontext_t*)sigctx;
695 guint64 *gregs = gregs_from_ucontext (ctx);
697 return (gpointer)gregs [REG_RIP];