/*
 * exceptions-amd64.c: exception support for AMD64
 *
 * Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */
#ifdef HAVE_UCONTEXT_H
#include <ucontext.h>
#endif

#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini-amd64.h"
#include "debug-mini.h"

#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
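/* For example, ALIGN_TO (13, 8) == 16: rounds 'val' up to the next multiple of 'align'
 * (which must be a power of two). Used below to align unwind info to an 8-byte boundary. */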
#ifdef TARGET_WIN32
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
guint64 mono_win_chained_exception_filter_result;
gboolean mono_win_chained_exception_filter_didrun;
#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler(0, ep, sctx)
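/* For example, W32_SEH_HANDLE_EX(segv) expands to:
 *   if (segv_handler) segv_handler(0, ep, sctx)
 * i.e. the installed per-signal handler is invoked with the SEH info and the MonoContext. */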
/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
 */
LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	LONG res;
	MonoContext* sctx;

	mono_win_chained_exception_filter_didrun = FALSE;
	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;
	sctx = g_malloc(sizeof(MonoContext));

	/* Copy Win32 context to UNIX style context */
	switch (er->ExceptionCode) {
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		break;
	}
	/* Copy context back */
	ctx->Rsp = sctx->rsp;
	ctx->Rdi = sctx->rdi;
	ctx->Rsi = sctx->rsi;
	ctx->Rbx = sctx->rbx;
	ctx->Rbp = sctx->rbp;
	ctx->R12 = sctx->r12;
	ctx->R13 = sctx->r13;
	ctx->R14 = sctx->r14;
	ctx->R15 = sctx->r15;
	ctx->Rip = sctx->rip;

	/* Volatile, but should not matter? */
	ctx->Rax = sctx->rax;
	ctx->Rcx = sctx->rcx;
	ctx->Rdx = sctx->rdx;
	if (mono_win_chained_exception_filter_didrun)
		res = mono_win_chained_exception_filter_result;

	return res;
}
void win32_seh_init()
{
	mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_handler);
}
void win32_seh_cleanup()
{
	if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);
}
void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* TARGET_WIN32 */
/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	/* restore_context (MonoContext *ctx) */

	start = code = mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
	/* Restore all registers except %rip and %r11 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	//amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
	//amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
	//amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
#if !defined(__native_client_codegen__)
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
#endif
	if (mono_running_on_valgrind ()) {
		/* Prevent 'Address 0x... is just below the stack ptr.' errors */
		amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
		amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
	} else {
		amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
		/* get return address */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
	}

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);
	nacl_global_codeman_validate (&start, 256, &code);

	mono_arch_flush_icache (start, code - start);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("restore_context"), start, code - start, ji, unwind_ops);

	return start;
}
/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start;
	guint8 *code;
	int i;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = mono_global_codeman_reserve (kMaxCodeSize);
	/* call_filter (MonoContext *ctx, unsigned long eip) */

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
		}

	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	/* load callee saved regs */
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
#if !defined(__native_client_codegen__)
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
#endif
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

	amd64_leave (code);
	amd64_ret (code);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate (&start, kMaxCodeSize, &code);

	mono_arch_flush_icache (start, code - start);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("call_filter"), start, code - start, ji, unwind_ops);

	return start;
}
/*
 * The first few arguments are dummy, to force the other arguments to be passed on
 * the stack, this avoids overwriting the argument registers in the throw trampoline.
 */
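/*
 * Six dummies are enough to fill every integer argument register on both the SysV
 * AMD64 ABI (rdi, rsi, rdx, rcx, r8, r9) and the Win64 ABI (rcx, rdx, r8, r9), so
 * 'regs', 'rip', 'exc' and 'rethrow' below always arrive on the stack.
 */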
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
			    guint64 dummy5, guint64 dummy6,
			    mgreg_t *regs, mgreg_t rip,
			    MonoObject *exc, gboolean rethrow)
{
	static void (*restore_context) (MonoContext *);
	MonoContext ctx;

	if (!restore_context)
		restore_context = mono_get_restore_context ();
	ctx.rsp = regs [AMD64_RSP];
	ctx.rip = rip;
	ctx.rbx = regs [AMD64_RBX];
	ctx.rbp = regs [AMD64_RBP];
	ctx.r12 = regs [AMD64_R12];
	ctx.r13 = regs [AMD64_R13];
	ctx.r14 = regs [AMD64_R14];
	ctx.r15 = regs [AMD64_R15];
	ctx.rdi = regs [AMD64_RDI];
	ctx.rsi = regs [AMD64_RSI];
	ctx.rax = regs [AMD64_RAX];
	ctx.rcx = regs [AMD64_RCX];
	ctx.rdx = regs [AMD64_RDX];
	if (mono_object_isinst (exc, mono_defaults.exception_class)) {
		MonoException *mono_ex = (MonoException *)exc;
		if (!rethrow)
			mono_ex->stack_trace = NULL;
	}
	if (mono_debug_using_mono_debugger ()) {
		guint8 buf [16];

		mono_breakpoint_clean_code (NULL, (gpointer)rip, 8, buf, sizeof (buf));

		if (buf [3] == 0xe8) {
			MonoContext ctx_cp = ctx;
			ctx_cp.rip = rip - 5;

			if (mono_debugger_handle_exception (&ctx_cp, exc)) {
				restore_context (&ctx_cp);
				g_assert_not_reached ();
			}
		}
	}
	/* adjust eip so that it points into the call instruction */
	ctx.rip -= 1;

	mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
	restore_context (&ctx);

	g_assert_not_reached ();
}
void
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
				   guint64 dummy5, guint64 dummy6,
				   mgreg_t *regs, mgreg_t rip,
				   guint32 ex_token_index, gint64 pc_offset)
{
	guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
	MonoException *ex;

	ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);

	rip -= pc_offset;

	/* Negate the ip adjustment done in mono_amd64_throw_exception () */
	rip += 1;

	mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, regs, rip, (MonoObject *)ex, FALSE);
}
void
mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
			  guint64 dummy5, guint64 dummy6,
			  mgreg_t *regs, mgreg_t rip,
			  guint32 dummy7, gint64 dummy8)
{
	/* Only the register parameters are valid */
	MonoContext ctx;

	ctx.rsp = regs [AMD64_RSP];
	ctx.rip = rip;
	ctx.rbx = regs [AMD64_RBX];
	ctx.rbp = regs [AMD64_RBP];
	ctx.r12 = regs [AMD64_R12];
	ctx.r13 = regs [AMD64_R13];
	ctx.r14 = regs [AMD64_R14];
	ctx.r15 = regs [AMD64_R15];
	ctx.rdi = regs [AMD64_RDI];
	ctx.rsi = regs [AMD64_RSI];
	ctx.rax = regs [AMD64_RAX];
	ctx.rcx = regs [AMD64_RCX];
	ctx.rdx = regs [AMD64_RDX];

	mono_resume_unwind (&ctx);
}
/*
 * get_throw_trampoline:
 *
 * Generate a call to mono_amd64_throw_exception/
 * mono_amd64_throw_corlib_exception.
 */
static gpointer
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
{
	guint8 *start, *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, stack_size, arg_offsets [16], regs_offset, dummy_stack_space;
	const guint kMaxCodeSize = NACL_SIZE (256, 512);

#ifdef TARGET_WIN32
	dummy_stack_space = 6 * sizeof(mgreg_t);	/* Windows expects stack space allocated for all 6 dummy args. */
#else
	dummy_stack_space = 0;
#endif

	start = code = mono_global_codeman_reserve (kMaxCodeSize);
	/* The stack is unaligned on entry */
	stack_size = 192 + 8 + dummy_stack_space;

	unwind_ops = mono_arch_get_cie_program ();

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
	/*
	 * To hide linux/windows calling convention differences, we pass all arguments on
	 * the stack by passing 6 dummy values in registers.
	 */

	arg_offsets [0] = dummy_stack_space + 0;
	arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
	arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
	arg_offsets [3] = dummy_stack_space + sizeof(mgreg_t) * 3;
	regs_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
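	/*
	 * Resulting frame layout, from the new RSP upwards: the optional Win64 dummy-arg
	 * space, then four stack slots for the real arguments (arg_offsets [0..3]), then
	 * the saved register block starting at regs_offset.
	 */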
	/* Save registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (i != AMD64_RSP)
			amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
	/* Save RSP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
	/* Set arg1 == regs */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, regs_offset);
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg2 == eip */
	if (llvm_abs)
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
	else
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg3 == exc/ex_token_index */
	if (resume_unwind)
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
	else
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG1, sizeof(mgreg_t));
	/* Set arg4 == rethrow/pc offset */
	if (resume_unwind) {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], 0, sizeof(mgreg_t));
	} else if (corlib) {
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [3], AMD64_ARG_REG2, sizeof(mgreg_t));
		if (llvm_abs) {
			/*
			 * The caller is LLVM code which passes the absolute address not a pc offset,
			 * so compensate by passing 0 as 'rip' and passing the negated abs address as
			 * the pc offset.
			 */
			amd64_neg_membase (code, AMD64_RSP, arg_offsets [3]);
		}
	} else {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], rethrow, sizeof(mgreg_t));
	}
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_amd64_throw_corlib_exception" : "mono_amd64_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);
	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate (&start, kMaxCodeSize, &code);

	if (info)
		*info = mono_tramp_info_create (g_strdup (tramp_name), start, code - start, ji, unwind_ops);

	return start;
}
/*
 * mono_arch_get_throw_exception:
 *
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 */
gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
}
gpointer
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
}
/*
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
}
/*
 * mono_arch_find_jit_info:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 */
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
			 MonoJitInfo *ji, MonoContext *ctx,
			 MonoContext *new_ctx, MonoLMF **lmf,
			 mgreg_t **save_locations,
			 StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	memset (frame, 0, sizeof (StackFrameInfo));

	*new_ctx = *ctx;
	if (ji != NULL) {
		mgreg_t regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		frame->type = FRAME_TYPE_MANAGED;

		if (ji->from_aot)
			unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
		else
			unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);

		frame->unwind_info = unwind_info;
		frame->unwind_info_len = unwind_info_len;

		regs [AMD64_RAX] = new_ctx->rax;
		regs [AMD64_RBX] = new_ctx->rbx;
		regs [AMD64_RCX] = new_ctx->rcx;
		regs [AMD64_RDX] = new_ctx->rdx;
		regs [AMD64_RBP] = new_ctx->rbp;
		regs [AMD64_RSP] = new_ctx->rsp;
		regs [AMD64_RSI] = new_ctx->rsi;
		regs [AMD64_RDI] = new_ctx->rdi;
		regs [AMD64_RIP] = new_ctx->rip;
		regs [AMD64_R12] = new_ctx->r12;
		regs [AMD64_R13] = new_ctx->r13;
		regs [AMD64_R14] = new_ctx->r14;
		regs [AMD64_R15] = new_ctx->r15;

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
				   (guint8*)ji->code_start + ji->code_size,
				   ip, regs, MONO_MAX_IREGS + 1,
				   save_locations, MONO_MAX_IREGS, &cfa);

		new_ctx->rax = regs [AMD64_RAX];
		new_ctx->rbx = regs [AMD64_RBX];
		new_ctx->rcx = regs [AMD64_RCX];
		new_ctx->rdx = regs [AMD64_RDX];
		new_ctx->rbp = regs [AMD64_RBP];
		new_ctx->rsp = regs [AMD64_RSP];
		new_ctx->rsi = regs [AMD64_RSI];
		new_ctx->rdi = regs [AMD64_RDI];
		new_ctx->rip = regs [AMD64_RIP];
		new_ctx->r12 = regs [AMD64_R12];
		new_ctx->r13 = regs [AMD64_R13];
		new_ctx->r14 = regs [AMD64_R14];
		new_ctx->r15 = regs [AMD64_R15];

		/* The CFA becomes the new SP value */
		new_ctx->rsp = (mgreg_t)cfa;
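		/* Note: the CFA (canonical frame address) is the caller's stack pointer at the
		 * call site, i.e. the value RSP had just above the pushed return address. */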
		if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
		}
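		/*
		 * Note: the two low bits of previous_lmf are used as flags (bit 0: this LMF has
		 * rip set / is a trampoline entry, bit 1: debugger-invoke marker), which is why
		 * they are masked off with ~3 before the value is used as a pointer.
		 */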
#ifndef MONO_AMD64_NO_PUSHES
		/* Pop arguments off the stack */
		{
			MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
			guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);

			new_ctx->rsp += stack_to_pop;
		}
#endif

		return TRUE;
	} else if (*lmf) {
		guint64 rip;
		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}
		if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return FALSE;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
		}

		ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
		/*
		 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
		 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
		 * return address.
		 */
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		new_ctx->rip = rip;
		new_ctx->rbp = (*lmf)->rbp;
		new_ctx->rsp = (*lmf)->rsp;

		new_ctx->rbx = (*lmf)->rbx;
		new_ctx->r12 = (*lmf)->r12;
		new_ctx->r13 = (*lmf)->r13;
		new_ctx->r14 = (*lmf)->r14;
		new_ctx->r15 = (*lmf)->r15;

		new_ctx->rdi = (*lmf)->rdi;
		new_ctx->rsi = (*lmf)->rsi;

		*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}
/*
 * Called by resuming from a signal handler.
 */
static void
handle_signal_exception (gpointer obj, gboolean test_only)
{
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	MonoContext ctx;
	static void (*restore_context) (MonoContext *);

	if (!restore_context)
		restore_context = mono_get_restore_context ();

	memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));

	if (mono_debugger_handle_exception (&ctx, (MonoObject *)obj))
		;
	else
		mono_handle_exception (&ctx, obj, MONO_CONTEXT_GET_IP (&ctx), test_only);

	restore_context (&ctx);
}
/*
 * mono_arch_handle_exception:
 *
 * @ctx: saved processor state
 * @obj: the exception object
 */
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	/*
	 * Handling the exception in the signal handler is problematic, since the original
	 * signal is disabled, and we could run arbitrary code through the debugger. So
	 * resume into the normal stack and do most work there if possible.
	 */
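	/*
	 * Note: the resume works by rewriting the signal ucontext below: RDI/RSI receive the
	 * arguments and RIP is pointed at handle_signal_exception (), so returning from the
	 * signal handler effectively calls that function on the thread's normal stack.
	 */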
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	guint64 sp = UCONTEXT_REG_RSP (ctx);

	/* Pass the ctx parameter in TLS */
	mono_arch_sigctx_to_monoctx (ctx, &jit_tls->ex_ctx);
	/* The others in registers */
	UCONTEXT_REG_RDI (ctx) = (guint64)obj;
	UCONTEXT_REG_RSI (ctx) = test_only;

	/* Allocate a stack frame below the red zone */
	sp -= 128;
	/* The stack should be unaligned */
	if ((sp % 16) == 0)
		sp -= 8;
	UCONTEXT_REG_RSP (ctx) = sp;

	UCONTEXT_REG_RIP (ctx) = (guint64)handle_signal_exception;

	return TRUE;
#else
	MonoContext mctx;
	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj))
		return TRUE;

	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);

	mono_arch_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#endif
}
void
mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
{
#if defined(__native_client_codegen__) || defined(__native_client__)
	printf("WARNING: mono_arch_sigctx_to_monoctx() called!\n");
#endif

#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	mctx->rax = UCONTEXT_REG_RAX (ctx);
	mctx->rbx = UCONTEXT_REG_RBX (ctx);
	mctx->rcx = UCONTEXT_REG_RCX (ctx);
	mctx->rdx = UCONTEXT_REG_RDX (ctx);
	mctx->rbp = UCONTEXT_REG_RBP (ctx);
	mctx->rsp = UCONTEXT_REG_RSP (ctx);
	mctx->rsi = UCONTEXT_REG_RSI (ctx);
	mctx->rdi = UCONTEXT_REG_RDI (ctx);
	mctx->rip = UCONTEXT_REG_RIP (ctx);
	mctx->r12 = UCONTEXT_REG_R12 (ctx);
	mctx->r13 = UCONTEXT_REG_R13 (ctx);
	mctx->r14 = UCONTEXT_REG_R14 (ctx);
	mctx->r15 = UCONTEXT_REG_R15 (ctx);
#else
	MonoContext *ctx = (MonoContext *)sigctx;

	mctx->rax = ctx->rax;
	mctx->rbx = ctx->rbx;
	mctx->rcx = ctx->rcx;
	mctx->rdx = ctx->rdx;
	mctx->rbp = ctx->rbp;
	mctx->rsp = ctx->rsp;
	mctx->rsi = ctx->rsi;
	mctx->rdi = ctx->rdi;
	mctx->rip = ctx->rip;
	mctx->r12 = ctx->r12;
	mctx->r13 = ctx->r13;
	mctx->r14 = ctx->r14;
	mctx->r15 = ctx->r15;
#endif
}
void
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
{
#if defined(__native_client__) || defined(__native_client_codegen__)
	printf("WARNING: mono_arch_monoctx_to_sigctx() called!\n");
#endif

#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	UCONTEXT_REG_RAX (ctx) = mctx->rax;
	UCONTEXT_REG_RBX (ctx) = mctx->rbx;
	UCONTEXT_REG_RCX (ctx) = mctx->rcx;
	UCONTEXT_REG_RDX (ctx) = mctx->rdx;
	UCONTEXT_REG_RBP (ctx) = mctx->rbp;
	UCONTEXT_REG_RSP (ctx) = mctx->rsp;
	UCONTEXT_REG_RSI (ctx) = mctx->rsi;
	UCONTEXT_REG_RDI (ctx) = mctx->rdi;
	UCONTEXT_REG_RIP (ctx) = mctx->rip;
	UCONTEXT_REG_R12 (ctx) = mctx->r12;
	UCONTEXT_REG_R13 (ctx) = mctx->r13;
	UCONTEXT_REG_R14 (ctx) = mctx->r14;
	UCONTEXT_REG_R15 (ctx) = mctx->r15;
#else
	MonoContext *ctx = (MonoContext *)sigctx;

	ctx->rax = mctx->rax;
	ctx->rbx = mctx->rbx;
	ctx->rcx = mctx->rcx;
	ctx->rdx = mctx->rdx;
	ctx->rbp = mctx->rbp;
	ctx->rsp = mctx->rsp;
	ctx->rsi = mctx->rsi;
	ctx->rdi = mctx->rdi;
	ctx->rip = mctx->rip;
	ctx->r12 = mctx->r12;
	ctx->r13 = mctx->r13;
	ctx->r14 = mctx->r14;
	ctx->r15 = mctx->r15;
#endif
}
gpointer
mono_arch_ip_from_context (void *sigctx)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	return (gpointer)UCONTEXT_REG_RIP (ctx);
#else
	MonoContext *ctx = sigctx;
	return (gpointer)ctx->rip;
#endif
}
static void
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
}
/*
 * This function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.eip, but in a function that
 * will restore the protection on the soft-guard pages and return back to
 * continue at mctx.eip.
 */
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;
	sp = (gpointer)(mctx->rsp);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->rip);
	mctx->rip = (guint64)restore_soft_guard_pages;
	mctx->rsp = (guint64)sp;
}
static void
altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
{
	void (*restore_context) (MonoContext *);
	MonoContext mctx;

	restore_context = mono_get_restore_context ();
	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) {
		if (stack_ovf)
			prepare_for_guard_pages (&mctx);
		restore_context (&mctx);
	}

	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
	if (stack_ovf)
		prepare_for_guard_pages (&mctx);
	restore_context (&mctx);
}
void
mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
{
#if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
	MonoException *exc = NULL;
	ucontext_t *ctx = (ucontext_t*)sigctx;
	MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)UCONTEXT_REG_RIP (sigctx), NULL);
	gpointer *sp;
	int frame_size;

	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;
	if (!ji)
		mono_handle_native_sigsegv (SIGSEGV, sigctx);

	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like:
	 *   ucontext struct
	 *   ...
	 *   return ip
	 * 128 is the size of the red zone
	 */
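	/*
	 * Note: the 128-byte red zone is the area below RSP that the SysV AMD64 ABI lets the
	 * interrupted function use without adjusting RSP; the new frame is placed below it so
	 * any data stored there stays intact.
	 */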
	frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
	frame_size += 15;
	frame_size &= ~15;
	sp = (gpointer)(UCONTEXT_REG_RSP (sigctx) & ~15);
	sp = (gpointer)((char*)sp - frame_size);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
	/* may need to adjust pointers in the new struct copy, depending on the OS */
	memcpy (sp + 4, ctx, sizeof (ucontext_t));
	/* at the return from the signal handler execution starts in altstack_handle_and_restore() */
	UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
	UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
	UCONTEXT_REG_RDI (sigctx) = (unsigned long)(sp + 4);
	UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
	UCONTEXT_REG_RDX (sigctx) = stack_ovf;
#endif
}
guint64
mono_amd64_get_original_ip (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	g_assert (lmf);

	/* Reset the change to previous_lmf */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);

	return lmf->rip;
}
gpointer
mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *start;
	guint8 *br [1];
	gpointer throw_trampoline;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = mono_global_codeman_reserve (kMaxCodeSize);
	/* We are in the frame of a managed method after a call */
	/*
	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.
	 */

	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	/* Obtain the pending exception */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br [0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);

	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop saved stuff from the stack */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		throw_trampoline = mono_get_throw_exception ();
		amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	}
	/* We use a jump instead of a call so we can push the original ip on the stack */
	amd64_jump_reg (code, AMD64_R11);
	/* ex == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate (&start, kMaxCodeSize, &code);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("throw_pending_exception"), start, code - start, ji, unwind_ops);

	return start;
}
static gpointer throw_pending_exception;

/*
 * Called when a thread receives an async exception while executing unmanaged code.
 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
 * the return address on the stack to point to a helper routine which throws the
 * pending exception.
 */
void
mono_arch_notify_pending_exc (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	if (!lmf)
		/* Not yet started */
		return;

	if ((guint64)lmf->previous_lmf & 1)
		/* Already hijacked or trampoline LMF entry */
		return;

	/* lmf->rsp is set just before making the call which transitions to unmanaged code */
	lmf->rip = *(guint64*)(lmf->rsp - 8);
	/* Signal that lmf->rip is set */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);

	*(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
}
GSList*
mono_amd64_get_exception_trampolines (gboolean aot)
{
	MonoTrampInfo *info;
	GSList *tramps = NULL;

	mono_arch_get_throw_pending_exception (&info, aot);
	tramps = g_slist_prepend (tramps, info);

	/* LLVM needs different throw trampolines */
	get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", FALSE);
	tramps = g_slist_prepend (tramps, info);

	return tramps;
}
void
mono_arch_exceptions_init (void)
{
	GSList *tramps, *l;
	gpointer tramp;

	if (mono_aot_only) {
		throw_pending_exception = mono_aot_get_trampoline ("throw_pending_exception");
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
		mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
	} else {
		/* Call this to avoid initialization races */
		throw_pending_exception = mono_arch_get_throw_pending_exception (NULL, FALSE);

		tramps = mono_amd64_get_exception_trampolines (FALSE);
		for (l = tramps; l; l = l->next) {
			MonoTrampInfo *info = l->data;

			mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
			mono_save_trampoline_xdebug_info (info);
			mono_tramp_info_free (info);
		}
		g_slist_free (tramps);
	}
}
/*
 * The mono_arch_unwindinfo* methods are used to build and add
 * function table info for each emitted method from mono.  On Winx64
 * the seh handler will not be called if the mono methods are not
 * added to the function table.
 *
 * We should not need to add non-volatile register info to the
 * table since mono stores that info elsewhere. (Except for the register
 * used for the fp.)
 */

#define MONO_MAX_UNWIND_CODES 22
typedef union _UNWIND_CODE {
	struct {
		guchar CodeOffset;
		guchar UnwindOp : 4;
		guchar OpInfo   : 4;
	};
	gushort FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;
typedef struct _UNWIND_INFO {
	guchar Version       : 3;
	guchar Flags         : 5;
	guchar SizeOfProlog;
	guchar CountOfCodes;
	guchar FrameRegister : 4;
	guchar FrameOffset   : 4;
	/* custom size for mono allowing for */
	/*UWOP_PUSH_NONVOL ebp offset = 21*/
	/*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
	/*UWOP_SET_FPREG : requires 2 offset = 17*/
	/*UWOP_PUSH_NONVOL offset = 15-0*/
	UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
/*	UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
 *	OPTIONAL ULONG ExceptionHandler;
 *	OPTIONAL ULONG FunctionEntry;
 *	OPTIONAL ULONG ExceptionData[]; */
} UNWIND_INFO, *PUNWIND_INFO;
typedef struct {
	RUNTIME_FUNCTION runtimeFunction;
	UNWIND_INFO unwindInfo;
} MonoUnwindInfo, *PMonoUnwindInfo;
void
mono_arch_unwindinfo_create (gpointer* monoui)
{
	PMonoUnwindInfo newunwindinfo;
	*monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
	newunwindinfo->unwindInfo.Version = 1;
}
void
mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg)
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode [codeindex];
	unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
	unwindcode->CodeOffset = (((guchar*)nextip) - ((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
void
mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg)
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode [codeindex];
	unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
	unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
	unwindcode->CodeOffset = (((guchar*)nextip) - ((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	unwindinfo->unwindInfo.FrameRegister = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
void
mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size)
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	guchar codesneeded;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (size < 0x8)
		g_error ("Stack allocation must be equal to or greater than 0x8.");

	if (size <= 0x80)
		codesneeded = 1;
	else if (size <= 0x7FFF8)
		codesneeded = 2;
	else
		codesneeded = 3;

	if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode [codeindex];

	if (codesneeded == 1) {
		/*The size of the allocation is
		  (the number in the OpInfo member) times 8 plus 8*/
		unwindcode->OpInfo = (size - 8)/8;
		unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
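		/* Example: a 0x28-byte allocation is encoded as UWOP_ALLOC_SMALL with
		 * OpInfo == (0x28 - 8) / 8 == 4. */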
	}
	else {
		if (codesneeded == 3) {
			/*the unscaled size of the allocation is recorded
			  in the next two slots in little-endian format*/
			*((unsigned int*)(&unwindcode->FrameOffset)) = size;
			unwindcode += 2;
			unwindcode->OpInfo = 1;
		}
		else {
			/*the size of the allocation divided by 8
			  is recorded in the next slot*/
			unwindcode->FrameOffset = size/8;
			unwindcode++;
			unwindcode->OpInfo = 0;
		}
		unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
	}

	unwindcode->CodeOffset = (((guchar*)nextip) - ((guchar*)codebegin));

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
guint
mono_arch_unwindinfo_get_size (gpointer monoui)
{
	PMonoUnwindInfo unwindinfo;
	if (!monoui)
		return 0;

	unwindinfo = (MonoUnwindInfo*)monoui;
	return (8 + sizeof (MonoUnwindInfo)) -
		(sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
}
static PRUNTIME_FUNCTION
MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
{
	MonoJitInfo *ji;
	guint64 pos;
	PMonoUnwindInfo targetinfo;
	MonoDomain *domain = mono_domain_get ();

	ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL);
	if (!ji)
		return 0;

	pos = (guint64)(((char*)ji->code_start) + ji->code_size);

	targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);

	targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);

	return &targetinfo->runtimeFunction;
}
void
mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
{
	PMonoUnwindInfo unwindinfo, targetinfo;
	guchar codecount;
	guint64 targetlocation;
	if (!*monoui)
		return;

	unwindinfo = (MonoUnwindInfo*)*monoui;
	targetlocation = (guint64)&(((guchar*)code)[code_size]);
	targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);

	unwindinfo->runtimeFunction.EndAddress = code_size;
	unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);

	memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));

	codecount = unwindinfo->unwindInfo.CountOfCodes;
	memcpy (&targetinfo->unwindInfo.UnwindCode [0], &unwindinfo->unwindInfo.UnwindCode [MONO_MAX_UNWIND_CODES - codecount],
		sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);

	g_free (unwindinfo);

	RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
}
#if MONO_SUPPORT_TASKLETS
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both call conventions */
	const guint kMaxCodeSize = NACL_SIZE (64, 128);

	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = mono_global_codeman_reserve (kMaxCodeSize);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy
	 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
	 */
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* setup the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);

	/* now restore the registers from the LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8);
#if !defined(__native_client_codegen__)
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8);
#endif
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8);

	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsp), 8);

	/* restore the lmf chain */
	/*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= kMaxCodeSize);

	nacl_global_codeman_validate (&start, kMaxCodeSize, &code);

	saved = start;
	return (MonoContinuationRestore)saved;
}
#endif /* MONO_SUPPORT_TASKLETS */
/*
 * mono_arch_setup_resume_sighandler_ctx:
 *
 * Setup CTX so execution continues at FUNC.
 */
void
mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
{
	/*
	 * When resuming from a signal handler, the stack should be misaligned, just like right after
	 * a call.
	 */
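	/* On AMD64 the ABI keeps RSP 16-byte aligned at call sites, so right after a call
	 * (i.e. on function entry) RSP % 16 == 8; that is the state recreated here. */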
	if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
		MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
	MONO_CONTEXT_SET_IP (ctx, func);
}