/**
 * \file mini-amd64.h
 */

#ifndef __MONO_MINI_AMD64_H__
#define __MONO_MINI_AMD64_H__

#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/utils/mono-sigcontext.h>
#include <mono/utils/mono-context.h>
#include <glib.h>

#ifdef HOST_WIN32
#include <windows.h>
/* use SIG* defines if possible */
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif

#if !defined(_MSC_VER)
/* sigcontext surrogate */
struct sigcontext {
	guint64 eax;
	guint64 ebx;
	guint64 ecx;
	guint64 edx;
	guint64 ebp;
	guint64 esp;
	guint64 esi;
	guint64 edi;
	guint64 eip;
};
#endif

typedef void (* MonoW32ExceptionHandler) (int _dummy, EXCEPTION_POINTERS *info, void *context);
void win32_seh_init(void);
void win32_seh_cleanup(void);
void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler);

#ifndef SIGFPE
#define SIGFPE 4
#endif

#ifndef SIGILL
#define SIGILL 8
#endif

#ifndef SIGSEGV
#define SIGSEGV 11
#endif

LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep);
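
/*
 * Illustrative sketch, not part of the original header: how a caller might hook
 * the Win32 SEH surrogate handlers declared above. The handler body and the
 * point at which it is installed are assumptions for illustration only.
 *
 *   static void example_sigsegv_handler (int _dummy, EXCEPTION_POINTERS *info, void *context)
 *   {
 *       // inspect info->ExceptionRecord / info->ContextRecord here
 *   }
 *
 *   win32_seh_init ();
 *   win32_seh_set_handler (SIGSEGV, example_sigsegv_handler);
 *   ...
 *   win32_seh_cleanup ();
 */
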
typedef struct {
	SRWLOCK lock;
	PVOID handle;
	gsize begin_range;
	gsize end_range;
	PRUNTIME_FUNCTION rt_funcs;
	DWORD rt_funcs_current_count;
	DWORD rt_funcs_max_count;
} DynamicFunctionTableEntry;

#define MONO_UNWIND_INFO_RT_FUNC_SIZE 128

// On Win8/Win2012Server and later we can use dynamic growable function tables
// instead of RtlInstallFunctionTableCallback. This gives us the benefit of
// including all needed unwind info at registration time.
typedef DWORD (NTAPI* RtlAddGrowableFunctionTablePtr)(
	PVOID * DynamicTable,
	PRUNTIME_FUNCTION FunctionTable,
	DWORD EntryCount,
	DWORD MaximumEntryCount,
	ULONG_PTR RangeBase,
	ULONG_PTR RangeEnd);

typedef VOID (NTAPI* RtlGrowFunctionTablePtr)(
	PVOID DynamicTable,
	DWORD NewEntryCount);

typedef VOID (NTAPI* RtlDeleteGrowableFunctionTablePtr)(
	PVOID DynamicTable);
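
/*
 * Illustrative sketch, not part of the original header: resolving and using the
 * growable function table entry points typed above. Assumes ntdll.dll exports
 * RtlAddGrowableFunctionTable (Win8/Win2012Server and later); rt_funcs,
 * begin_range and end_range are hypothetical locals mirroring the
 * DynamicFunctionTableEntry fields.
 *
 *   RtlAddGrowableFunctionTablePtr add_table = (RtlAddGrowableFunctionTablePtr)
 *       GetProcAddress (GetModuleHandleW (L"ntdll.dll"), "RtlAddGrowableFunctionTable");
 *   if (add_table) {
 *       PVOID handle = NULL;
 *       // register an (initially empty) table covering the code range [begin_range, end_range)
 *       add_table (&handle, rt_funcs, 0, MONO_UNWIND_INFO_RT_FUNC_SIZE, begin_range, end_range);
 *   }
 */
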
#endif /* HOST_WIN32 */

#ifdef sun    // Solaris x86
# undef SIGSEGV_ON_ALTSTACK
# define MONO_ARCH_NOMAP32BIT

struct sigcontext {
	unsigned short gs, __gsh;
	unsigned short fs, __fsh;
	unsigned short es, __esh;
	unsigned short ds, __dsh;
	unsigned long edi;
	unsigned long esi;
	unsigned long ebp;
	unsigned long esp;
	unsigned long ebx;
	unsigned long edx;
	unsigned long ecx;
	unsigned long eax;
	unsigned long trapno;
	unsigned long err;
	unsigned long eip;
	unsigned short cs, __csh;
	unsigned long eflags;
	unsigned long esp_at_signal;
	unsigned short ss, __ssh;
	unsigned long fpstate[95];
	unsigned long filler[5];
};
#endif // sun, Solaris x86

#ifndef DISABLE_SIMD
#define MONO_ARCH_SIMD_INTRINSICS 1
#define MONO_ARCH_NEED_SIMD_BANK 1
#define MONO_ARCH_USE_SHARED_FP_SIMD_BANK 1
#endif

#if defined(__APPLE__)
#define MONO_ARCH_SIGNAL_STACK_SIZE MINSIGSTKSZ
#else
#define MONO_ARCH_SIGNAL_STACK_SIZE (16 * 1024)
#endif

#define MONO_ARCH_CPU_SPEC mono_amd64_desc

#define MONO_MAX_IREGS 16

#define MONO_MAX_FREGS AMD64_XMM_NREG

#define MONO_ARCH_FP_RETURN_REG AMD64_XMM0

#ifdef TARGET_WIN32
/* xmm5 is used as a scratch register */
#define MONO_ARCH_CALLEE_FREGS 0x1f
/* xmm6:xmm15 */
#define MONO_ARCH_CALLEE_SAVED_FREGS (0xffff - 0x3f)
#define MONO_ARCH_FP_SCRATCH_REG AMD64_XMM5
#else
/* xmm15 is used as a scratch register */
#define MONO_ARCH_CALLEE_FREGS 0x7fff
#define MONO_ARCH_CALLEE_SAVED_FREGS 0
#define MONO_ARCH_FP_SCRATCH_REG AMD64_XMM15
#endif
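
/*
 * Bitmask note (illustrative): bit n in these masks corresponds to AMD64_XMMn.
 * On Win64, 0x1f covers xmm0-xmm4 (caller-clobbered) and 0xffff - 0x3f == 0xffc0
 * covers xmm6-xmm15 (callee-saved), leaving xmm5 free as the scratch register.
 * On the SysV ABI, 0x7fff covers xmm0-xmm14, keeping xmm15 out of the allocatable
 * set for the same reason.
 */
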
#define MONO_MAX_XREGS MONO_MAX_FREGS

#define MONO_ARCH_CALLEE_XREGS MONO_ARCH_CALLEE_FREGS
#define MONO_ARCH_CALLEE_SAVED_XREGS MONO_ARCH_CALLEE_SAVED_FREGS

#define MONO_ARCH_CALLEE_REGS AMD64_CALLEE_REGS
#define MONO_ARCH_CALLEE_SAVED_REGS AMD64_CALLEE_SAVED_REGS

#define MONO_ARCH_USE_FPSTACK FALSE

#define MONO_ARCH_INST_FIXED_REG(desc) ((desc == '\0') ? -1 : ((desc == 'i' ? -1 : ((desc == 'a') ? AMD64_RAX : ((desc == 's') ? AMD64_RCX : ((desc == 'd') ? AMD64_RDX : ((desc == 'A') ? MONO_AMD64_ARG_REG1 : -1)))))))

/* RDX is clobbered by the opcode implementation before accessing sreg2 */
#define MONO_ARCH_INST_SREG2_MASK(ins) (((ins [MONO_INST_CLOB] == 'a') || (ins [MONO_INST_CLOB] == 'd')) ? (1 << AMD64_RDX) : 0)

#define MONO_ARCH_INST_IS_REGPAIR(desc) FALSE
#define MONO_ARCH_INST_REGPAIR_REG2(desc,hreg1) (-1)

#define MONO_ARCH_FRAME_ALIGNMENT 16

/* fixme: align to 16 bytes instead of 32 (we align to 32 bytes to get
 * reproducible results for benchmarks) */
#define MONO_ARCH_CODE_ALIGNMENT 32

struct MonoLMF {
	/*
	 * The rsp field points to the stack location where the caller ip is saved.
	 * If the second lowest bit is set, then this is a MonoLMFExt structure, and
	 * the other fields are not valid.
	 * If the third lowest bit is set, then this is a MonoLMFTramp structure, and
	 * the 'rbp' field is not valid.
	 */
	gpointer previous_lmf;
	guint64 rbp;
	guint64 rsp;
};

/* LMF structure used by the JIT trampolines */
typedef struct {
	struct MonoLMF lmf;
	MonoContext *ctx;
	gpointer lmf_addr;
} MonoLMFTramp;
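
/*
 * Illustrative sketch, not part of the original header: how the tag bits described
 * in the MonoLMF comment might be tested when walking the LMF chain. The mask used
 * to strip the tag bits is an assumption for illustration.
 *
 *   gsize prev = (gsize) lmf->previous_lmf;
 *   if (prev & 2) {
 *       // MonoLMFExt: the other MonoLMF fields are not valid
 *   } else if (prev & 4) {
 *       // MonoLMFTramp: the 'rbp' field is not valid
 *   }
 *   MonoLMF *next = (MonoLMF *) (prev & ~(gsize) 7);  // clear the low tag bits before following the chain
 */
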
typedef struct MonoCompileArch {
	gint32 localloc_offset;
	gint32 reg_save_area_offset;
	gint32 stack_alloc_size;
	gint32 sp_fp_offset;
	guint32 saved_iregs;
	gboolean omit_fp, omit_fp_computed;
	gpointer cinfo;
	gint32 async_point_count;
	gpointer vret_addr_loc;
#ifdef HOST_WIN32
	gpointer unwindinfo;
#endif
	gpointer seq_point_info_var;
	gpointer ss_trigger_page_var;
	gpointer ss_tramp_var;
	gpointer bp_tramp_var;
	gpointer lmf_var;
} MonoCompileArch;

#ifdef TARGET_WIN32

static AMD64_Reg_No param_regs [] = { AMD64_RCX, AMD64_RDX, AMD64_R8, AMD64_R9 };

static AMD64_XMM_Reg_No float_param_regs [] = { AMD64_XMM0, AMD64_XMM1, AMD64_XMM2, AMD64_XMM3 };

static AMD64_Reg_No return_regs [] = { AMD64_RAX };

static AMD64_XMM_Reg_No float_return_regs [] = { AMD64_XMM0 };

#define PARAM_REGS G_N_ELEMENTS(param_regs)
#define FLOAT_PARAM_REGS G_N_ELEMENTS(float_param_regs)
#define RETURN_REGS G_N_ELEMENTS(return_regs)
#define FLOAT_RETURN_REGS G_N_ELEMENTS(float_return_regs)

#else
#define PARAM_REGS 6
#define FLOAT_PARAM_REGS 8
#define RETURN_REGS 2
#define FLOAT_RETURN_REGS 2

static const AMD64_Reg_No param_regs [] = {AMD64_RDI, AMD64_RSI, AMD64_RDX,
					   AMD64_RCX, AMD64_R8, AMD64_R9};

static const AMD64_Reg_No return_regs [] = {AMD64_RAX, AMD64_RDX};
#endif
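
/*
 * Illustrative sketch, not part of the original header: mapping the i-th integer
 * argument to its register under the active ABI using the tables above
 * (example_int_arg_reg is a hypothetical helper).
 *
 *   static inline int
 *   example_int_arg_reg (int i)
 *   {
 *       return (i < PARAM_REGS) ? (int) param_regs [i] : -1;  // -1: argument is passed on the stack
 *   }
 *
 * On Win64 the first four integer arguments land in RCX, RDX, R8, R9; on the
 * SysV ABI the first six land in RDI, RSI, RDX, RCX, R8, R9.
 */
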
typedef struct {
	/* Method address to call */
	gpointer addr;
	/* The trampoline reads this, so keep the size explicit */
	int ret_marshal;
	/* If ret_marshal != NONE, this is the reg of the vret arg, else -1 (used in the out case) */
	/* Equivalent of vret_arg_slot in the x86 implementation. */
	int vret_arg_reg;
	/* The stack slot where the return value will be stored (used in the in case) */
	int vret_slot;
	int stack_usage, map_count;
	/* If not -1, then make a virtual call using this vtable offset */
	int vcall_offset;
	/* If 1, make an indirect call to the address in the rgctx reg */
	int calli;
	/* Whether this is an in or an out call */
	int gsharedvt_in;
	/* Maps stack slots/registers in the caller to the stack slots/registers in the callee */
	int map [MONO_ZERO_LEN_ARRAY];
} GSharedVtCallInfo;

/* Structure used by the sequence points in AOTed code */
typedef struct {
	gpointer ss_tramp_addr;
	gpointer bp_addrs [MONO_ZERO_LEN_ARRAY];
} SeqPointInfo;

typedef struct {
	mgreg_t res;
	guint8 *ret;
	double fregs [8];
	mgreg_t has_fp;
	mgreg_t nstack_args;
	guint8 buffer [256];
	/* This should come last as the structure is dynamically extended */
	mgreg_t regs [PARAM_REGS];
} DynCallArgs;

typedef enum {
	ArgInIReg,
	ArgInFloatSSEReg,
	ArgInDoubleSSEReg,
	ArgOnStack,
	ArgValuetypeInReg,
	ArgValuetypeAddrInIReg,
	ArgValuetypeAddrOnStack,
	/* gsharedvt argument passed by addr */
	ArgGSharedVtInReg,
	ArgGSharedVtOnStack,
	/* Variable sized gsharedvt argument passed/returned by addr */
	ArgGsharedvtVariableInReg,
	ArgNone /* only in pair_storage */
} ArgStorage;

typedef struct {
	gint16 offset;
	gint8 reg;
	ArgStorage storage : 8;

	/* Only if storage == ArgValuetypeInReg */
	ArgStorage pair_storage [2];
	gint8 pair_regs [2];
	/* The size of each pair (bytes) */
	int pair_size [2];
	int nregs;
	/* Only if storage == ArgOnStack */
	int arg_size; // Bytes, will always be rounded up/aligned to an 8 byte boundary
	// Size in bytes for small arguments
	int byte_arg_size;
	guint8 pass_empty_struct : 1; // Set when an empty struct needs to be represented as an argument.
} ArgInfo;

typedef struct {
	int nargs;
	guint32 stack_usage;
	guint32 reg_usage;
	guint32 freg_usage;
	gboolean need_stack_align;
	gboolean gsharedvt;
	/* The index of the vret arg in the argument list */
	int vret_arg_index;
	ArgInfo ret;
	ArgInfo sig_cookie;
	ArgInfo args [1];
} CallInfo;

typedef struct {
	/* General registers */
	mgreg_t gregs [AMD64_NREG];
	/* Floating registers */
	double fregs [AMD64_XMM_NREG];
	/* Stack usage, used for passing params on stack */
	size_t stack_size;
	gpointer *stack;
} CallContext;

#define MONO_CONTEXT_SET_LLVM_EXC_REG(ctx, exc) do { (ctx)->gregs [AMD64_RAX] = (gsize)exc; } while (0)
#define MONO_CONTEXT_SET_LLVM_EH_SELECTOR_REG(ctx, sel) do { (ctx)->gregs [AMD64_RDX] = (gsize)(sel); } while (0)

#define MONO_ARCH_INIT_TOP_LMF_ENTRY(lmf)

#ifdef _MSC_VER

#define MONO_INIT_CONTEXT_FROM_FUNC(ctx, start_func) do { \
	guint64 stackptr; \
	mono_arch_flush_register_windows (); \
	stackptr = ((guint64)_AddressOfReturnAddress () - sizeof (void*)); \
	MONO_CONTEXT_SET_IP ((ctx), (start_func)); \
	MONO_CONTEXT_SET_BP ((ctx), stackptr); \
	MONO_CONTEXT_SET_SP ((ctx), stackptr); \
} while (0)

#else

/*
 * __builtin_frame_address () is broken on some older gcc versions in the presence of
 * frame pointer elimination, see bug #82095.
 */
#define MONO_INIT_CONTEXT_FROM_FUNC(ctx,start_func) do { \
	int tmp; \
	guint64 stackptr = (guint64)&tmp; \
	mono_arch_flush_register_windows (); \
	MONO_CONTEXT_SET_IP ((ctx), (start_func)); \
	MONO_CONTEXT_SET_BP ((ctx), stackptr); \
	MONO_CONTEXT_SET_SP ((ctx), stackptr); \
} while (0)

#endif
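
/*
 * Illustrative usage sketch, not part of the original header (example_capture is
 * a hypothetical function):
 *
 *   static void
 *   example_capture (void)
 *   {
 *       MonoContext ctx;
 *       MONO_INIT_CONTEXT_FROM_FUNC (&ctx, example_capture);
 *       // ctx now has IP set to example_capture and SP/BP pointing into the current frame
 *   }
 */
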
/*
 * Some icalls like mono_array_new_va need to be called using a different
 * calling convention.
 */
#define MONO_ARCH_VARARG_ICALLS 1

#if !defined( HOST_WIN32 ) && !defined(__HAIKU__) && defined (HAVE_SIGACTION)

#define MONO_ARCH_USE_SIGACTION 1

#ifdef HAVE_WORKING_SIGALTSTACK

#define MONO_ARCH_SIGSEGV_ON_ALTSTACK

#endif

#endif /* !HOST_WIN32 */

#if !defined(__linux__)
#define MONO_ARCH_NOMAP32BIT 1
#endif

#ifdef TARGET_WIN32
#define MONO_AMD64_ARG_REG1 AMD64_RCX
#define MONO_AMD64_ARG_REG2 AMD64_RDX
#define MONO_AMD64_ARG_REG3 AMD64_R8
#define MONO_AMD64_ARG_REG4 AMD64_R9
#else
#define MONO_AMD64_ARG_REG1 AMD64_RDI
#define MONO_AMD64_ARG_REG2 AMD64_RSI
#define MONO_AMD64_ARG_REG3 AMD64_RDX
#define MONO_AMD64_ARG_REG4 AMD64_RCX
#endif

#define MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS
#define MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS

#define MONO_ARCH_EMULATE_CONV_R8_UN 1
#define MONO_ARCH_EMULATE_FREM 1
#define MONO_ARCH_HAVE_IS_INT_OVERFLOW 1
#define MONO_ARCH_HAVE_INVALIDATE_METHOD 1
#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
#define MONO_ARCH_IMT_REG AMD64_R10
#define MONO_ARCH_IMT_SCRATCH_REG AMD64_R11
#define MONO_ARCH_VTABLE_REG MONO_AMD64_ARG_REG1
/*
 * We use r10 for the imt/rgctx register rather than r11 because r11 is
 * used by the trampoline as a scratch register and hence might be
 * clobbered across method call boundaries.
 */
#define MONO_ARCH_RGCTX_REG MONO_ARCH_IMT_REG
#define MONO_ARCH_HAVE_CMOV_OPS 1
#define MONO_ARCH_HAVE_EXCEPTIONS_INIT 1
#define MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE 1
#define MONO_ARCH_HAVE_SIGCTX_TO_MONOCTX 1
#define MONO_ARCH_HAVE_GET_TRAMPOLINES 1

#define MONO_ARCH_INTERPRETER_SUPPORTED 1
#define MONO_ARCH_AOT_SUPPORTED 1
#define MONO_ARCH_SOFT_DEBUG_SUPPORTED 1

#define MONO_ARCH_SUPPORT_TASKLETS 1

#define MONO_ARCH_GSHARED_SUPPORTED 1
#define MONO_ARCH_DYN_CALL_SUPPORTED 1
#define MONO_ARCH_DYN_CALL_PARAM_AREA 0

#define MONO_ARCH_LLVM_SUPPORTED 1
#define MONO_ARCH_HAVE_CARD_TABLE_WBARRIER 1
#define MONO_ARCH_HAVE_SETUP_RESUME_FROM_SIGNAL_HANDLER_CTX 1
#define MONO_ARCH_GC_MAPS_SUPPORTED 1
#define MONO_ARCH_HAVE_CONTEXT_SET_INT_REG 1
#define MONO_ARCH_HAVE_SETUP_ASYNC_CALLBACK 1
#define MONO_ARCH_HAVE_CREATE_LLVM_NATIVE_THUNK 1
#define MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE 1
#define MONO_ARCH_HAVE_OP_TAILCALL_REG 1
#define MONO_ARCH_HAVE_SDB_TRAMPOLINES 1
#define MONO_ARCH_HAVE_PATCH_CODE_NEW 1
#define MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT 1
#define MONO_ARCH_HAVE_GENERAL_RGCTX_LAZY_FETCH_TRAMPOLINE 1
#define MONO_ARCH_FLOAT32_SUPPORTED 1

#define MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP

#if defined(TARGET_OSX) || defined(__linux__)
#define MONO_ARCH_HAVE_UNWIND_BACKTRACE 1
#endif

#define MONO_ARCH_GSHAREDVT_SUPPORTED 1

#if defined(TARGET_APPLETVOS)
/* No signals */
#define MONO_ARCH_NEED_DIV_CHECK 1
#endif

/* Used for optimization, not complete */
#define MONO_ARCH_IS_OP_MEMBASE(opcode) ((opcode) == OP_X86_PUSH_MEMBASE)

#define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
		MonoInst *inst; \
		MONO_INST_NEW ((cfg), inst, OP_AMD64_ICOMPARE_MEMBASE_REG); \
		inst->inst_basereg = array_reg; \
		inst->inst_offset = offset; \
		inst->sreg2 = index_reg; \
		MONO_ADD_INS ((cfg)->cbb, inst); \
		MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
	} while (0)
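
/*
 * Illustrative usage sketch, not part of the original header: emitting a bounds
 * check before an array access, where bounds_offset (hypothetical) is the offset
 * of the array length field inside the array object.
 *
 *   MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, array_reg, bounds_offset, index_reg);
 *   // falls through when index < length, otherwise raises IndexOutOfRangeException
 */
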
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
#define MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER 1

void
mono_amd64_patch (unsigned char* code, gpointer target);

void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
			    guint64 dummy5, guint64 dummy6,
			    MonoContext *mctx, MonoObject *exc, gboolean rethrow);

void
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
				   guint64 dummy5, guint64 dummy6,
				   MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset);

void
mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
			  guint64 dummy5, guint64 dummy6,
			  MonoContext *mctx, guint32 dummy7, gint64 dummy8);

gpointer
mono_amd64_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer mrgctx_reg);

GSList*
mono_amd64_get_exception_trampolines (gboolean aot);

int
mono_amd64_get_tls_gs_offset (void) MONO_LLVM_INTERNAL;

#if defined(TARGET_WIN32) && !defined(DISABLE_JIT)

#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define MONO_ARCH_HAVE_UNWIND_TABLE 1
#define MONO_ARCH_HAVE_CODE_CHUNK_TRACKING 1

#ifdef ENABLE_CHECKED_BUILD
#define ENABLE_CHECKED_BUILD_UNWINDINFO
#endif

#define MONO_MAX_UNWIND_CODES 22

typedef enum _UNWIND_OP_CODES {
	UWOP_PUSH_NONVOL = 0, /* info == register number */
	UWOP_ALLOC_LARGE,     /* no info, alloc size in next 2 slots */
	UWOP_ALLOC_SMALL,     /* info == size of allocation / 8 - 1 */
	UWOP_SET_FPREG,       /* no info, FP = RSP + UNWIND_INFO.FPRegOffset*16 */
	UWOP_SAVE_NONVOL,     /* info == register number, offset in next slot */
	UWOP_SAVE_NONVOL_FAR, /* info == register number, offset in next 2 slots */
	UWOP_SAVE_XMM128,     /* info == XMM reg number, offset in next slot */
	UWOP_SAVE_XMM128_FAR, /* info == XMM reg number, offset in next 2 slots */
	UWOP_PUSH_MACHFRAME   /* info == 0: no error-code, 1: error-code */
} UNWIND_CODE_OPS;

typedef union _UNWIND_CODE {
	struct {
		guchar CodeOffset;
		guchar UnwindOp : 4;
		guchar OpInfo   : 4;
	};
	gushort FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;

typedef struct _UNWIND_INFO {
	guchar Version       : 3;
	guchar Flags         : 5;
	guchar SizeOfProlog;
	guchar CountOfCodes;
	guchar FrameRegister : 4;
	guchar FrameOffset   : 4;
	UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
/*	UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
 *	union {
 *		OPTIONAL ULONG ExceptionHandler;
 *		OPTIONAL ULONG FunctionEntry;
 *	};
 *	OPTIONAL ULONG ExceptionData[]; */
} UNWIND_INFO, *PUNWIND_INFO;

static inline guint
mono_arch_unwindinfo_get_size (guchar code_count)
{
	// The returned size is used as the allocated size for unwind data trailing the memory used by the compiled method.
	// The Windows x64 ABI has some requirements on the data written into this memory. Both the RUNTIME_FUNCTION
	// and UNWIND_INFO structs need to be DWORD aligned, and the unwind codes array must have an even number of
	// entries, while the count stored in the UNWIND_INFO struct holds the real number of unwind codes.
	// Adding extra bytes to the total size makes sure we can properly align the RUNTIME_FUNCTION struct. Since our
	// UNWIND_INFO follows the RUNTIME_FUNCTION struct in memory, it will automatically be DWORD aligned as well.
	// Also make sure to allocate room for a padding UNWIND_CODE, if needed.
	return (sizeof (mgreg_t) + sizeof (UNWIND_INFO)) -
		(sizeof (UNWIND_CODE) * ((MONO_MAX_UNWIND_CODES - ((code_count + 1) & ~1))));
}
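
/*
 * Worked example (illustrative): with MONO_MAX_UNWIND_CODES == 22, a request for
 * code_count == 3 is rounded up to 4 reserved UNWIND_CODE slots ((3 + 1) & ~1),
 * so the 18 unused slots are subtracted and the returned size is
 * sizeof (mgreg_t) + sizeof (UNWIND_INFO) - 18 * sizeof (UNWIND_CODE).
 */
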
guchar
mono_arch_unwindinfo_get_code_count (GSList *unwind_ops);

PUNWIND_INFO
mono_arch_unwindinfo_alloc_unwind_info (GSList *unwind_ops);

void
mono_arch_unwindinfo_free_unwind_info (PUNWIND_INFO unwind_info);

guint
mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg);

void
mono_arch_unwindinfo_install_method_unwind_info (gpointer *monoui, gpointer code, guint code_size);

void
mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size);

void
mono_arch_code_chunk_new (void *chunk, int size);

void
mono_arch_code_chunk_destroy (void *chunk);

#endif /* G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) */
#endif /* defined(TARGET_WIN32) && !defined(DISABLE_JIT) */

#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
// Allocate additional size for max 3 unwind ops (push + fp or sp small|large) + unwind info struct trailing code buffer.
#define MONO_TRAMPOLINE_UNWINDINFO_SIZE(max_code_count) (mono_arch_unwindinfo_get_size (max_code_count))
#define MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE (MONO_TRAMPOLINE_UNWINDINFO_SIZE(3))

static inline gboolean
mono_arch_unwindinfo_validate_size (GSList *unwind_ops, guint max_size)
{
	guint current_size = mono_arch_unwindinfo_get_size (mono_arch_unwindinfo_get_code_count (unwind_ops));
	return current_size <= max_size;
}

#else

#define MONO_TRAMPOLINE_UNWINDINFO_SIZE(max_code_count) 0
#define MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE 0

static inline gboolean
mono_arch_unwindinfo_validate_size (GSList *unwind_ops, guint max_size)
{
	return TRUE;
}

#endif
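
/*
 * Illustrative usage sketch, not part of the original header: a trampoline emitter
 * could check that its recorded unwind ops fit the space reserved after the code.
 *
 *   g_assert (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
 */
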
CallInfo* mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig);

#endif /* __MONO_MINI_AMD64_H__ */