/**
 * \file
 * GSharedVt trampolines for amd64.
 *
 * Authors:
 *   Zoltan Varga <vargaz@gmail.com>
 *   Rodrigo Kumpera <kumpera@gmail.com>
 *   Andi McClure <andi.mcclure@xamarin.com>
 *   Johan Lorensson <johan.lorensson@xamarin.com>
 *
 * Copyright 2015 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/gc-internals.h>
#include <mono/arch/amd64/amd64-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini.h"
#include "mini-amd64.h"
#include "mini-amd64-gsharedvt.h"
#include "debugger-agent.h"
#if defined (MONO_ARCH_GSHAREDVT_SUPPORTED)

#define SRC_REG_SHIFT 0
#define SRC_REG_MASK 0xFFFF

#define SRC_DESCRIPTOR_MARSHAL_SHIFT 16
#define SRC_DESCRIPTOR_MARSHAL_MASK 0x0FF

#define SLOT_COUNT_SHIFT 24
#define SLOT_COUNT_MASK 0xFF
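
/*
 * Each entry in GSharedVtCallInfo->map is a pair of 32-bit descriptors (src, dst).
 * Going by the defines above, the src descriptor packs three fields:
 *
 *   bits  0-15: source register/stack slot index (SRC_REG_*)
 *   bits 16-23: argument marshal kind, a GSHAREDVT_ARG_ value (SRC_DESCRIPTOR_MARSHAL_*)
 *   bits 24-31: slot count, used by GSHAREDVT_ARG_BYREF_TO_BYVAL (SLOT_COUNT_*)
 */

/*
 * mono_amd64_start_gsharedvt_call:
 *
 *   Called by the gsharedvt trampoline below to marshal arguments from the
 * CALLER argument area to the CALLEE argument area, as described by INFO->map.
 * Returns the address of the method to call.
 */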
gpointer
mono_amd64_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer mrgctx_reg)
{
	int i;

#ifdef DEBUG_AMD64_GSHAREDVT
	printf ("mono_amd64_start_gsharedvt_call info %p caller %p callee %p ctx %p\n", info, caller, callee, mrgctx_reg);

	for (i = 0; i < PARAM_REGS; ++i)
		printf ("\treg [%d] -> %p\n", i, caller [i]);
#endif
	/* Set vtype ret arg: pass the address of the vret buffer in the vret arg register slot */
	if (info->vret_slot != -1) {
		DEBUG_AMD64_GSHAREDVT_PRINT ("vret handling\n[%d] < &%d (%p)\n", info->vret_arg_reg, info->vret_slot, &callee [info->vret_slot]);
		g_assert (info->vret_slot);
		callee [info->vret_arg_reg] = &callee [info->vret_slot];
	}
	for (i = 0; i < info->map_count; ++i) {
		int src = info->map [i * 2];
		int dst = info->map [(i * 2) + 1];
		int arg_marshal = (src >> SRC_DESCRIPTOR_MARSHAL_SHIFT) & SRC_DESCRIPTOR_MARSHAL_MASK;

		int source_reg = src & SRC_REG_MASK;
		int dest_reg = dst & SRC_REG_MASK;

		DEBUG_AMD64_GSHAREDVT_PRINT ("source %x dest %x marshal %d: ", src, dst, arg_marshal);
		switch (arg_marshal) {
		case GSHAREDVT_ARG_NONE:
			callee [dest_reg] = caller [source_reg];
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- %d (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], caller [source_reg]);
			break;
		case GSHAREDVT_ARG_BYVAL_TO_BYREF:
			/* gsharedvt argument passed by addr in reg/stack slot */
			callee [dest_reg] = &caller [source_reg];
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- &%d (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], &caller [source_reg]);
			break;
		case GSHAREDVT_ARG_BYREF_TO_BYVAL: {
			int slot_count = (src >> SLOT_COUNT_SHIFT) & SLOT_COUNT_MASK;
			int j;
			gpointer *addr = (gpointer*)caller [source_reg];

			for (j = 0; j < slot_count; ++j)
				callee [dest_reg + j] = addr [j];
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- [%d] (%d words) (%p) <- (%p)\n", dest_reg, source_reg, slot_count, &callee [dest_reg], &caller [source_reg]);
			break;
		}
		case GSHAREDVT_ARG_BYREF_TO_BYVAL_I1: {
			gint8 *addr = (gint8*)caller [source_reg];

			callee [dest_reg] = (gpointer)(host_mgreg_t)*addr;
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- (i1) [%d] (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], &caller [source_reg]);
			break;
		}
		case GSHAREDVT_ARG_BYREF_TO_BYVAL_U1: {
			guint8 *addr = (guint8*)caller [source_reg];

			callee [dest_reg] = (gpointer)(host_mgreg_t)*addr;
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- (u1) [%d] (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], &caller [source_reg]);
			break;
		}
		case GSHAREDVT_ARG_BYREF_TO_BYVAL_I2: {
			gint16 *addr = (gint16*)caller [source_reg];

			callee [dest_reg] = (gpointer)(host_mgreg_t)*addr;
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- (i2) [%d] (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], &caller [source_reg]);
			break;
		}
		case GSHAREDVT_ARG_BYREF_TO_BYVAL_U2: {
			guint16 *addr = (guint16*)caller [source_reg];

			callee [dest_reg] = (gpointer)(host_mgreg_t)*addr;
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- (u2) [%d] (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], &caller [source_reg]);
			break;
		}
		case GSHAREDVT_ARG_BYREF_TO_BYVAL_I4: {
			gint32 *addr = (gint32*)caller [source_reg];

			callee [dest_reg] = (gpointer)(host_mgreg_t)*addr;
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- (i4) [%d] (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], &caller [source_reg]);
			break;
		}
		case GSHAREDVT_ARG_BYREF_TO_BYVAL_U4: {
			guint32 *addr = (guint32*)caller [source_reg];

			callee [dest_reg] = (gpointer)(host_mgreg_t)*addr;
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- (u4) [%d] (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], &caller [source_reg]);
			break;
		}
		default:
			g_error ("can't handle arg marshal %d\n", arg_marshal);
		}
	}
	//Can't handle for now
	if (info->vcall_offset != -1) {
		MonoObject *this_obj = (MonoObject*)caller [0];

		DEBUG_AMD64_GSHAREDVT_PRINT ("target is a vcall at offset %d\n", info->vcall_offset / 8);
		if (G_UNLIKELY (!this_obj))
			return NULL;
		if (info->vcall_offset == MONO_GSHAREDVT_DEL_INVOKE_VT_OFFSET)
			/* delegate invoke */
			return ((MonoDelegate*)this_obj)->invoke_impl;
		else
			return *(gpointer*)((char*)this_obj->vtable + info->vcall_offset);
	} else if (info->calli) {
		/* The address to call is passed in the mrgctx reg */
		return mrgctx_reg;
	} else {
		DEBUG_AMD64_GSHAREDVT_PRINT ("target is %p\n", info->addr);
		return info->addr;
	}
}
#ifndef DISABLE_JIT

// Compiler support

/*
 * mono_arch_get_gsharedvt_arg_trampoline:
 *
 *   See tramp-x86.c for documentation.
 */
gpointer
mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
{
	guint8 *code, *start;
	MonoMemoryManager *mem_manager = mono_domain_ambient_memory_manager (domain);

	const int buf_len = 32;

	start = code = mono_mem_manager_code_reserve (mem_manager, buf_len);
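
	/* The generated code is simply:
	 *
	 *     mov $arg, %rax
	 *     jmp addr
	 *
	 * i.e. it passes the gsharedvt info in RAX (see the comment in
	 * mono_arch_get_gsharedvt_trampoline ()) and tail-jumps to ADDR.
	 */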
	amd64_mov_reg_imm (code, AMD64_RAX, arg);
	amd64_jump_code (code, addr);

	g_assertf ((code - start) <= buf_len, "%d %d", (int)(code - start), buf_len);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), domain);

	return start;
}
gpointer
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int buf_len, cfa_offset;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	int n_arg_regs, n_arg_fregs, framesize, i;
	int info_offset, offset, rgctx_arg_reg_offset;
	int caller_reg_area_offset, callee_reg_area_offset;
	guint8 *br_out, *br [64], *br_ret [64];
	int b_ret_index;
	int reg_area_size;
	buf_len = 2048;
	buf = code = mono_global_codeman_reserve (buf_len + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);

	/*
	 * We are being called by a gsharedvt arg trampoline, the info argument is in AMD64_RAX.
	 */
	n_arg_regs = PARAM_REGS;
	n_arg_fregs = FLOAT_PARAM_REGS;
216 offset = 0;
217 /* info reg */
218 info_offset = offset;
219 offset += 8;
221 /* rgctx reg */
222 rgctx_arg_reg_offset = offset;
223 offset += 8;
225 /*callconv in regs */
226 caller_reg_area_offset = offset;
227 reg_area_size = ALIGN_TO ((n_arg_regs + n_arg_fregs) * 8, MONO_ARCH_FRAME_ALIGNMENT);
228 offset += reg_area_size;
230 framesize = offset;
232 g_assert (framesize % MONO_ARCH_FRAME_ALIGNMENT == 0);
233 g_assert (reg_area_size % MONO_ARCH_FRAME_ALIGNMENT == 0);
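
	/*
	 * The framesize-byte area allocated below RBP thus holds, from the bottom up:
	 * the info pointer (info_offset == 0), the rgctx (rgctx_arg_reg_offset == 8)
	 * and the caller's int/float argument registers (caller_reg_area_offset == 16).
	 */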
	/* unwind markers 1/3 */
	cfa_offset = sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -cfa_offset);

	/* save the old frame pointer */
	amd64_push_reg (code, AMD64_RBP);

	/* unwind markers 2/3 */
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, -cfa_offset);

	/* set it as the new frame pointer */
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));

	/* unwind markers 3/3 */
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	mono_add_unwind_op_fp_alloc (unwind_ops, code, buf, AMD64_RBP, 0);
256 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
258 /* save stuff */
260 /* save info */
261 amd64_mov_membase_reg (code, AMD64_RSP, info_offset, AMD64_RAX, sizeof (target_mgreg_t));
262 /* save rgctx */
263 amd64_mov_membase_reg (code, AMD64_RSP, rgctx_arg_reg_offset, MONO_ARCH_RGCTX_REG, sizeof (target_mgreg_t));
265 for (i = 0; i < n_arg_regs; ++i)
266 amd64_mov_membase_reg (code, AMD64_RSP, caller_reg_area_offset + i * 8, param_regs [i], sizeof (target_mgreg_t));
268 for (i = 0; i < n_arg_fregs; ++i)
269 amd64_sse_movsd_membase_reg (code, AMD64_RSP, caller_reg_area_offset + (i + n_arg_regs) * 8, i);
	/* TODO Allocate stack area used to pass arguments to the method */

	/* Allocate callee register area just below the caller area so it can be accessed from start_gsharedvt_call using negative offsets */
	/* XXX figure out alignment */
	callee_reg_area_offset = reg_area_size - ((n_arg_regs + n_arg_fregs) * 8); /* Ensure alignment */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, reg_area_size);

	/* Allocate stack area used to pass arguments to the method */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, stack_usage), 4);
	amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, AMD64_R11);
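
	/*
	 * The callee argument array handed to start_gsharedvt_call () starts at
	 * RSP + callee_reg_area_offset: first the (n_arg_regs + n_arg_fregs)
	 * register slots, then the stack slots of the call, contiguously. The
	 * register slots are reloaded below and the reg area is deallocated before
	 * the call, leaving RSP pointing at the stack arguments.
	 */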
	/* The stack now looks like this:

	<caller stack params area>
	<return address>
	<old frame pointer>
	<caller registers area>
	<rgctx>
	<gsharedvt info>
	<callee stack area>
	<callee reg area>
	*/
	/* Call start_gsharedvt_call () */
	/* arg1 == info */
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RAX, sizeof (target_mgreg_t));
	/* arg2 == caller stack area */
	amd64_lea_membase (code, MONO_AMD64_ARG_REG2, AMD64_RBP, -(framesize - caller_reg_area_offset));

	/* arg3 == callee stack area */
	amd64_lea_membase (code, MONO_AMD64_ARG_REG3, AMD64_RSP, callee_reg_area_offset);

	/* arg4 == mrgctx reg */
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG4, MONO_ARCH_RGCTX_REG, sizeof (target_mgreg_t));
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_amd64_start_gsharedvt_call));
#ifdef TARGET_WIN32
		/* Since we are doing a call as part of setting up the stack frame, the reserved shadow space used on Windows
		   is allocated up in the callee stack area, but currently the callee reg area is in between. The Windows calling
		   convention dictates that room is made on the stack where the callee can save any parameters passed in registers.
		   Since the Windows x64 calling convention uses 4 registers for the first 4 parameters, the stack needs to be
		   adjusted before making the call. NOTE: the Windows calling convention assumes that space for all registers
		   has been reserved, regardless of the number of function parameters actually used. */
		int shadow_reg_size = ALIGN_TO (PARAM_REGS * sizeof (target_mgreg_t), MONO_ARCH_FRAME_ALIGNMENT);

		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, shadow_reg_size);
		amd64_call_reg (code, AMD64_R11);
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, shadow_reg_size);
#else
		amd64_call_reg (code, AMD64_R11);
#endif
	} else {
		amd64_call_code (code, mono_amd64_start_gsharedvt_call);
	}
	/* The method to call is now in RAX. Restore regs and jump */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, sizeof (target_mgreg_t));

	for (i = 0; i < n_arg_regs; ++i)
		amd64_mov_reg_membase (code, param_regs [i], AMD64_RSP, callee_reg_area_offset + i * 8, sizeof (target_mgreg_t));

	for (i = 0; i < n_arg_fregs; ++i)
		amd64_sse_movsd_reg_membase (code, i, AMD64_RSP, callee_reg_area_offset + (i + n_arg_regs) * 8);

	/* load rgctx */
	amd64_mov_reg_membase (code, MONO_ARCH_RGCTX_REG, AMD64_RBP, -(framesize - rgctx_arg_reg_offset), sizeof (target_mgreg_t));

	/* Deallocate the callee reg area */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, reg_area_size);

	/* Call the thing */
	amd64_call_reg (code, AMD64_R11);
	/* Marshal return value. Available registers: R10 and R11 */
	/* Load info struct */
	amd64_mov_reg_membase (code, AMD64_R10, AMD64_RBP, -(framesize - info_offset), sizeof (target_mgreg_t));

	/* Branch to the in/out handling code */
	amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, gsharedvt_in), 1, 4);

	b_ret_index = 0;
	br_out = code;
	x86_branch32 (code, X86_CC_NE, 0, TRUE);
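
	/*
	 * If gsharedvt_in != 1 we jump to the OUT case below. Otherwise we fall
	 * through to the IN case: the callee wrote its return value into the vret
	 * buffer, and it has to be loaded into the ABI return register(s).
	 */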
	/*
	 * IN CASE
	 */

	/* Load vret_slot */
	/* Use the first input parameter register as scratch since it is volatile on all platforms */
	amd64_mov_reg_membase (code, MONO_AMD64_ARG_REG1, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_slot), 4);
	amd64_alu_reg_imm (code, X86_SUB, MONO_AMD64_ARG_REG1, n_arg_regs + n_arg_fregs);
	amd64_shift_reg_imm (code, X86_SHL, MONO_AMD64_ARG_REG1, 3);
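
	/*
	 * ARG_REG1 now holds (vret_slot - (n_arg_regs + n_arg_fregs)) * 8. Since the
	 * stack slots of the callee argument array start right at RSP, adding this
	 * to RSP yields &callee [vret_slot], i.e. the address of the vret buffer.
	 */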
	/* Load the vret address into R11 */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (target_mgreg_t));
	amd64_alu_reg_reg (code, X86_ADD, AMD64_R11, MONO_AMD64_ARG_REG1);
	/* Load the ret marshal type into R10 (the info pointer is not needed further on this path) */
	amd64_mov_reg_membase (code, AMD64_R10, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);
	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		amd64_alu_reg_imm (code, X86_CMP, AMD64_R10, i);
		br [i] = code;
		amd64_branch8 (code, X86_CC_EQ, 0, TRUE);
	}
	x86_breakpoint (code); /* unhandled case */
	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		mono_amd64_patch (br [i], code);
		switch (i) {
		case GSHAREDVT_RET_NONE:
			break;
		case GSHAREDVT_RET_I1:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, TRUE, FALSE);
			break;
		case GSHAREDVT_RET_U1:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, FALSE, FALSE);
			break;
		case GSHAREDVT_RET_I2:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, TRUE, TRUE);
			break;
		case GSHAREDVT_RET_U2:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, FALSE, TRUE);
			break;
		case GSHAREDVT_RET_I4:
		case GSHAREDVT_RET_U4:
			/* movsxd sign-extends, which is wrong for U4, but harmless: callers of a
			   method returning a 32-bit value only read the low 32 bits of RAX. */
			amd64_movsxd_reg_membase (code, AMD64_RAX, AMD64_R11, 0);
			break;
		case GSHAREDVT_RET_I8:
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 0, 8);
			break;
		case GSHAREDVT_RET_IREGS_1:
			amd64_mov_reg_membase (code, return_regs [i - GSHAREDVT_RET_IREGS_1], AMD64_R11, 0, 8);
			break;
		case GSHAREDVT_RET_R8:
			amd64_sse_movsd_reg_membase (code, AMD64_XMM0, AMD64_R11, 0);
			break;
		default:
			x86_breakpoint (code); /* can't handle specific case */
		}

		br_ret [b_ret_index ++] = code;
		x86_jump32 (code, 0);
	}
	/*
	 * OUT CASE
	 */
	mono_amd64_patch (br_out, code);

	/*
	The address to write the return value to is in the original value of the register specified by vret_arg_reg.
	This will be either RSI or RDI (System V) or RCX or RDX (Windows), depending on whether this is a static call.
	Its location:
	We alloc 'framesize' bytes below RBP to save regs, info and rgctx. RSP = RBP - framesize.
	We store RDI (System V) / RCX (Windows) at RSP + caller_reg_area_offset + slot_index_of (register) * 8.

	address: RBP - framesize + caller_reg_area_offset + 8 * slot
	*/
	int caller_vret_offset = caller_reg_area_offset - framesize;

	/* Load vret address in R11 */
	/* The position to return to is passed as a hidden argument; load 'vret_arg_reg' to find it */
	amd64_movsxd_reg_membase (code, AMD64_R11, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));

	// In the GSHAREDVT_RET_NONE case, vret_arg_reg is -1. In this case, skip marshalling.
	amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
	br_ret [b_ret_index ++] = code;
	amd64_branch32 (code, X86_CC_LT, 0, TRUE);
	/* Compute ret area address in the caller frame, *( ((gpointer *)RBP) [R11+2] ) */
	amd64_shift_reg_imm (code, X86_SHL, AMD64_R11, 3);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, caller_vret_offset);
	amd64_alu_reg_reg (code, X86_ADD, AMD64_R11, AMD64_RBP);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof (target_mgreg_t));
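
	/* R11 now holds the address the caller expects the return value to be written to */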
	/* Load ret marshal type in R10 */
	amd64_mov_reg_membase (code, AMD64_R10, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);
	// Switch table for the ret_marshal value
	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		amd64_alu_reg_imm (code, X86_CMP, AMD64_R10, i);
		br [i] = code;
		amd64_branch8 (code, X86_CC_EQ, 0, TRUE);
	}
	x86_breakpoint (code); /* unhandled case */
	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		mono_amd64_patch (br [i], code);
		switch (i) {
		case GSHAREDVT_RET_NONE:
			break;
		case GSHAREDVT_RET_IREGS_1:
			amd64_mov_membase_reg (code, AMD64_R11, 0, return_regs [i - GSHAREDVT_RET_IREGS_1], 8);
			break;
		case GSHAREDVT_RET_R8:
			amd64_sse_movsd_membase_reg (code, AMD64_R11, 0, AMD64_XMM0);
			break;
		default:
			x86_breakpoint (code); /* can't handle specific case */
		}

		br_ret [b_ret_index ++] = code;
		x86_jump32 (code, 0);
	}
	/* exit path */
	for (i = 0; i < b_ret_index; ++i)
		mono_amd64_patch (br_ret [i], code);

	/* Exit code path */
#if TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
#else
	amd64_leave (code);
#endif
	amd64_ret (code);
	g_assertf ((code - buf) <= buf_len, "%d %d", (int)(code - buf), buf_len);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	if (info)
		*info = mono_tramp_info_create ("gsharedvt_trampoline", buf, code - buf, ji, unwind_ops);

	mono_arch_flush_icache (buf, code - buf);
	return buf;
}
#else

gpointer
mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

#endif

#else

gpointer
mono_amd64_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer mrgctx_reg)
{
	g_assert_not_reached ();
	return NULL;
}

#endif