/**
 * \file
 * Gsharedvt trampolines for amd64
 *
 * Authors:
 *	Zoltan Varga <vargaz@gmail.com>
 *	Rodrigo Kumpera <kumpera@gmail.com>
 *	Andi McClure <andi.mcclure@xamarin.com>
 *	Johan Lorensson <johan.lorensson@xamarin.com>
 *
 * Copyright 2015 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/gc-internals.h>
#include <mono/arch/amd64/amd64-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini.h"
#include "mini-amd64.h"
#include "mini-amd64-gsharedvt.h"
#include "debugger-agent.h"

#if defined (MONO_ARCH_GSHAREDVT_SUPPORTED)
#define SRC_REG_SHIFT 0
#define SRC_REG_MASK 0xFFFF

#define SRC_DESCRIPTOR_MARSHAL_SHIFT 16
#define SRC_DESCRIPTOR_MARSHAL_MASK 0x0FF

#define SLOT_COUNT_SHIFT 24
#define SLOT_COUNT_MASK 0xFF
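
/*
 * Each entry of info->map below is a (src, dst) descriptor pair laid out per the
 * masks above. A purely illustrative example: a src descriptor of 0x02010003
 * decodes to
 *   source reg/slot = 0x02010003 & SRC_REG_MASK                           -> 3
 *   arg marshal     = (0x02010003 >> SRC_DESCRIPTOR_MARSHAL_SHIFT) & 0xFF -> 1
 *   slot count      = (0x02010003 >> SLOT_COUNT_SHIFT) & SLOT_COUNT_MASK  -> 2
 */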

gpointer
mono_amd64_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer mrgctx_reg)
{
	int i;

#ifdef DEBUG_AMD64_GSHAREDVT
	printf ("mono_amd64_start_gsharedvt_call info %p caller %p callee %p ctx %p\n", info, caller, callee, mrgctx_reg);

	for (i = 0; i < PARAM_REGS; ++i)
		printf ("\treg [%d] -> %p\n", i, caller [i]);
#endif
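
	/*
	 * Marshal the arguments from the caller frame to the callee frame: 'caller'
	 * and 'callee' point to the register+stack slot arrays built by the
	 * trampoline below, and info->map describes, slot by slot, how each argument
	 * is copied or indirected between them. Returns the address to call.
	 */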
	/* Set vtype ret arg */
	if (info->vret_slot != -1) {
		DEBUG_AMD64_GSHAREDVT_PRINT ("vret handling\n[%d] <- &%d (%p)\n", info->vret_arg_reg, info->vret_slot, &callee [info->vret_slot]);
		g_assert (info->vret_slot);
		callee [info->vret_arg_reg] = &callee [info->vret_slot];
	}
	for (i = 0; i < info->map_count; ++i) {
		int src = info->map [i * 2];
		int dst = info->map [(i * 2) + 1];
		int arg_marshal = (src >> SRC_DESCRIPTOR_MARSHAL_SHIFT) & SRC_DESCRIPTOR_MARSHAL_MASK;

		int source_reg = src & SRC_REG_MASK;
		int dest_reg = dst & SRC_REG_MASK;

		DEBUG_AMD64_GSHAREDVT_PRINT ("source %x dest %x marshal %d: ", src, dst, arg_marshal);
		switch (arg_marshal) {
		case GSHAREDVT_ARG_NONE:
			callee [dest_reg] = caller [source_reg];
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- %d (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], caller [source_reg]);
			break;
		case GSHAREDVT_ARG_BYVAL_TO_BYREF:
			/* gsharedvt argument passed by addr in reg/stack slot */
			callee [dest_reg] = &caller [source_reg];
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- &%d (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], &caller [source_reg]);
			break;
		case GSHAREDVT_ARG_BYREF_TO_BYVAL: {
			int slot_count = (src >> SLOT_COUNT_SHIFT) & SLOT_COUNT_MASK;
			int j;
			gpointer *addr = (gpointer*)caller [source_reg];

			for (j = 0; j < slot_count; ++j)
				callee [dest_reg + j] = addr [j];
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- [%d] (%d words) (%p) <- (%p)\n", dest_reg, source_reg, slot_count, &callee [dest_reg], &caller [source_reg]);
			break;
		}
		case GSHAREDVT_ARG_BYREF_TO_BYVAL_I1: {
			gint8 *addr = (gint8*)caller [source_reg];

			callee [dest_reg] = (gpointer)(host_mgreg_t)*addr;
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- (i1) [%d] (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], &caller [source_reg]);
			break;
		}
		case GSHAREDVT_ARG_BYREF_TO_BYVAL_U1: {
			guint8 *addr = (guint8*)caller [source_reg];

			callee [dest_reg] = (gpointer)(host_mgreg_t)*addr;
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- (u1) [%d] (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], &caller [source_reg]);
			break;
		}
		case GSHAREDVT_ARG_BYREF_TO_BYVAL_I2: {
			gint16 *addr = (gint16*)caller [source_reg];

			callee [dest_reg] = (gpointer)(host_mgreg_t)*addr;
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- (i2) [%d] (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], &caller [source_reg]);
			break;
		}
		case GSHAREDVT_ARG_BYREF_TO_BYVAL_U2: {
			guint16 *addr = (guint16*)caller [source_reg];

			callee [dest_reg] = (gpointer)(host_mgreg_t)*addr;
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- (u2) [%d] (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], &caller [source_reg]);
			break;
		}
		case GSHAREDVT_ARG_BYREF_TO_BYVAL_I4: {
			gint32 *addr = (gint32*)caller [source_reg];

			callee [dest_reg] = (gpointer)(host_mgreg_t)*addr;
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- (i4) [%d] (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], &caller [source_reg]);
			break;
		}
		case GSHAREDVT_ARG_BYREF_TO_BYVAL_U4: {
			guint32 *addr = (guint32*)caller [source_reg];

			callee [dest_reg] = (gpointer)(host_mgreg_t)*addr;
			DEBUG_AMD64_GSHAREDVT_PRINT ("[%d] <- (u4) [%d] (%p) <- (%p)\n", dest_reg, source_reg, &callee [dest_reg], &caller [source_reg]);
			break;
		}
		default:
			g_error ("can't handle arg marshal %d\n", arg_marshal);
		}
	}

	//Can't handle for now
	if (info->vcall_offset != -1) {
		MonoObject *this_obj = (MonoObject*)caller [0];

		DEBUG_AMD64_GSHAREDVT_PRINT ("target is a vcall at offset %d\n", info->vcall_offset / 8);
		if (G_UNLIKELY (!this_obj))
			return NULL;
		if (info->vcall_offset == MONO_GSHAREDVT_DEL_INVOKE_VT_OFFSET)
			/* delegate invoke */
			return ((MonoDelegate*)this_obj)->invoke_impl;
		else
			return *(gpointer*)((char*)this_obj->vtable + info->vcall_offset);
	} else if (info->calli) {
		/* The address to call is passed in the mrgctx reg */
		return mrgctx_reg;
	} else {
		DEBUG_AMD64_GSHAREDVT_PRINT ("target is %p\n", info->addr);
		return info->addr;
	}
}

#ifndef DISABLE_JIT

// Compiler support

/*
 * mono_arch_get_gsharedvt_arg_trampoline:
 *
 *   See tramp-x86.c for documentation.
 */
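/* In short: the emitted thunk loads 'arg' into AMD64_RAX (where the gsharedvt
   trampoline below expects its info argument) and tail-jumps to 'addr'. */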
gpointer
mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
{
	guint8 *code, *start;
	const int buf_len = 32;

	start = code = mono_domain_code_reserve (domain, buf_len);

	amd64_mov_reg_imm (code, AMD64_RAX, arg);
	amd64_jump_code (code, addr);

	g_assertf ((code - start) <= buf_len, "%d %d", (int)(code - start), buf_len);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), domain);

	return start;
}

gpointer
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int buf_len, cfa_offset;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	int n_arg_regs, n_arg_fregs, framesize, i;
	int info_offset, offset, rgctx_arg_reg_offset;
	int caller_reg_area_offset, callee_reg_area_offset;
	guint8 *br_out, *br [64], *br_ret [64];
	int b_ret_index;
	int reg_area_size;

	buf_len = 2048;
	buf = code = mono_global_codeman_reserve (buf_len + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);

	/*
	 * We are being called by a gsharedvt arg trampoline, the info argument is in AMD64_RAX.
	 */
	n_arg_regs = PARAM_REGS;
	n_arg_fregs = FLOAT_PARAM_REGS;

	/* Compute stack frame size and offsets */
	offset = 0;
	/* info reg */
	info_offset = offset;
	offset += 8;

	/* rgctx reg */
	rgctx_arg_reg_offset = offset;
	offset += 8;

	/* callconv in regs */
	caller_reg_area_offset = offset;
	reg_area_size = ALIGN_TO ((n_arg_regs + n_arg_fregs) * 8, MONO_ARCH_FRAME_ALIGNMENT);
	offset += reg_area_size;

	framesize = offset;

	g_assert (framesize % MONO_ARCH_FRAME_ALIGNMENT == 0);
	g_assert (reg_area_size % MONO_ARCH_FRAME_ALIGNMENT == 0);
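
	/* E.g. with the System V amd64 values PARAM_REGS == 6 and FLOAT_PARAM_REGS == 8:
	   reg_area_size == ALIGN_TO ((6 + 8) * 8, 16) == 112, so framesize == 8 + 8 + 112 == 128. */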

	/* unwind markers 1/3 */
	cfa_offset = sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -cfa_offset);

	/* save the old frame pointer */
	amd64_push_reg (code, AMD64_RBP);

	/* unwind markers 2/3 */
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, -cfa_offset);

	/* set it as the new frame pointer */
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));

	/* unwind markers 3/3 */
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	mono_add_unwind_op_fp_alloc (unwind_ops, code, buf, AMD64_RBP, 0);

	/* setup the frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	/* save stuff */

	/* save info */
	amd64_mov_membase_reg (code, AMD64_RSP, info_offset, AMD64_RAX, sizeof (target_mgreg_t));
	/* save rgctx */
	amd64_mov_membase_reg (code, AMD64_RSP, rgctx_arg_reg_offset, MONO_ARCH_RGCTX_REG, sizeof (target_mgreg_t));

	for (i = 0; i < n_arg_regs; ++i)
		amd64_mov_membase_reg (code, AMD64_RSP, caller_reg_area_offset + i * 8, param_regs [i], sizeof (target_mgreg_t));

	for (i = 0; i < n_arg_fregs; ++i)
		amd64_sse_movsd_membase_reg (code, AMD64_RSP, caller_reg_area_offset + (i + n_arg_regs) * 8, i);

	/* TODO Allocate stack area used to pass arguments to the method */

	/* Allocate callee register area just below the caller area so it can be accessed from start_gsharedvt_call using negative offsets */
	/* XXX figure out alignment */
	callee_reg_area_offset = reg_area_size - ((n_arg_regs + n_arg_fregs) * 8); /* Ensure alignment */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, reg_area_size);

	/* Allocate stack area used to pass arguments to the method */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, stack_usage), 4);
	amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, AMD64_R11);

	/* The stack now looks like this:

	<caller stack params area>
	<return address>
	<old frame pointer>
	<caller registers area>
	<rgctx>
	<gsharedvt info>
	<callee stack area>
	<callee reg area>
	*/

	/* Call start_gsharedvt_call () */
	/* arg1 == info */
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RAX, sizeof (target_mgreg_t));
	/* arg2 == caller stack area */
	amd64_lea_membase (code, MONO_AMD64_ARG_REG2, AMD64_RBP, -(framesize - caller_reg_area_offset));

	/* arg3 == callee stack area */
	amd64_lea_membase (code, MONO_AMD64_ARG_REG3, AMD64_RSP, callee_reg_area_offset);

	/* arg4 == mrgctx reg */
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG4, MONO_ARCH_RGCTX_REG, sizeof (target_mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_amd64_start_gsharedvt_call));
#ifdef TARGET_WIN32
		/* Since we are doing a call as part of setting up the stack frame, the shadow store
		   required by the Windows calling convention would normally be allocated in the callee
		   stack area, but currently the callee reg area sits in between. The Windows x64 calling
		   convention dictates that the caller makes room on the stack where the callee can spill
		   any parameters passed in registers; since it uses 4 registers for the first 4 parameters,
		   the stack needs to be adjusted before making the call.
		   NOTE: the convention assumes that space for all 4 registers is reserved, regardless
		   of the number of parameters the function actually uses. */
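		/* With the Windows amd64 value PARAM_REGS == 4 and 8-byte registers, this
		   works out to ALIGN_TO (32, 16) == 32 bytes of shadow space. */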
		int shadow_reg_size = 0;

		shadow_reg_size = ALIGN_TO (PARAM_REGS * sizeof (target_mgreg_t), MONO_ARCH_FRAME_ALIGNMENT);
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, shadow_reg_size);
		amd64_call_reg (code, AMD64_R11);
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, shadow_reg_size);
#else
		amd64_call_reg (code, AMD64_R11);
#endif
	} else {
		amd64_call_code (code, mono_amd64_start_gsharedvt_call);
	}

	/* The method to call is now in RAX. Restore the argument regs and make the call */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, sizeof (target_mgreg_t));

	for (i = 0; i < n_arg_regs; ++i)
		amd64_mov_reg_membase (code, param_regs [i], AMD64_RSP, callee_reg_area_offset + i * 8, sizeof (target_mgreg_t));

	for (i = 0; i < n_arg_fregs; ++i)
		amd64_sse_movsd_reg_membase (code, i, AMD64_RSP, callee_reg_area_offset + (i + n_arg_regs) * 8);

	//load rgctx
	amd64_mov_reg_membase (code, MONO_ARCH_RGCTX_REG, AMD64_RBP, -(framesize - rgctx_arg_reg_offset), sizeof (target_mgreg_t));

	/* Pop the callee reg area so the callee sees its stack arguments at RSP */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, reg_area_size);

	/* Call the thing */
	amd64_call_reg (code, AMD64_R11);

	/* Marshal return value. Available registers: R10 and R11 */
	/* Load info struct */
	amd64_mov_reg_membase (code, AMD64_R10, AMD64_RBP, -(framesize - info_offset), sizeof (target_mgreg_t));

	/* Branch to the in/out handling code */
	amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, gsharedvt_in), 1, 4);

	b_ret_index = 0;
	br_out = code;
	x86_branch32 (code, X86_CC_NE, 0, TRUE);
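
	/* gsharedvt_in == 1 means a caller using the standard calling convention is
	   invoking a gsharedvt method (the IN case, handled next); any other value
	   takes the branch above to the OUT case further down. */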

	/*
	 * IN CASE
	 */

	/* Load vret_slot */
	/* Use first input parameter register as scratch since it is volatile on all platforms */
	amd64_mov_reg_membase (code, MONO_AMD64_ARG_REG1, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_slot), 4);
	amd64_alu_reg_imm (code, X86_SUB, MONO_AMD64_ARG_REG1, n_arg_regs + n_arg_fregs);
	amd64_shift_reg_imm (code, X86_SHL, MONO_AMD64_ARG_REG1, 3);

	/* Load the vret address into R11: &callee [vret_slot], i.e. RSP + (vret_slot - (n_arg_regs + n_arg_fregs)) * 8,
	   since the callee reg area was popped above and RSP now points at the callee stack area */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (target_mgreg_t));
	amd64_alu_reg_reg (code, X86_ADD, AMD64_R11, MONO_AMD64_ARG_REG1);

	/* Load ret marshal type into R10 */
	amd64_mov_reg_membase (code, AMD64_R10, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);
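
	/* What follows is a hand-rolled jump table: one CMP/Jcc per GSHAREDVT_RET_*
	   value, each branch patched afterwards to point at its handler below. */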
	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		amd64_alu_reg_imm (code, X86_CMP, AMD64_R10, i);
		br [i] = code;
		amd64_branch8 (code, X86_CC_EQ, 0, TRUE);
	}
	x86_breakpoint (code); /* unhandled case */

	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		mono_amd64_patch (br [i], code);
		switch (i) {
		case GSHAREDVT_RET_NONE:
			break;
		case GSHAREDVT_RET_I1:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, TRUE, FALSE);
			break;
		case GSHAREDVT_RET_U1:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, FALSE, FALSE);
			break;
		case GSHAREDVT_RET_I2:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, TRUE, TRUE);
			break;
		case GSHAREDVT_RET_U2:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, FALSE, TRUE);
			break;
		case GSHAREDVT_RET_I4: // CORRECT
		case GSHAREDVT_RET_U4: // THIS IS INCORRECT. WHY IS IT NOT FAILING?
			amd64_movsxd_reg_membase (code, AMD64_RAX, AMD64_R11, 0);
			break;
		case GSHAREDVT_RET_I8:
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 0, 8);
			break;
		case GSHAREDVT_RET_IREGS_1:
			amd64_mov_reg_membase (code, return_regs [i - GSHAREDVT_RET_IREGS_1], AMD64_R11, 0, 8);
			break;
		case GSHAREDVT_RET_R8:
			amd64_sse_movsd_reg_membase (code, AMD64_XMM0, AMD64_R11, 0);
			break;
		default:
			x86_breakpoint (code); /* can't handle specific case */
		}

		br_ret [b_ret_index ++] = code;
		x86_jump32 (code, 0);
	}

	/*
	 * OUT CASE
	 */
	mono_amd64_patch (br_out, code);

	/*
		Address to write the return value to is in the original value of the register specified by vret_arg_reg.
		This will be either RSI, RDI (System V) or RCX, RDX (Windows) depending on whether this is a static call.
		Its location:
		We alloc 'framesize' bytes below RBP to save regs, info and rgctx. RSP = RBP - framesize
		We store RDI (System V), RCX (Windows) at RSP + caller_reg_area_offset + slot_index_of (register) * 8.

		address: RBP - framesize + caller_reg_area_offset + 8 * slot
	*/

	int caller_vret_offset = caller_reg_area_offset - framesize;
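
	/* Given the offsets computed above (info at 0, rgctx at 8, caller regs at 16),
	   this is 16 - framesize, i.e. the caller reg area sits at RBP + caller_vret_offset. */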

	/* Load vret address in R11 */
	/* Position to return to is passed as a hidden argument. Load 'vret_arg_reg' to find it */
	amd64_movsxd_reg_membase (code, AMD64_R11, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));

	// In the GSHAREDVT_RET_NONE case, vret_arg_reg is -1. In this case, skip marshalling.
	amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
	br_ret [b_ret_index ++] = code;
	amd64_branch32 (code, X86_CC_LT, 0, TRUE);

	/* Compute ret area address in the caller frame, *( ((gpointer *)RBP) [R11 + 2] ) */
	amd64_shift_reg_imm (code, X86_SHL, AMD64_R11, 3);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, caller_vret_offset);
	amd64_alu_reg_reg (code, X86_ADD, AMD64_R11, AMD64_RBP);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof (target_mgreg_t));

	/* Load ret marshal type in R10 */
	amd64_mov_reg_membase (code, AMD64_R10, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);

	// Switch table for ret_marshal value
	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		amd64_alu_reg_imm (code, X86_CMP, AMD64_R10, i);
		br [i] = code;
		amd64_branch8 (code, X86_CC_EQ, 0, TRUE);
	}
	x86_breakpoint (code); /* unhandled case */

	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		mono_amd64_patch (br [i], code);
		switch (i) {
		case GSHAREDVT_RET_NONE:
			break;
		case GSHAREDVT_RET_IREGS_1:
			amd64_mov_membase_reg (code, AMD64_R11, 0, return_regs [i - GSHAREDVT_RET_IREGS_1], 8);
			break;
		case GSHAREDVT_RET_R8:
			amd64_sse_movsd_membase_reg (code, AMD64_R11, 0, AMD64_XMM0);
			break;
		default:
			x86_breakpoint (code); /* can't handle specific case */
		}

		br_ret [b_ret_index ++] = code;
		x86_jump32 (code, 0);
	}

	/* exit path */
	for (i = 0; i < b_ret_index; ++i)
		mono_amd64_patch (br_ret [i], code);

	/* Exit code path */
#if TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
#else
	amd64_leave (code);
#endif
	amd64_ret (code);

	g_assertf ((code - buf) <= buf_len, "%d %d", (int)(code - buf), buf_len);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	if (info)
		*info = mono_tramp_info_create ("gsharedvt_trampoline", buf, code - buf, ji, unwind_ops);

	mono_arch_flush_icache (buf, code - buf);
	return buf;
}

#else

gpointer
mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

#endif

#else

gpointer
mono_amd64_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer mrgctx_reg)
{
	g_assert_not_reached ();
	return NULL;
}

#endif