/*
 * gsharedvt support code for x86
 *
 * Authors:
 *   Zoltan Varga <vargaz@gmail.com>
 *
 * Copyright 2013 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include "mini.h" /* assumed: the JIT umbrella header; the original include list is partially elided */
#include <mono/metadata/abi-details.h>

#ifdef MONO_ARCH_GSHAREDVT_SUPPORTED
gpointer
mono_x86_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer mrgctx_reg)
{
	int i;
	/* Assumed local: the loop below indexes a bare 'map', presumably the slot map stored in the info struct */
	int *map = info->map;
	/* Set vtype ret arg */
	if (info->vret_arg_slot != -1) {
		callee [info->vret_arg_slot] = &callee [info->vret_slot];
	}
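
	/*
	 * Sketch of the map encoding as inferred from the decoding below; the exact
	 * bit layout is an assumption, not documented in this file:
	 *   map [i * 2]     = source descriptor: low 16 bits = caller stack slot,
	 *                     bits 16-17 = marshal tag, higher bits = slot count
	 *   map [i * 2 + 1] = destination callee stack slot
	 */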
	/* Copy data from the caller argument area to the callee */
	for (i = 0; i < info->map_count; ++i) {
		int src = map [i * 2];
		int dst = map [i * 2 + 1];

		/* The 2-bit tag selects the marshal kind; the case values below are assumed */
		switch ((src >> 16) & 0x3) {
		case 0:
			/* Plain copy of one stack slot */
			callee [dst] = caller [src];
			break;
		case 1: {
			/* gsharedvt->normal */
			/* Expand an indirect gsharedvt argument into its constituent stack slots */
			int nslots = src >> 18; /* assumed: slot count encoded in the bits above the tag */
			gpointer *arg;
			int j;

			arg = (gpointer*)caller [src & 0xffff];
			for (j = 0; j < nslots; ++j)
				callee [dst + j] = arg [j];
			break;
		}
		case 2:
			/* gsharedvt arg, have to take its address */
			callee [dst] = caller + (src & 0xffff);
			break;
		}
#if 0
		/* Older variant of the same mapping preserved in the original listing;
		 * the #if 0 guard and the branch condition are assumed. */
		int dst = map [i * 2 + 1];

		if (dst >= 0xffff) {
			/* gsharedvt arg, have to take its address */
			callee [dst - 0xffff] = caller + map [i * 2];
		} else {
			callee [dst] = caller [map [i * 2]];
		}
#endif
	}
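
	/*
	 * Decide the concrete address to call: a virtual call reads it from the
	 * receiver's vtable (or from the delegate's invoke_impl), a calli takes it
	 * from the mrgctx register, otherwise it is the precomputed address in the
	 * info struct.
	 */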
	if (info->vcall_offset != -1) {
		MonoObject *this_obj = (MonoObject*)caller [0];

		if (G_UNLIKELY (!this_obj))
			/* assumed: bail out here and let the caller raise the NullReferenceException */
			return NULL;
		if (info->vcall_offset == MONO_GSHAREDVT_DEL_INVOKE_VT_OFFSET)
			/* delegate invoke */
			return ((MonoDelegate*)this_obj)->invoke_impl;
		else
			return *(gpointer*)((char*)this_obj->vtable + info->vcall_offset);
	} else if (info->calli) {
		/* The address to call is passed in the mrgctx reg */
		return mrgctx_reg;
	} else {
		/* assumed: the precomputed callee address stored in the info struct */
		return info->addr;
	}
}
guint8*
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int buf_len, cfa_offset;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	guint8 *br_out, *br [16];
	int info_offset, mrgctx_offset;

	buf_len = 512; /* assumed reservation size; the final g_assert checks the emitted code stays within it */
	buf = code = mono_global_codeman_reserve (buf_len);
	/*
	 * This trampoline is responsible for marshalling calls between normal code and gsharedvt code. The
	 * caller is a normal or gshared method which uses the signature of the inflated method to make the call, while
	 * the callee is a gsharedvt method which has a signature which uses valuetypes in place of type parameters, i.e.:
	 * T=<type used to represent vtype type arguments, currently TypedByRef>
	 *
	 * The trampoline is responsible for marshalling the arguments and marshalling the result back. To simplify
	 * things, we create our own stack frame, and do most of the work in a C function, which receives a
	 * GSharedVtCallInfo structure as an argument. The structure should contain enough information to allow the
	 * C function to be as fast as possible. The argument is received in EAX from a gsharedvt trampoline. So the
	 * real call sequence looks like this:
	 * caller -> gsharedvt trampoline -> gsharedvt-in trampoline -> start_gsharedvt_call
	 * FIXME: Optimize this.
	 */
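
	/*
	 * Frame layout, as inferred from the emission code below (the offsets are
	 * assumptions, not taken from the original):
	 *   ebp + 8 ...   caller argument area
	 *   ebp + 4       return address
	 *   ebp + 0       saved ebp
	 *   ebp - 4       saved info struct pointer (info_offset)
	 *   ebp - 8       saved MONO_ARCH_RGCTX_REG (mrgctx_offset)
	 *   esp ...       callee argument area, info->stack_usage bytes
	 */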
	cfa_offset = sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -cfa_offset);
	x86_push_reg (code, X86_EBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, -cfa_offset);
	x86_mov_reg_reg (code, X86_EBP, X86_ESP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);
	/* Alloc stack frame/align stack */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
	/* Slots for the saved info struct and rgctx reg; the exact offsets are assumed from the 8 byte frame */
	info_offset = -4;
	mrgctx_offset = -8;
	/* The info struct is put into EAX by the gsharedvt trampoline */
	/* Save info struct addr */
	x86_mov_membase_reg (code, X86_EBP, info_offset, X86_EAX, 4);
	/* Save rgctx */
	x86_mov_membase_reg (code, X86_EBP, mrgctx_offset, MONO_ARCH_RGCTX_REG, 4);
	/* Allocate stack area used to pass arguments to the method */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, stack_usage), sizeof (target_mgreg_t));
	x86_alu_reg_reg (code, X86_SUB, X86_ESP, X86_EAX);
	/* Stack alignment check: trap (int3) if esp is not aligned to MONO_ARCH_FRAME_ALIGNMENT */
	x86_mov_reg_reg (code, X86_ECX, X86_ESP);
	x86_alu_reg_imm (code, X86_AND, X86_ECX, MONO_ARCH_FRAME_ALIGNMENT - 1);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
	x86_branch_disp (code, X86_CC_EQ, 3, FALSE);
	x86_breakpoint (code);
	/* ecx = caller argument area */
	x86_mov_reg_reg (code, X86_ECX, X86_EBP);
	x86_alu_reg_imm (code, X86_ADD, X86_ECX, 8);
	/* eax = callee argument area */
	x86_mov_reg_reg (code, X86_EAX, X86_ESP);
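
	/*
	 * The four pushes below pass the arguments of mono_x86_start_gsharedvt_call
	 * right to left, per the cdecl convention: mrgctx_reg, callee (eax),
	 * caller (ecx), info. The trampoline pops the 16 bytes itself afterwards
	 * (the add esp, 4 * 4 below).
	 */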
	/* Call start_gsharedvt_call */
	/* Arg 4: mrgctx_reg */
	x86_push_membase (code, X86_EBP, mrgctx_offset);
	/* Arg 3: callee argument area */
	x86_push_reg (code, X86_EAX);
	/* Arg 2: caller argument area */
	x86_push_reg (code, X86_ECX);
	/* Arg 1: info struct */
	x86_push_membase (code, X86_EBP, info_offset);
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_x86_start_gsharedvt_call));
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, mono_x86_start_gsharedvt_call);
	}
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4 * 4);
	/* The address to call is in eax */
	/* The stack is now setup for the real call */
	/* Load info struct */
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, info_offset, 4);
	/* Load rgctx */
	x86_mov_reg_membase (code, MONO_ARCH_RGCTX_REG, X86_EBP, mrgctx_offset, sizeof (target_mgreg_t));
	/* Make the real call */
	x86_call_reg (code, X86_EAX);
	/* The return value is either in registers, or stored to an area beginning at sp [info->vret_slot] */
	/* EAX/EDX might contain the return value, only ECX is free */
	/* Load info struct */
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, info_offset, 4);

	/* Branch to the in/out handling code */
	x86_alu_membase_imm (code, X86_CMP, X86_ECX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, gsharedvt_in), 1);
	br_out = code;
	x86_branch32 (code, X86_CC_NE, 0, TRUE);
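
	/*
	 * Forward-branch pattern used throughout: record the branch location in
	 * br_out/br [n], emit the branch with a dummy displacement, then x86_patch
	 * the recorded location once the target code has been emitted.
	 */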
	/* IN case */

	/* Load ret marshal type */
	x86_mov_reg_membase (code, X86_ECX, X86_ECX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_NONE);
	br [0] = code;
	x86_branch8 (code, X86_CC_NE, 0, TRUE);

	/* Normal return, no marshalling required */
	/* assumed epilogue: each marshal case below ends by tearing down the frame and returning */
	x86_leave (code);
	x86_ret (code);

	/* Return value marshalling */
	x86_patch (br [0], code);
	/* Load info struct */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, info_offset, 4);
	/* Load 'vret_slot' */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_slot), 4);
	/* Compute ret area address */
	x86_shift_reg_imm (code, X86_SHL, X86_EAX, 2);
	x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_ESP);
	/* The callee does a ret $4, so sp is off by 4 */
	x86_alu_reg_imm (code, X86_SUB, X86_EAX, sizeof (target_mgreg_t));
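
	/*
	 * Worked example with illustrative values: for vret_slot = 3, eax becomes
	 * 3 * 4 + esp - 4, i.e. the ret area is the third 4-byte stack slot, minus
	 * the 4 bytes already popped by the callee's ret $4.
	 */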
	/* Branch to specific marshalling code */
	// FIXME: Move the I4 case to the top
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_DOUBLE_FPSTACK);
	br [1] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_FLOAT_FPSTACK);
	br [2] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_STACK_POP);
	br [3] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_I1);
	br [4] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_U1);
	br [5] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_I2);
	br [6] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_U2);
	br [7] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
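
	/* No GSHAREDVT_RET_* compare matched: fall through to the eax/edx (IREGS) case below */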
	/* IREGS case */
	/* Load both eax and edx for simplicity */
	x86_mov_reg_membase (code, X86_EDX, X86_EAX, sizeof (target_mgreg_t), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, sizeof (target_mgreg_t));
	x86_leave (code);
	x86_ret (code);
	/* DOUBLE_FPSTACK case: load the double from the ret area onto the fp stack */
	x86_patch (br [1], code);
	x86_fld_membase (code, X86_EAX, 0, TRUE);
	x86_leave (code);
	x86_ret (code);
	/* FLOAT_FPSTACK case: load the float from the ret area onto the fp stack */
	x86_patch (br [2], code);
	x86_fld_membase (code, X86_EAX, 0, FALSE);
	x86_leave (code);
	x86_ret (code);
	/* STACK_POP case: return with ret $4 so the pushed vret arg is popped as well */
	x86_patch (br [3], code);
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* I1 case: sign extend the low byte */
	x86_patch (br [4], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, TRUE, FALSE);
	x86_leave (code);
	x86_ret (code);
	/* U1 case: zero extend the low byte */
	x86_patch (br [5], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, FALSE, FALSE);
	x86_leave (code);
	x86_ret (code);
	/* I2 case: sign extend the low word */
	x86_patch (br [6], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, TRUE, TRUE);
	x86_leave (code);
	x86_ret (code);
	/* U2 case: zero extend the low word */
	x86_patch (br [7], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, FALSE, TRUE);
	x86_leave (code);
	x86_ret (code);
	/* OUT case */
	x86_patch (br_out, code);
	/* Load ret marshal type into ECX */
	x86_mov_reg_membase (code, X86_ECX, X86_ECX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_NONE);
	br [0] = code;
	x86_branch8 (code, X86_CC_NE, 0, TRUE);
	/* Normal return, no marshalling required */
	x86_leave (code);
	x86_ret (code);

	/* Return value marshalling */
	x86_patch (br [0], code);

	/* EAX might contain the return value, save it on the stack */
	x86_push_reg (code, X86_EAX);
	/* Load info struct */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, info_offset, 4);
	/* Load 'vret_arg_slot' */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_slot), 4);
	/* Compute ret area address in the caller frame in EAX */
	x86_shift_reg_imm (code, X86_SHL, X86_EAX, 2);
	x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_EBP);
	x86_alu_reg_imm (code, X86_ADD, X86_EAX, 8);
	/* Load the address of the caller-provided ret buffer from that slot */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, sizeof (target_mgreg_t));
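
	/*
	 * In the OUT direction the caller passed the address of its return buffer
	 * as the vret arg; the marshalling code below writes the register or
	 * fp-stack return value through that pointer.
	 */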
	/* Branch to specific marshalling code */
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_DOUBLE_FPSTACK);
	br [1] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_FLOAT_FPSTACK);
	br [2] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_STACK_POP);
	br [3] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_IREGS);
	br [4] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	/* I4 case (fallthrough): store eax through the ret buffer pointer */
	x86_mov_reg_reg (code, X86_ECX, X86_EAX);
	x86_pop_reg (code, X86_EAX);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EAX, sizeof (target_mgreg_t));
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* IREGS case: store both eax and edx through the ret buffer pointer */
	x86_patch (br [4], code);
	x86_mov_reg_reg (code, X86_ECX, X86_EAX);
	x86_pop_reg (code, X86_EAX);
	x86_mov_membase_reg (code, X86_ECX, sizeof (target_mgreg_t), X86_EDX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EAX, sizeof (target_mgreg_t));
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* DOUBLE_FPSTACK case: store the double from the fp stack through the ret buffer pointer */
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
	x86_patch (br [1], code);
	x86_fst_membase (code, X86_EAX, 0, TRUE, TRUE);
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* FLOAT_FPSTACK case: store the float from the fp stack through the ret buffer pointer */
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
	x86_patch (br [2], code);
	x86_fst_membase (code, X86_EAX, 0, FALSE, TRUE);
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* STACK_POP case */
	x86_patch (br [3], code);
	x86_leave (code);
	x86_ret_imm (code, 4);
	g_assert ((code - buf) < buf_len);

	if (info)
		*info = mono_tramp_info_create ("gsharedvt_trampoline", buf, code - buf, ji, unwind_ops);

	mono_arch_flush_icache (buf, code - buf);

	return buf;
}

#endif /* MONO_ARCH_GSHAREDVT_SUPPORTED */