/* mono/mini/tramp-x86-gsharedvt.c */
/**
 * \file
 * gsharedvt support code for x86
 *
 * Authors:
 *	Zoltan Varga <vargaz@gmail.com>
 *
 * Copyright 2013 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include "mini.h"
#include <mono/metadata/abi-details.h>

#ifdef MONO_ARCH_GSHAREDVT_SUPPORTED
gpointer
mono_x86_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer mrgctx_reg)
{
	int i;
	int *map = info->map;

	/* Set vtype ret arg */
	if (info->vret_arg_slot != -1) {
		callee [info->vret_arg_slot] = &callee [info->vret_slot];
	}

	/* Copy data from the caller argument area to the callee */
	for (i = 0; i < info->map_count; ++i) {
		int src = map [i * 2];
		int dst = map [i * 2 + 1];
		switch ((src >> 16) & 0x3) {
		case 0:
			callee [dst] = caller [src];
			break;
		case 1: {
			int j, nslots;
			gpointer *arg;

			/* gsharedvt->normal */
			nslots = src >> 18;
			arg = (gpointer*)caller [src & 0xffff];
			for (j = 0; j < nslots; ++j)
				callee [dst + j] = arg [j];
			break;
		}
		case 2:
			/* gsharedvt arg, have to take its address */
			callee [dst] = caller + (src & 0xffff);
			break;
		}

#if 0
		int dst = map [i * 2 + 1];
		if (dst >= 0xffff) {
			/* gsharedvt arg, have to take its address */
			callee [dst - 0xffff] = caller + map [i * 2];
		} else {
			callee [dst] = caller [map [i * 2]];
		}
#endif
	}
	if (info->vcall_offset != -1) {
		MonoObject *this_obj = (MonoObject*)caller [0];

		if (G_UNLIKELY (!this_obj))
			return NULL;
		if (info->vcall_offset == MONO_GSHAREDVT_DEL_INVOKE_VT_OFFSET)
			/* delegate invoke */
			return ((MonoDelegate*)this_obj)->invoke_impl;
		else
			return *(gpointer*)((char*)this_obj->vtable + info->vcall_offset);
	} else if (info->calli) {
		/* The address to call is passed in the mrgctx reg */
		return mrgctx_reg;
	} else {
		return info->addr;
	}
}
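
/*
 * A minimal sketch (not part of the original source) of how a map entry is
 * decoded, mirroring the switch above; the bit layout is inferred from that
 * code, not from a documented ABI. For example, src = (1 << 16) | (2 << 18) | 4
 * means: caller [4] holds the address of a gsharedvt argument, and two slots
 * are copied from that address into callee [dst] and callee [dst + 1].
 */
#if 0
static void
decode_map_entry (int src, int *kind, int *slot, int *nslots)
{
	*kind = (src >> 16) & 0x3; /* 0 = plain copy, 1 = gsharedvt->normal, 2 = take address */
	*slot = src & 0xffff;      /* slot index in the caller argument area */
	*nslots = src >> 18;       /* slot count, only meaningful for kind 1 */
}
#endif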

gpointer
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int buf_len, cfa_offset;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	guint8 *br_out, *br [16];
	int info_offset, mrgctx_offset;

	buf_len = 320;
	buf = code = mono_global_codeman_reserve (buf_len);

	/*
	 * This trampoline is responsible for marshalling calls between normal code and gsharedvt code. The
	 * caller is a normal or gshared method which uses the signature of the inflated method to make the call, while
	 * the callee is a gsharedvt method which has a signature which uses valuetypes in place of type parameters, i.e.
	 * caller:
	 *   foo<bool> (bool b)
	 * callee:
	 *   T = <type used to represent vtype type arguments, currently TypedByRef>
	 *   foo<T> (T b)
	 * The trampoline is responsible for marshalling the arguments and marshalling the result back. To simplify
	 * things, we create our own stack frame and do most of the work in a C function, which receives a
	 * GSharedVtCallInfo structure as an argument. The structure contains all the information needed for the
	 * C function to be as fast as possible. The argument is received in EAX from a gsharedvt trampoline, so the
	 * real call sequence looks like this:
	 *   caller -> gsharedvt trampoline -> gsharedvt in trampoline -> start_gsharedvt_call
	 * FIXME: Optimize this.
	 */
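
	/*
	 * Frame layout built by the prologue below (a sketch derived from the
	 * emitted instructions, added for orientation):
	 *
	 *   EBP + 8 ... : caller argument area
	 *   EBP + 4     : return address
	 *   EBP + 0     : saved EBP
	 *   EBP - 4     : saved info struct pointer (info_offset)
	 *   EBP - 8     : saved MONO_ARCH_RGCTX_REG (mrgctx_offset)
	 *   ESP ...     : callee argument area (info->stack_usage bytes)
	 */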

	cfa_offset = sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -cfa_offset);
	x86_push_reg (code, X86_EBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, -cfa_offset);
	x86_mov_reg_reg (code, X86_EBP, X86_ESP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);
	/* Alloc stack frame/align stack */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
	info_offset = -4;
	mrgctx_offset = -8;
	/* The info struct is put into EAX by the gsharedvt trampoline */
	/* Save info struct addr */
	x86_mov_membase_reg (code, X86_EBP, info_offset, X86_EAX, 4);
	/* Save rgctx */
	x86_mov_membase_reg (code, X86_EBP, mrgctx_offset, MONO_ARCH_RGCTX_REG, 4);

	/* Allocate stack area used to pass arguments to the method */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, stack_usage), sizeof (target_mgreg_t));
	x86_alu_reg_reg (code, X86_SUB, X86_ESP, X86_EAX);

#if 0
	/* Stack alignment check */
	x86_mov_reg_reg (code, X86_ECX, X86_ESP);
	x86_alu_reg_imm (code, X86_AND, X86_ECX, MONO_ARCH_FRAME_ALIGNMENT - 1);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
	x86_branch_disp (code, X86_CC_EQ, 3, FALSE);
	x86_breakpoint (code);
#endif

	/* ecx = caller argument area */
	x86_mov_reg_reg (code, X86_ECX, X86_EBP);
	x86_alu_reg_imm (code, X86_ADD, X86_ECX, 8);
	/* eax = callee argument area */
	x86_mov_reg_reg (code, X86_EAX, X86_ESP);

	/* Call start_gsharedvt_call */
	/* Arg4 */
	x86_push_membase (code, X86_EBP, mrgctx_offset);
	/* Arg3 */
	x86_push_reg (code, X86_EAX);
	/* Arg2 */
	x86_push_reg (code, X86_ECX);
	/* Arg1 */
	x86_push_membase (code, X86_EBP, info_offset);
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_x86_start_gsharedvt_call));
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, mono_x86_start_gsharedvt_call);
	}
	/* Pop the four argument slots pushed above */
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4 * 4);
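
	/*
	 * In C terms, the pushes above amount to the following cdecl call
	 * (illustrative, matching the signature of mono_x86_start_gsharedvt_call):
	 *   addr = mono_x86_start_gsharedvt_call (info, caller_area, callee_area, mrgctx);
	 * where caller_area = EBP + 8 and callee_area = ESP.
	 */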
	/* The address to call is in eax */
	/* The stack is now set up for the real call */
	/* Load info struct */
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, info_offset, 4);
	/* Load rgctx */
	x86_mov_reg_membase (code, MONO_ARCH_RGCTX_REG, X86_EBP, mrgctx_offset, sizeof (target_mgreg_t));
	/* Make the call */
	x86_call_reg (code, X86_EAX);

	/* The return value is either in registers, or stored to an area beginning at sp [info->vret_slot] */
	/* EAX/EDX might contain the return value, only ECX is free */
	/* Load info struct */
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, info_offset, 4);

	/* Branch to the in/out handling code */
	x86_alu_membase_imm (code, X86_CMP, X86_ECX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, gsharedvt_in), 1);
	br_out = code;
	x86_branch32 (code, X86_CC_NE, 0, TRUE);

	/*
	 * IN CASE
	 */

	/* Load ret marshal type */
	x86_mov_reg_membase (code, X86_ECX, X86_ECX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_NONE);
	br [0] = code;
	x86_branch8 (code, X86_CC_NE, 0, TRUE);

	/* Normal return, no marshalling required */
	x86_leave (code);
	x86_ret (code);

	/* Return value marshalling */
	x86_patch (br [0], code);
	/* Load info struct */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, info_offset, 4);
	/* Load 'vret_slot' */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_slot), 4);
	/* Compute ret area address */
	x86_shift_reg_imm (code, X86_SHL, X86_EAX, 2);
	x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_ESP);
	/* The callee does a ret $4, so sp is off by 4 */
	x86_alu_reg_imm (code, X86_SUB, X86_EAX, sizeof (target_mgreg_t));
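	/* i.e. EAX = ESP + vret_slot * 4 - 4, worked out from the three instructions above */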

	/* Branch to specific marshalling code */
	// FIXME: Move the I4 case to the top
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_DOUBLE_FPSTACK);
	br [1] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_FLOAT_FPSTACK);
	br [2] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_STACK_POP);
	br [3] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_I1);
	br [4] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_U1);
	br [5] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_I2);
	br [6] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_U2);
	br [7] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	/* IREGS case */
	/* Load both eax and edx for simplicity */
	x86_mov_reg_membase (code, X86_EDX, X86_EAX, sizeof (target_mgreg_t), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, sizeof (target_mgreg_t));
	x86_leave (code);
	x86_ret (code);
	/* DOUBLE_FPSTACK case */
	x86_patch (br [1], code);
	x86_fld_membase (code, X86_EAX, 0, TRUE);
	x86_jump8 (code, 0);
	x86_leave (code);
	x86_ret (code);
	/* FLOAT_FPSTACK case */
	x86_patch (br [2], code);
	x86_fld_membase (code, X86_EAX, 0, FALSE);
	x86_leave (code);
	x86_ret (code);
	/* STACK_POP case */
	x86_patch (br [3], code);
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* I1 case */
	x86_patch (br [4], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, TRUE, FALSE);
	x86_leave (code);
	x86_ret (code);
	/* U1 case */
	x86_patch (br [5], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, FALSE, FALSE);
	x86_leave (code);
	x86_ret (code);
	/* I2 case */
	x86_patch (br [6], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, TRUE, TRUE);
	x86_leave (code);
	x86_ret (code);
	/* U2 case */
	x86_patch (br [7], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, FALSE, TRUE);
	x86_leave (code);
	x86_ret (code);

	/*
	 * OUT CASE
	 */

	x86_patch (br_out, code);

	/* Load ret marshal type into ECX */
	x86_mov_reg_membase (code, X86_ECX, X86_ECX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_NONE);
	br [0] = code;
	x86_branch8 (code, X86_CC_NE, 0, TRUE);

	/* Normal return, no marshalling required */
	x86_leave (code);
	x86_ret (code);

	/* Return value marshalling */
	x86_patch (br [0], code);

	/* EAX might contain the return value */
	// FIXME: Use moves
	x86_push_reg (code, X86_EAX);

	/* Load info struct */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, info_offset, 4);
	/* Load 'vret_arg_slot' */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_slot), 4);
	/* Compute ret area address in the caller frame in EAX */
	x86_shift_reg_imm (code, X86_SHL, X86_EAX, 2);
	x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_EBP);
	x86_alu_reg_imm (code, X86_ADD, X86_EAX, 8);
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, sizeof (target_mgreg_t));
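	/* i.e. EAX = *(EBP + 8 + vret_arg_slot * 4), the ret area address stored in the caller argument area */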

	/* Branch to specific marshalling code */
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_DOUBLE_FPSTACK);
	br [1] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_FLOAT_FPSTACK);
	br [2] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_STACK_POP);
	br [3] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_IREGS);
	br [4] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	/* IREG case */
	x86_mov_reg_reg (code, X86_ECX, X86_EAX);
	x86_pop_reg (code, X86_EAX);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EAX, sizeof (target_mgreg_t));
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* IREGS case */
	x86_patch (br [4], code);
	x86_mov_reg_reg (code, X86_ECX, X86_EAX);
	x86_pop_reg (code, X86_EAX);
	x86_mov_membase_reg (code, X86_ECX, sizeof (target_mgreg_t), X86_EDX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EAX, sizeof (target_mgreg_t));
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* DOUBLE_FPSTACK case */
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
	x86_patch (br [1], code);
	x86_fst_membase (code, X86_EAX, 0, TRUE, TRUE);
	x86_jump8 (code, 0);
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* FLOAT_FPSTACK case */
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
	x86_patch (br [2], code);
	x86_fst_membase (code, X86_EAX, 0, FALSE, TRUE);
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* STACK_POP case */
	x86_patch (br [3], code);
	x86_leave (code);
	x86_ret_imm (code, 4);

	g_assert ((code - buf) < buf_len);

	if (info)
		*info = mono_tramp_info_create ("gsharedvt_trampoline", buf, code - buf, ji, unwind_ops);

	mono_arch_flush_icache (buf, code - buf);
	return buf;
}

#endif /* MONO_ARCH_GSHAREDVT_SUPPORTED */
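
/*
 * A hypothetical usage sketch (not from this file): runtime code that needs
 * this trampoline would typically create it once and register the resulting
 * MonoTrampInfo, along these lines (assumed call pattern):
 */
#if 0
MonoTrampInfo *tinfo;
gpointer addr = mono_arch_get_gsharedvt_trampoline (&tinfo, FALSE);
mono_tramp_info_register (tinfo, NULL);
#endif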