/*
 * tramp-x86.c: JIT trampoline code for x86
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */

#include <config.h>
#include <glib.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/metadata-internals.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/monitor.h>
#include <mono/arch/x86/x86-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini.h"
#include "mini-x86.h"

static guint8* nullified_class_init_trampoline;
/*
 * mono_arch_get_unbox_trampoline:
 * @gsctx: the generic sharing context
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * this argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoGenericSharingContext *gsctx, MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_pos = 4;
	MonoDomain *domain = mono_domain_get ();

	if (MONO_TYPE_ISSTRUCT (mono_method_signature (m)->ret))
		this_pos = 8;

	start = code = mono_domain_code_reserve (domain, 16);

	x86_alu_membase_imm (code, X86_ADD, X86_ESP, this_pos, sizeof (MonoObject));
	x86_jump_code (code, addr);
	g_assert ((code - start) < 16);

	return start;
}
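
/*
 * For illustration (this is a reading of the emission above, not extra code):
 * the trampoline is just two instructions, e.g. with this_pos == 4:
 *
 *     add dword [esp+4], sizeof (MonoObject)   ; skip the object header
 *     jmp addr                                 ; tail-call the real method
 *
 * so the callee sees a 'this' pointer that points at the raw value type data.
 */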

gpointer
mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
{
	guint8 *code, *start;
	int buf_len;

	MonoDomain *domain = mono_domain_get ();

	buf_len = 10;

	start = code = mono_domain_code_reserve (domain, buf_len);

	x86_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
	x86_jump_code (code, addr);
	g_assert ((code - start) <= buf_len);

	mono_arch_flush_icache (start, code - start);

	return start;
}

gpointer
mono_arch_get_llvm_imt_trampoline (MonoDomain *domain, MonoMethod *m, int vt_offset)
{
	guint8 *code, *start;
	int buf_len;
	int this_offset;

	buf_len = 32;

	start = code = mono_domain_code_reserve (domain, buf_len);

	this_offset = mono_x86_get_this_arg_offset (NULL, mono_method_signature (m));

	/* Set imt arg */
	x86_mov_reg_imm (code, MONO_ARCH_IMT_REG, m);
	/* Load this */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, this_offset + 4, 4);
	/* Load vtable address */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, 4);
	x86_jump_membase (code, X86_EAX, vt_offset);

	g_assert ((code - start) < buf_len);

	mono_arch_flush_icache (start, code - start);

	return start;
}

void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 *code;
	guint8 buf [8];
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 8, buf, sizeof (buf));

	code = buf + 8;

	/* go to the start of the call instruction
	 *
	 * address_byte = (m << 6) | (o << 3) | reg
	 * call opcode: 0xff address_byte displacement
	 * 0xff m=1,o=2 imm8
	 * 0xff m=2,o=2 imm32
	 */
	code -= 6;
	orig_code -= 6;
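
	/*
	 * A sketch of what the checks below are matching (based on the x86
	 * encodings above, nothing new is emitted here): a direct call is the
	 * 5-byte sequence 'e8 <rel32>' and a PLT jump is 'e9 <rel32>', so after
	 * backing up 6 bytes the opcode sits at code [1] and the displacement at
	 * offset 2. The rel32 field is relative to the end of the 5-byte
	 * instruction, hence the '- 5' in the patched displacement.
	 */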
	if (code [1] == 0xe8) {
		if (can_write) {
			InterlockedExchange ((gint32*)(orig_code + 2), (guint)addr - ((guint)orig_code + 1) - 5);

			/* Tell valgrind to recompile the patched code */
			VALGRIND_DISCARD_TRANSLATIONS (orig_code + 2, 4);
		}
	} else if (code [1] == 0xe9) {
		/* A PLT entry: jmp <DISP> */
		if (can_write)
			InterlockedExchange ((gint32*)(orig_code + 2), (guint)addr - ((guint)orig_code + 1) - 5);
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
				code [4], code [5], code [6]);
		g_assert_not_reached ();
	}
}

void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
{
	/* A PLT entry: jmp <DISP> */
	g_assert (code [0] == 0xe9);

	if (!mono_running_on_valgrind ())
		InterlockedExchange ((gint32*)(code + 1), (guint)addr - (guint)code - 5);
}

void
mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs)
{
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (NULL, code, 6, buf, sizeof (buf));

	if (!can_write)
		return;

	code -= 5;
	if (code [0] == 0xe8) {
		if (!mono_running_on_valgrind ()) {
			guint32 ops;
			/*
			 * Thread safe code patching using the algorithm from the paper
			 * 'Practicing JUDO: Java Under Dynamic Optimizations'
			 */
			/*
			 * First atomically change the first 2 bytes of the call to a
			 * spinning jump.
			 */
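			/*
			 * Byte-level view of the transition (an illustration of the
			 * stores below, not extra patching): the 5-byte call
			 *
			 *     e8 xx xx xx xx        call <rel32>
			 *
			 * first becomes 'eb fe' (jmp -2, a jump to itself) so any
			 * thread racing through here spins, then bytes 2-4 become
			 * nops, and the final 4-byte exchange turns the spin into
			 * nops as well:
			 *
			 *     eb fe 90 90 90   ->   90 90 90 90 90
			 */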
			ops = 0xfeeb;
			InterlockedExchange ((gint32*)code, ops);

			/* Then change the other bytes to a nop */
			code [2] = 0x90;
			code [3] = 0x90;
			code [4] = 0x90;

			/* Then atomically change the first 4 bytes to a nop as well */
			ops = 0x90909090;
			InterlockedExchange ((gint32*)code, ops);
			/* FIXME: the calltree skin trips on the self modifying code above */

			/* Tell valgrind to recompile the patched code */
			//VALGRIND_DISCARD_TRANSLATIONS (code, 8);
		}
	} else if (code [0] == 0x90 || code [0] == 0xeb) {
		/* Already changed by another thread */
	} else if ((code [-1] == 0xff) && (x86_modrm_reg (code [0]) == 0x2)) {
		/* call *<OFFSET>(<REG>) -> Call made from AOT code */
		gpointer *vtable_slot;

		vtable_slot = mono_get_vcall_slot_addr (code + 5, regs);
		g_assert (vtable_slot);

		*vtable_slot = nullified_class_init_trampoline;
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
				code [4], code [5], code [6]);
		g_assert_not_reached ();
	}
}

void
mono_arch_nullify_plt_entry (guint8 *code, mgreg_t *regs)
{
	if (!mono_running_on_valgrind ()) {
		guint32 ops;

		/* First atomically change the first 2 bytes to a spinning jump */
		ops = 0xfeeb;
		InterlockedExchange ((gint32*)code, ops);

		/* Then change the other bytes to a nop */
		code [2] = 0x90;
		code [3] = 0x90;
		code [4] = 0x90;

		/* Change the first byte to a ret (0xc3), so the nullified entry just returns */
		ops = 0xc3;
		InterlockedExchange ((gint32*)code, ops);
	}
}

guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
{
	guint8 *buf, *code, *tramp;
	int pushed_args, pushed_args_caller_saved;

	code = buf = mono_global_codeman_reserve (256);

	/* Note that there is a single argument to the trampoline
	 * and it is stored at: esp + pushed_args * sizeof (gpointer)
	 * the ret address is at: esp + (pushed_args + 1) * sizeof (gpointer)
	 */

	/* Put all registers into an array on the stack
	 * If this code is changed, make sure to update the offset value in
	 * mono_arch_get_this_arg_from_call () in mini-x86.c.
	 */
	x86_push_reg (buf, X86_EDI);
	x86_push_reg (buf, X86_ESI);
	x86_push_reg (buf, X86_EBP);
	x86_push_reg (buf, X86_ESP);
	x86_push_reg (buf, X86_EBX);
	x86_push_reg (buf, X86_EDX);
	x86_push_reg (buf, X86_ECX);
	x86_push_reg (buf, X86_EAX);

	pushed_args_caller_saved = pushed_args = 8;
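
	/*
	 * A sketch of the stack at this point (lower addresses on top),
	 * assuming we were entered from a specific trampoline: the caller's
	 * call pushed its return address, the specific trampoline pushed its
	 * argument, and the eight pushes above built the register array:
	 *
	 *     esp + 9*4   return address of the patched call site
	 *     esp + 8*4   trampoline argument (arg1)
	 *     esp + 7*4   saved EDI
	 *     ...
	 *     esp + 0*4   saved EAX
	 */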

	/* Align stack on apple */
	x86_alu_reg_imm (buf, X86_SUB, X86_ESP, 4);

	pushed_args ++;

	/* save LMF begin */

	/* save the IP (caller ip) */
	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		x86_push_imm (buf, 0);
	else
		x86_push_membase (buf, X86_ESP, (pushed_args + 1) * sizeof (gpointer));

	pushed_args++;

	x86_push_reg (buf, X86_EBP);
	x86_push_reg (buf, X86_ESI);
	x86_push_reg (buf, X86_EDI);
	x86_push_reg (buf, X86_EBX);

	pushed_args += 4;

	/* save ESP */
	x86_push_reg (buf, X86_ESP);
	/* Adjust ESP so it points to the previous frame */
	x86_alu_membase_imm (buf, X86_ADD, X86_ESP, 0, (pushed_args + 2) * 4);

	pushed_args ++;

	/* save method info */
	if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP))
		x86_push_membase (buf, X86_ESP, pushed_args * sizeof (gpointer));
	else
		x86_push_imm (buf, 0);

	pushed_args++;

	/* On apple, the stack is correctly aligned to 16 bytes because pushed_args is
	 * 16 and there is the extra trampoline arg + the return ip pushed by call
	 * FIXME: Note that if an exception happens while some args are pushed
	 * on the stack, the stack will be misaligned.
	 */
	g_assert (pushed_args == 16);

	/* get the address of lmf for the current thread */
	x86_call_code (buf, mono_get_lmf_addr);
	/* push lmf */
	x86_push_reg (buf, X86_EAX);
	/* push *lmf (previous_lmf) */
	x86_push_membase (buf, X86_EAX, 0);
	/* Signal to mono_arch_find_jit_info () that this is a trampoline frame */
	x86_alu_membase_imm (buf, X86_ADD, X86_ESP, 0, 1);
	/* *(lmf) = ESP */
	x86_mov_membase_reg (buf, X86_EAX, 0, X86_ESP, 4);
	/* save LMF end */

	pushed_args += 2;

	/* starting the call sequence */

	/* FIXME: Push the trampoline address */
	x86_push_imm (buf, 0);

	pushed_args++;

	/* push the method info */
	x86_push_membase (buf, X86_ESP, pushed_args * sizeof (gpointer));

	pushed_args++;

	/* push the return address onto the stack */
	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		x86_push_imm (buf, 0);
	else
		x86_push_membase (buf, X86_ESP, (pushed_args + 1) * sizeof (gpointer));
	pushed_args++;
	/* push the address of the register array */
	x86_lea_membase (buf, X86_EAX, X86_ESP, (pushed_args - 8) * sizeof (gpointer));
	x86_push_reg (buf, X86_EAX);

	pushed_args++;

#ifdef __APPLE__
	/* check the stack is aligned after the ret ip is pushed */
	/*x86_mov_reg_reg (buf, X86_EDX, X86_ESP, 4);
	x86_alu_reg_imm (buf, X86_AND, X86_EDX, 15);
	x86_alu_reg_imm (buf, X86_CMP, X86_EDX, 0);
	x86_branch_disp (buf, X86_CC_Z, 3, FALSE);
	x86_breakpoint (buf);*/
#endif

	tramp = (guint8*)mono_get_trampoline_func (tramp_type);
	x86_call_code (buf, tramp);

	x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 4*4);

	pushed_args -= 4;

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/* Align the stack on osx */
	x86_alu_reg_imm (buf, X86_SUB, X86_ESP, 3 * 4);
	x86_push_reg (buf, X86_EAX);
	x86_call_code (buf, (guint8*)mono_thread_force_interruption_checkpoint);
	x86_pop_reg (buf, X86_EAX);
	x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 3 * 4);

	/* Restore LMF */

	/* ebx = previous_lmf */
	x86_pop_reg (buf, X86_EBX);
	pushed_args--;
	x86_alu_reg_imm (buf, X86_SUB, X86_EBX, 1);

	/* edi = lmf */
	x86_pop_reg (buf, X86_EDI);
	pushed_args--;

	/* *(lmf) = previous_lmf */
	x86_mov_membase_reg (buf, X86_EDI, 0, X86_EBX, 4);

	/* discard method info */
	x86_pop_reg (buf, X86_ESI);
	pushed_args--;

	/* discard ESP */
	x86_pop_reg (buf, X86_ESI);
	pushed_args--;

	/* restore caller saved regs */
	x86_pop_reg (buf, X86_EBX);
	x86_pop_reg (buf, X86_EDI);
	x86_pop_reg (buf, X86_ESI);
	x86_pop_reg (buf, X86_EBP);

	pushed_args -= 4;

	/* discard saved IP */
	x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 4);
	pushed_args--;

	/* restore LMF end */

	if (!MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/*
		 * Overwrite the method ptr with the address we need to jump to,
		 * to free %eax.
		 */
		x86_mov_membase_reg (buf, X86_ESP, pushed_args * sizeof (gpointer), X86_EAX, 4);
	}

	/* Restore caller saved registers */
	x86_mov_reg_membase (buf, X86_ECX, X86_ESP, (pushed_args - pushed_args_caller_saved + X86_ECX) * 4, 4);
	x86_mov_reg_membase (buf, X86_EDX, X86_ESP, (pushed_args - pushed_args_caller_saved + X86_EDX) * 4, 4);
	if ((tramp_type == MONO_TRAMPOLINE_RESTORE_STACK_PROT) || (tramp_type == MONO_TRAMPOLINE_AOT_PLT))
		x86_mov_reg_membase (buf, X86_EAX, X86_ESP, (pushed_args - pushed_args_caller_saved + X86_EAX) * 4, 4);

	if (!MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Pop saved reg array + stack align */
		x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 9 * 4);
		pushed_args -= 9;
		g_assert (pushed_args == 0);
	} else {
		/* Pop saved reg array + stack align + method ptr */
		x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 10 * 4);
		pushed_args -= 10;

		/* We've popped one more stack item than we've pushed (the
		   method ptr argument), so we must end up at -1. */
		g_assert (pushed_args == -1);
	}

	x86_ret (buf);

	g_assert ((buf - code) <= 256);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		/* Initialize the nullified class init trampoline used in the AOT case */
		nullified_class_init_trampoline = buf = mono_global_codeman_reserve (16);
		x86_ret (buf);
	}

	return code;
}

#define TRAMPOLINE_SIZE 10
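
/*
 * 10 bytes is the worst case for the sequence emitted below: 'push imm32'
 * (0x68 + 4 bytes) followed by 'jmp rel32' (0xe9 + 4 bytes); x86_jump_code
 * may emit a shorter rel8 jump when the target is close, hence the <= assert.
 * The pushed argument is the single trampoline argument that
 * mono_arch_create_trampoline_code () expects to find below the return
 * address.
 */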

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;

	tramp = mono_get_trampoline_code (tramp_type);

	code = buf = mono_domain_code_reserve_align (domain, TRAMPOLINE_SIZE, 4);

	x86_push_imm (buf, arg1);
	x86_jump_code (buf, tramp);
	g_assert ((buf - code) <= TRAMPOLINE_SIZE);

	mono_arch_flush_icache (code, buf - code);

	if (code_len)
		*code_len = buf - code;

	return code;
}

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}
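
	/*
	 * A worked example of the loop above, under the assumption that the
	 * per-level array sizes returned by mono_class_rgctx_get_array_size ()
	 * grow as 4, 8, 16, ...: each level keeps its first element as the
	 * pointer to the next level, so a level of size N holds N - 1 usable
	 * slots. A flat index of 12 would then resolve to depth 2 (3 slots at
	 * depth 0, 7 at depth 1), index 2 within that array.
	 */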

	tramp_size = 36 + 6 * depth;

	code = buf = mono_global_codeman_reserve (tramp_size);

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	/* load vtable/mrgctx ptr */
	x86_mov_reg_membase (buf, X86_EAX, X86_ESP, 4, 4);
	if (!mrgctx) {
		/* load rgctx ptr from vtable */
		x86_mov_reg_membase (buf, X86_EAX, X86_EAX, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 4);
		/* is the rgctx ptr null? */
		x86_test_reg_reg (buf, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = buf;
		x86_branch8 (buf, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			x86_mov_reg_membase (buf, X86_EAX, X86_EAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, 4);
		else
			x86_mov_reg_membase (buf, X86_EAX, X86_EAX, 0, 4);
		/* is the ptr null? */
		x86_test_reg_reg (buf, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = buf;
		x86_branch8 (buf, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	x86_mov_reg_membase (buf, X86_EAX, X86_EAX, sizeof (gpointer) * (index + 1), 4);
	/* is the slot null? */
	x86_test_reg_reg (buf, X86_EAX, X86_EAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = buf;
	x86_branch8 (buf, X86_CC_Z, -1, 1);
	/* otherwise return */
	x86_ret (buf);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		x86_patch (rgctx_null_jumps [i], buf);

	g_free (rgctx_null_jumps);

	/* move the vtable/mrgctx ptr argument into the vtable register */
	x86_mov_reg_membase (buf, MONO_ARCH_VTABLE_REG, X86_ESP, 4, 4);

	tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

	/* jump to the actual trampoline */
	x86_jump_code (buf, tramp);

	mono_arch_flush_icache (code, buf - code);

	g_assert (buf - code <= tramp_size);

	return code;
}

gpointer
mono_arch_create_generic_class_init_trampoline (void)
{
	guint8 *tramp;
	guint8 *code, *buf;
	static int byte_offset = -1;
	static guint8 bitmask;
	guint8 *jump;
	int tramp_size;

	tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (byte_offset < 0)
		mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
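
	/*
	 * Fast path (a reading of the emission below): test the 'initialized'
	 * bit in the vtable; if it is already set, return immediately.
	 * Otherwise push the vtable, so the stack looks like a call from a
	 * specific trampoline, and fall through to the generic trampoline code.
	 */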
	x86_test_membase_imm (code, MONO_ARCH_VTABLE_REG, byte_offset, bitmask);
	jump = code;
	x86_branch8 (code, X86_CC_Z, -1, 1);

	x86_ret (code);

	x86_patch (jump, code);

	/* Push the vtable so the stack is the same as in a specific trampoline */
	x86_push_reg (code, MONO_ARCH_VTABLE_REG);

	tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_GENERIC_CLASS_INIT);

	/* jump to the actual trampoline */
	x86_jump_code (code, tramp);

	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	return buf;
}

#ifdef MONO_ARCH_MONITOR_OBJECT_REG
/*
 * The code produced by this trampoline is equivalent to this:
 *
 * if (obj) {
 * 	if (obj->synchronisation) {
 * 		if (obj->synchronisation->owner == 0) {
 * 			if (cmpxchg (&obj->synchronisation->owner, TID, 0) == 0)
 * 				return;
 * 		}
 * 		if (obj->synchronisation->owner == TID) {
 * 			++obj->synchronisation->nest;
 * 			return;
 * 		}
 * 	}
 * }
 * return full_monitor_enter ();
 */
gpointer
mono_arch_create_monitor_enter_trampoline (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_monitor_enter_trampoline_full (&code_size, &ji, FALSE);
}

gpointer
mono_arch_create_monitor_enter_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_MONITOR_ENTER);
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_sync_null, *jump_other_owner, *jump_cmpxchg_failed, *jump_tid;
	int tramp_size;
	int owner_offset, nest_offset, dummy;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == X86_EAX);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &dummy);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);

	tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in EAX */
		/* is obj null? */
		x86_test_reg_reg (code, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronization to ECX */
		x86_mov_reg_membase (code, X86_ECX, X86_EAX, G_STRUCT_OFFSET (MonoObject, synchronisation), 4);
		/* is synchronization null? */
		x86_test_reg_reg (code, X86_ECX, X86_ECX);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);

		/* load MonoInternalThread* into EDX */
		code = mono_x86_emit_tls_get (code, X86_EDX, mono_thread_get_tls_offset ());
		/* load TID into EDX */
		x86_mov_reg_membase (code, X86_EDX, X86_EDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 4);

		/* is synchronization->owner null? */
		x86_alu_membase_imm (code, X86_CMP, X86_ECX, owner_offset, 0);
		/* if not, jump to next case */
		jump_tid = code;
		x86_branch8 (code, X86_CC_NZ, -1, 1);

		/* if yes, try a compare-exchange with the TID */
		/* free up register EAX, needed for the zero */
		x86_push_reg (code, X86_EAX);
		/* zero EAX */
		x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
		/* compare and exchange */
		x86_prefix (code, X86_LOCK_PREFIX);
		x86_cmpxchg_membase_reg (code, X86_ECX, owner_offset, X86_EDX);
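		/*
		 * cmpxchg semantics (an x86 fact the branch below relies on): the
		 * instruction compares EAX (zeroed above) with the owner field; if
		 * they are equal it stores EDX (the TID) there and sets ZF,
		 * otherwise it clears ZF. NZ therefore means another thread grabbed
		 * the monitor first, and we fall back to the full path.
		 */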
		/* if not successful, jump to actual trampoline */
		jump_cmpxchg_failed = code;
		x86_branch8 (code, X86_CC_NZ, -1, 1);
		/* if successful, pop and return */
		x86_pop_reg (code, X86_EAX);
		x86_ret (code);

		/* next case: synchronization->owner is not null */
		x86_patch (jump_tid, code);
		/* is synchronization->owner == TID? */
		x86_alu_membase_reg (code, X86_CMP, X86_ECX, owner_offset, X86_EDX);
		/* if not, jump to actual trampoline */
		jump_other_owner = code;
		x86_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, increment nest */
		x86_inc_membase (code, X86_ECX, nest_offset);
		/* return */
		x86_ret (code);

		/* push obj */
		x86_patch (jump_obj_null, code);
		x86_patch (jump_sync_null, code);
		x86_patch (jump_other_owner, code);
		x86_push_reg (code, X86_EAX);
		/* jump to the actual trampoline */
		x86_patch (jump_cmpxchg_failed, code);
		x86_jump_code (code, tramp);
	} else {
		/* push obj and jump to the actual trampoline */
		x86_push_reg (code, X86_EAX);
		x86_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	return buf;
}

gpointer
mono_arch_create_monitor_exit_trampoline (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_monitor_exit_trampoline_full (&code_size, &ji, FALSE);
}

gpointer
mono_arch_create_monitor_exit_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_MONITOR_EXIT);
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_have_waiters, *jump_sync_null, *jump_not_owned;
	guint8 *jump_next;
	int tramp_size;
	int owner_offset, nest_offset, entry_count_offset;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == X86_EAX);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &entry_count_offset);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (entry_count_offset) == sizeof (gint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
	entry_count_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (entry_count_offset);

	tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in EAX */
		/* is obj null? */
		x86_test_reg_reg (code, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronization to ECX */
		x86_mov_reg_membase (code, X86_ECX, X86_EAX, G_STRUCT_OFFSET (MonoObject, synchronisation), 4);
		/* is synchronization null? */
		x86_test_reg_reg (code, X86_ECX, X86_ECX);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);

		/* next case: synchronization is not null */
		/* load MonoInternalThread* into EDX */
		code = mono_x86_emit_tls_get (code, X86_EDX, mono_thread_get_tls_offset ());
		/* load TID into EDX */
		x86_mov_reg_membase (code, X86_EDX, X86_EDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 4);
		/* is synchronization->owner == TID? */
		x86_alu_membase_reg (code, X86_CMP, X86_ECX, owner_offset, X86_EDX);
		/* if no, jump to actual trampoline */
		jump_not_owned = code;
		x86_branch8 (code, X86_CC_NZ, -1, 1);

		/* next case: synchronization->owner == TID */
		/* is synchronization->nest == 1? */
		x86_alu_membase_imm (code, X86_CMP, X86_ECX, nest_offset, 1);
		/* if not, jump to next case */
		jump_next = code;
		x86_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, is synchronization->entry_count zero? */
		x86_alu_membase_imm (code, X86_CMP, X86_ECX, entry_count_offset, 0);
		/* if not, jump to actual trampoline */
		jump_have_waiters = code;
		x86_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, set synchronization->owner to null and return */
		x86_mov_membase_imm (code, X86_ECX, owner_offset, 0, 4);
		x86_ret (code);

		/* next case: synchronization->nest is not 1 */
		x86_patch (jump_next, code);
		/* decrease synchronization->nest and return */
		x86_dec_membase (code, X86_ECX, nest_offset);
		x86_ret (code);

		/* push obj and jump to the actual trampoline */
		x86_patch (jump_obj_null, code);
		x86_patch (jump_have_waiters, code);
		x86_patch (jump_not_owned, code);
		x86_patch (jump_sync_null, code);

		x86_push_reg (code, X86_EAX);
		x86_jump_code (code, tramp);
	} else {
		/* push obj and jump to the actual trampoline */
		x86_push_reg (code, X86_EAX);
		x86_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	return buf;
}

#else

gpointer
mono_arch_create_monitor_enter_trampoline (void)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_create_monitor_exit_trampoline (void)
{
	g_assert_not_reached ();
	return NULL;
}

#endif

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;

	x86_push_imm (code, func_arg);
	x86_call_code (code, (guint8*)func);
}

static void
handler_block_trampoline_helper (gpointer *ptr)
{
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);

	*ptr = jit_tls->handler_block_return_address;
}

gpointer
mono_arch_create_handler_block_trampoline (void)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
	guint8 *code, *buf;
	int tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	/*
	 * This trampoline restores the call chain of the handler block, then
	 * jumps into the code that deals with it.
	 */
	if (mono_get_jit_tls_offset () != -1) {
		code = mono_x86_emit_tls_get (code, X86_EAX, mono_get_jit_tls_offset ());
		x86_mov_reg_membase (code, X86_EAX, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, handler_block_return_address), 4);
		/* simulate a call */
		x86_push_reg (code, X86_EAX);
		x86_jump_code (code, tramp);
	} else {
		/* Slow path uses a C helper */
		x86_push_reg (code, X86_ESP);
		x86_push_imm (code, tramp);
		x86_jump_code (code, handler_block_trampoline_helper);
	}

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	return buf;
}