/* locks.h - Thread synchronization primitives. X86/x86-64 implementation.

   Copyright (C) 2002  Free Software Foundation

   This file is part of libgcj.

   This software is copyrighted work licensed under the terms of the
   Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
   details.  */
#ifndef __SYSDEP_LOCKS_H__
#define __SYSDEP_LOCKS_H__

#include <stddef.h>		/* for size_t */

typedef size_t obj_addr_t;	/* Integer type big enough for an object
				   address.  */
// Atomically replace *addr by new_val if it was initially equal to old.
// Return true if the comparison succeeded.
// Assumed to have acquire semantics, i.e. later memory operations
// cannot execute before the compare_and_swap finishes.
inline static bool
compare_and_swap(volatile obj_addr_t *addr,
		 obj_addr_t old,
		 obj_addr_t new_val)
{
  char result;
#ifdef __x86_64__
  __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
		       : "=m"(*addr), "=q"(result)
		       : "r"(new_val), "a"(old), "m"(*addr)
		       : "memory");
#else
  __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
		       : "=m"(*addr), "=q"(result)
		       : "r"(new_val), "a"(old), "m"(*addr)
		       : "memory");
#endif
  return (bool) result;
}
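
// Illustrative sketch, not part of the original header: the standard
// retry loop for a read-modify-write operation built on
// compare_and_swap.  The name example_atomic_increment is hypothetical.
inline static void
example_atomic_increment(volatile obj_addr_t *counter)
{
  obj_addr_t old;
  do
    old = *counter;			// snapshot the current value
  while (!compare_and_swap(counter, old, old + 1));  // retry if it changed
}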
// Set *addr to new_val with release semantics, i.e. making sure
// that prior loads and stores complete before this assignment.
// On X86/x86-64, the hardware does not reorder earlier loads or stores
// past a later store, so a plain store suffices; we just have to
// convince gcc not to reorder it either.
inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __asm__ __volatile__(" " : : : "memory");
  *(addr) = new_val;
}
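
// Illustrative sketch, not part of the original header: a minimal
// spinlock built from compare_and_swap (acquire semantics) and
// release_set (release semantics).  The example_* names are
// hypothetical.
inline static void
example_spin_lock(volatile obj_addr_t *lock)
{
  // Spin until the lock word flips from 0 (free) to 1 (held).
  while (!compare_and_swap(lock, 0, 1))
    ;					// a real lock would pause/yield here
}

inline static void
example_spin_unlock(volatile obj_addr_t *lock)
{
  // Release semantics: critical-section accesses complete before the
  // lock word is cleared.
  release_set(lock, 0);
}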
// Compare_and_swap with release semantics instead of acquire semantics.
// On many architectures, including x86, the operation makes both
// guarantees, so the implementation can be the same.
inline static bool
compare_and_swap_release(volatile obj_addr_t *addr,
			 obj_addr_t old,
			 obj_addr_t new_val)
{
  return compare_and_swap(addr, old, new_val);
}
// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
// On X86/x86-64, the hardware ensures that reads are properly ordered,
// so no instruction is needed here.
inline static void
read_barrier()
{
}
// Ensure that prior stores to memory are completed with respect to other
// processors.
inline static void
write_barrier()
{
  /* X86/x86-64 does not reorder writes.  We just need to ensure that
     gcc also doesn't.  */
  __asm__ __volatile__(" " : : : "memory");
}
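
// Illustrative sketch, not part of the original header: publishing a
// value through a flag with the two barriers above.  The writer orders
// the data store before the flag store; the reader orders the flag
// check before the data load.  The example_* names are hypothetical.
inline static void
example_publish(volatile obj_addr_t *data, volatile obj_addr_t *ready,
		obj_addr_t value)
{
  *data = value;
  write_barrier();		// make the data visible before the flag
  *ready = 1;
}

inline static bool
example_consume(volatile obj_addr_t *data, volatile obj_addr_t *ready,
		obj_addr_t *out)
{
  if (!*ready)
    return false;
  read_barrier();		// do not read data older than the flag
  *out = *data;
  return true;
}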
#endif /* __SYSDEP_LOCKS_H__ */