[official-gcc.git] libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
//===-- sanitizer_atomic_msvc.h ---------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ATOMIC_MSVC_H
#define SANITIZER_ATOMIC_MSVC_H

extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
extern "C" void _mm_mfence();
#pragma intrinsic(_mm_mfence)
extern "C" void _mm_pause();
#pragma intrinsic(_mm_pause)
extern "C" long _InterlockedExchangeAdd(  // NOLINT
    long volatile *Addend, long Value);   // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd)

#ifdef _WIN64
extern "C" void *_InterlockedCompareExchangePointer(
    void *volatile *Destination,
    void *Exchange, void *Comparand);
#pragma intrinsic(_InterlockedCompareExchangePointer)
#else
// There's no _InterlockedCompareExchangePointer intrinsic on x86,
// so call _InterlockedCompareExchange instead.
extern "C"
long __cdecl _InterlockedCompareExchange(  // NOLINT
    long volatile *Destination,            // NOLINT
    long Exchange, long Comparand);        // NOLINT
#pragma intrinsic(_InterlockedCompareExchange)
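
// On 32-bit x86 Windows both object pointers and long are 32 bits wide, so
// round-tripping the pointer arguments through long in this wrapper is
// lossless.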
inline static void *_InterlockedCompareExchangePointer(
    void *volatile *Destination,
    void *Exchange, void *Comparand) {
  return reinterpret_cast<void*>(
      _InterlockedCompareExchange(
          reinterpret_cast<long volatile*>(Destination),  // NOLINT
          reinterpret_cast<long>(Exchange),               // NOLINT
          reinterpret_cast<long>(Comparand)));            // NOLINT
}
#endif

namespace __sanitizer {
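
// _ReadWriteBarrier is a compiler-level barrier only: it prevents the compiler
// from reordering memory accesses across it but emits no machine instruction.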
INLINE void atomic_signal_fence(memory_order) {
  _ReadWriteBarrier();
}
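
// _mm_mfence issues an MFENCE instruction, a full hardware memory fence.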
INLINE void atomic_thread_fence(memory_order) {
  _mm_mfence();
}
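
// _mm_pause issues a PAUSE instruction, a hint that reduces the power use and
// memory-order-violation penalty of spin-wait loops.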
INLINE void proc_yield(int cnt) {
  for (int i = 0; i < cnt; i++)
    _mm_pause();
}
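
// On x86 an aligned load of register width or less already has acquire
// semantics, so the non-relaxed path only has to stop compiler reordering;
// the surrounding signal fences do that without emitting any instructions.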
template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;
  // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
  if (mo == memory_order_relaxed) {
    v = a->val_dont_use;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    v = a->val_dont_use;
    atomic_signal_fence(memory_order_seq_cst);
  }
  return v;
}
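
// Likewise, an aligned store already has release semantics on x86, so the
// signal fences only constrain the compiler; a seq_cst store additionally
// needs the trailing full hardware fence to order it before later loads.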
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
  if (mo == memory_order_relaxed) {
    a->val_dont_use = v;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    a->val_dont_use = v;
    atomic_signal_fence(memory_order_seq_cst);
  }
  if (mo == memory_order_seq_cst)
    atomic_thread_fence(memory_order_seq_cst);
}
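
// _InterlockedExchangeAdd atomically adds v and returns the previous value;
// it acts as a full memory barrier, so the requested order can be ignored.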
INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
    u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchangeAdd(
      (volatile long*)&a->val_dont_use, (long)v);  // NOLINT
}
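
// The 8- and 16-bit exchanges below use inline assembly; XCHG with a memory
// operand asserts LOCK implicitly, so no explicit lock prefix is needed.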
INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
    u8 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  __asm {
    mov eax, a
    mov cl, v
    xchg [eax], cl  // NOLINT
    mov v, cl
  }
  return v;
}

INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
    u16 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  __asm {
    mov eax, a
    mov cx, v
    xchg [eax], cx  // NOLINT
    mov v, cx
  }
  return v;
}
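
// LOCK CMPXCHG compares AL with the byte at [ecx]: if they are equal it stores
// DL there, otherwise it loads the current byte into AL. Either way AL ends up
// holding the previous value, which is copied out as 'prev'.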
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
                                           u8 *cmp,
                                           u8 xchgv,
                                           memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  u8 cmpv = *cmp;
  u8 prev;
  __asm {
    mov al, cmpv
    mov ecx, a
    mov dl, xchgv
    lock cmpxchg [ecx], dl
    mov prev, al
  }
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}
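
// _InterlockedCompareExchangePointer atomically compares *Destination with
// Comparand, stores Exchange on a match, and returns the initial value of
// *Destination; a failed compare is reported back through *cmp.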
INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
                                           uptr *cmp,
                                           uptr xchg,
                                           memory_order mo) {
  uptr cmpv = *cmp;
  uptr prev = (uptr)_InterlockedCompareExchangePointer(
      (void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}
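
// x86 compare-exchange cannot fail spuriously (there is no LL/SC), so the
// weak variant simply forwards to the strong one.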
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
                                         typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}

}  // namespace __sanitizer

#endif  // SANITIZER_ATOMIC_MSVC_H