[Sanitizer tests] Define 16- and 64-bit versions of atomic_compare_exchange_strong...
[blocksruntime.git] / lib/sanitizer_common/sanitizer_atomic_msvc.h
//===-- sanitizer_atomic_msvc.h ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_MSVC_H
#define SANITIZER_ATOMIC_MSVC_H

extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
extern "C" void _mm_mfence();
#pragma intrinsic(_mm_mfence)
extern "C" void _mm_pause();
#pragma intrinsic(_mm_pause)
extern "C" long _InterlockedExchangeAdd(  // NOLINT
    long volatile *Addend, long Value);   // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd)
extern "C" short _InterlockedCompareExchange16(  // NOLINT
    short volatile *Destination,                 // NOLINT
    short Exchange, short Comparand);            // NOLINT
#pragma intrinsic(_InterlockedCompareExchange16)
extern "C" long long _InterlockedCompareExchange64(  // NOLINT
    long long volatile *Destination,                 // NOLINT
    long long Exchange, long long Comparand);        // NOLINT
#pragma intrinsic(_InterlockedCompareExchange64)

// Declared unconditionally: it is also used by the 32-bit
// atomic_compare_exchange_strong below, on both x86 and x64.
extern "C" long __cdecl _InterlockedCompareExchange(  // NOLINT
    long volatile *Destination,                       // NOLINT
    long Exchange, long Comparand);                   // NOLINT
#pragma intrinsic(_InterlockedCompareExchange)

#ifdef _WIN64
extern "C" long long _InterlockedExchangeAdd64(    // NOLINT
    long long volatile *Addend, long long Value);  // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd64)
extern "C" void *_InterlockedCompareExchangePointer(
    void *volatile *Destination,
    void *Exchange, void *Comparand);
#pragma intrinsic(_InterlockedCompareExchangePointer)
#else
// There's no _InterlockedCompareExchangePointer intrinsic on x86,
// so call _InterlockedCompareExchange instead.
inline static void *_InterlockedCompareExchangePointer(
    void *volatile *Destination,
    void *Exchange, void *Comparand) {
  return reinterpret_cast<void*>(
      _InterlockedCompareExchange(
          reinterpret_cast<long volatile*>(Destination),  // NOLINT
          reinterpret_cast<long>(Exchange),               // NOLINT
          reinterpret_cast<long>(Comparand)));            // NOLINT
}
#endif

namespace __sanitizer {

INLINE void atomic_signal_fence(memory_order) {
  _ReadWriteBarrier();
}

INLINE void atomic_thread_fence(memory_order) {
  _mm_mfence();
}

INLINE void proc_yield(int cnt) {
  for (int i = 0; i < cnt; i++)
    _mm_pause();
}
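
// Usage sketch (illustrative only; 'flag' is a hypothetical atomic_uint32_t):
// a spin-wait that polls with acquire loads and backs off with proc_yield,
// which emits PAUSE to reduce pressure on the sibling hyperthread.
//
//   while (atomic_load(&flag, memory_order_acquire) == 0)
//     proc_yield(10);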

template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;
  // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
  if (mo == memory_order_relaxed) {
    v = a->val_dont_use;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    v = a->val_dont_use;
    atomic_signal_fence(memory_order_seq_cst);
  }
  return v;
}

template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
  if (mo == memory_order_relaxed) {
    a->val_dont_use = v;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    a->val_dont_use = v;
    atomic_signal_fence(memory_order_seq_cst);
  }
  if (mo == memory_order_seq_cst)
    atomic_thread_fence(memory_order_seq_cst);
}
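
// Usage sketch (illustrative only; 'payload' and 'ready' are hypothetical):
// a release store paired with an acquire load publishes data between threads;
// for this x86-only implementation the compiler fences above are sufficient.
//
//   payload = 42;                                    // producer
//   atomic_store(&ready, 1, memory_order_release);
//
//   if (atomic_load(&ready, memory_order_acquire))   // consumer
//     consume(payload);                              // sees payload == 42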

INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
    u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchangeAdd(
      (volatile long*)&a->val_dont_use, (long)v);  // NOLINT
}

INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
    uptr v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
#ifdef _WIN64
  return (uptr)_InterlockedExchangeAdd64(
      (volatile long long*)&a->val_dont_use, (long long)v);  // NOLINT
#else
  return (uptr)_InterlockedExchangeAdd(
      (volatile long*)&a->val_dont_use, (long)v);  // NOLINT
#endif
}
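
// Usage sketch (illustrative only; 'next_ticket' is a hypothetical
// atomic_uintptr_t): fetch_add returns the value *before* the increment,
// so each caller obtains a unique ticket.
//
//   uptr ticket = atomic_fetch_add(&next_ticket, 1, memory_order_relaxed);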

INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
    u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchangeAdd(
      (volatile long*)&a->val_dont_use, -(long)v);  // NOLINT
}

INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
    uptr v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
#ifdef _WIN64
  return (uptr)_InterlockedExchangeAdd64(
      (volatile long long*)&a->val_dont_use, -(long long)v);  // NOLINT
#else
  return (uptr)_InterlockedExchangeAdd(
      (volatile long*)&a->val_dont_use, -(long)v);  // NOLINT
#endif
}

// The 8- and 16-bit exchanges below use MSVC inline assembly, which is only
// available when targeting 32-bit x86 (MSVC has no inline asm for x64).
INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
    u8 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  __asm {
    mov eax, a
    mov cl, v
    xchg [eax], cl  // NOLINT
    mov v, cl
  }
  return v;
}

INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
    u16 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  __asm {
    mov eax, a
    mov cx, v
    xchg [eax], cx  // NOLINT
    mov v, cx
  }
  return v;
}
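
// Usage sketch (illustrative only; 'lock' is a hypothetical atomic_uint8_t):
// a minimal test-and-set spinlock built on the 8-bit exchange.
//
//   while (atomic_exchange(&lock, 1, memory_order_acquire) != 0)  // acquire
//     proc_yield(10);
//   ...critical section...
//   atomic_store(&lock, 0, memory_order_release);                 // release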

INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
                                           u8 *cmp,
                                           u8 xchgv,
                                           memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  u8 cmpv = *cmp;
  u8 prev;
  __asm {
    mov al, cmpv
    mov ecx, a
    mov dl, xchgv
    lock cmpxchg [ecx], dl
    mov prev, al
  }
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
                                           uptr *cmp,
                                           uptr xchg,
                                           memory_order mo) {
  uptr cmpv = *cmp;
  uptr prev = (uptr)_InterlockedCompareExchangePointer(
      (void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}
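
// Usage sketch (illustrative only; 'stack_head' is a hypothetical
// atomic_uintptr_t and 'node' a hypothetical singly linked node): lock-free
// push that retries until the head has not changed underneath it.
//
//   uptr head = atomic_load(&stack_head, memory_order_relaxed);
//   do {
//     node->next = (Node*)head;
//   } while (!atomic_compare_exchange_weak(&stack_head, &head, (uptr)node,
//                                          memory_order_release));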

INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
                                           u16 *cmp,
                                           u16 xchg,
                                           memory_order mo) {
  u16 cmpv = *cmp;
  u16 prev = (u16)_InterlockedCompareExchange16(
      (volatile short*)&a->val_dont_use, (short)xchg, (short)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
                                           u32 *cmp,
                                           u32 xchg,
                                           memory_order mo) {
  u32 cmpv = *cmp;
  u32 prev = (u32)_InterlockedCompareExchange(
      (volatile long*)&a->val_dont_use, (long)xchg, (long)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
                                           u64 *cmp,
                                           u64 xchg,
                                           memory_order mo) {
  u64 cmpv = *cmp;
  u64 prev = (u64)_InterlockedCompareExchange64(
      (volatile long long*)&a->val_dont_use, (long long)xchg, (long long)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}
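
// Sketch (illustrative only; 'a64' is a hypothetical atomic_uint64_t): a
// 64-bit fetch_add is not provided here, but it can be emulated with a
// compare-exchange loop over the 8-byte CAS above; on 32-bit x86 that CAS
// compiles to lock cmpxchg8b. A torn initial load is harmless because the
// first failing CAS refreshes 'old' atomically.
//
//   u64 old = atomic_load(&a64, memory_order_relaxed);
//   while (!atomic_compare_exchange_weak(&a64, &old, old + v,
//                                        memory_order_acq_rel)) {
//   }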

template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
                                         typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}
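
// Note that _weak simply forwards to _strong here, so it never fails
// spuriously. Usage sketch (illustrative only; 'a' is a hypothetical
// atomic_uint32_t): atomically store max(current, v); on failure '*cmp' is
// updated with the observed value, so no extra reload is needed.
//
//   u32 cur = atomic_load(&a, memory_order_relaxed);
//   while (cur < v &&
//          !atomic_compare_exchange_weak(&a, &cur, v, memory_order_acq_rel)) {
//   }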

}  // namespace __sanitizer

#endif  // SANITIZER_ATOMIC_MSVC_H