//===-- sanitizer_atomic_clang_mips.h ---------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_MIPS_H
#define SANITIZER_ATOMIC_CLANG_MIPS_H

namespace __sanitizer {

// MIPS32 does not support atomics > 4 bytes. To address this lack of
// functionality, the sanitizer library provides helper methods which use an
// internal spin lock mechanism to emulate atomic operations when the size is
// 8 bytes.
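// The lock is acquired with a test-and-test-and-set loop:
// __sync_lock_test_and_set returns the previous value, so the outer loop
// retries until it observes the lock going from 0 to 1, while the inner loop
// spins on a plain read to avoid repeated atomic writes.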
static void __spin_lock(volatile int *lock) {
  while (__sync_lock_test_and_set(lock, 1))
    while (*lock) {
    }
}

static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }

// Make sure the lock is on its own cache line to prevent false sharing.
// Put it inside a struct that is aligned and padded to the typical MIPS
// cacheline which is 32 bytes.
static struct {
  int lock;
  char pad[32 - sizeof(int)];
} __attribute__((aligned(32))) lock = {0, {0}};
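
// 64-bit fetch-add, emulated by performing the read-modify-write of
// val_dont_use while holding the global spin lock.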
template <>
INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
                                              atomic_uint64_t::Type val,
                                              memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));

  atomic_uint64_t::Type ret;

  __spin_lock(&lock.lock);
  ret = *(const_cast<atomic_uint64_t::Type volatile *>(&ptr->val_dont_use));
  ptr->val_dont_use = ret + val;
  __spin_unlock(&lock.lock);

  return ret;
}
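
// 64-bit fetch-sub, expressed as a fetch-add of the two's-complement
// negation of val.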
template <>
INLINE atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
                                              atomic_uint64_t::Type val,
                                              memory_order mo) {
  return atomic_fetch_add(ptr, -val, mo);
}
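
// 64-bit compare-and-exchange: while holding the spin lock, the stored value
// is replaced with xchg only if it equals *cmp; returns true if the exchange
// took place.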
template <>
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
                                           atomic_uint64_t::Type *cmp,
                                           atomic_uint64_t::Type xchg,
                                           memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));

  typedef atomic_uint64_t::Type Type;
  Type cmpv = *cmp;
  Type prev;
  bool ret = false;

  __spin_lock(&lock.lock);
  prev = *(const_cast<Type volatile *>(&ptr->val_dont_use));
  if (prev == cmpv) {
    ret = true;
    ptr->val_dont_use = xchg;
  }
  __spin_unlock(&lock.lock);

  return ret;
}
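
// 64-bit load, emulated as a fetch-add of zero so it goes through the same
// spin lock as the read-modify-write operations.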
template <>
INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
                                         memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));

  atomic_uint64_t::Type zero = 0;
  volatile atomic_uint64_t *Newptr =
      const_cast<volatile atomic_uint64_t *>(ptr);
  return atomic_fetch_add(Newptr, zero, mo);
}
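
// 64-bit store, performed under the spin lock so it cannot tear or interleave
// with the read-modify-write operations above.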
template <>
INLINE void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
                         memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));

  __spin_lock(&lock.lock);
  ptr->val_dont_use = v;
  __spin_unlock(&lock.lock);
}
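
// Illustrative usage (assuming the atomic_uint64_t type and the generic
// templates from sanitizer_atomic.h): a relaxed 64-bit counter increment
// such as
//   atomic_uint64_t counter;
//   atomic_fetch_add(&counter, 1, memory_order_relaxed);
// resolves to the lock-based specializations above on MIPS32.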

}  // namespace __sanitizer

#endif  // SANITIZER_ATOMIC_CLANG_MIPS_H