libatomic/gcas.c

/* Copyright (C) 2012-2014 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Atomic Library (libatomic).

   Libatomic is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   Libatomic is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "libatomic_i.h"
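
/* Throughout this file, C2 and C3 paste their arguments into a single
   token (e.g. C2(HAVE_INT,N) becomes HAVE_INT16 when N is 16), and
   PTR(N,x) treats x as a pointer to the N-byte integer type; these
   helpers are supplied by libatomic_i.h.  */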

/* If we natively support the cas, and if we're unconcerned with extra
   barriers (e.g. fully in-order cpu for which barriers are a nop), then
   go ahead and expand the operation inline.  */
#if !defined(WANT_SPECIALCASE_RELAXED) && !defined(__OPTIMIZE_SIZE__)
# define EXACT_INLINE(N)                                        \
  if (C2(HAVE_ATOMIC_CAS_,N))                                   \
    return __atomic_compare_exchange_n                          \
      (PTR(N,mptr), PTR(N,eptr), *PTR(N,dptr), false,           \
       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#else
# define EXACT_INLINE(N)
#endif

/* ... and if all that fails, invoke the function we generated elsewhere.
   Worst case, this will *also* use locks.  */
#define EXACT(N)                                                \
  do {                                                          \
    if (!C2(HAVE_INT,N)) break;                                 \
    if ((uintptr_t)mptr & (N - 1)) break;                       \
    EXACT_INLINE (N);                                           \
    return C3(local_,compare_exchange_,N)                       \
      (PTR(N,mptr), PTR(N,eptr), *PTR(N,dptr), smodel, fmodel); \
  } while (0)
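
/* For an object smaller than the widths handled above, widen: load the
   naturally aligned N-byte word that contains it, compare the embedded
   bytes against the expected value, and if they match CAS in a copy of
   the word with the desired bytes spliced in.  This only applies when
   the object does not straddle the wider word.  */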
#define LARGER(N)                                               \
  do {                                                          \
    if (!C2(HAVE_INT,N)) break;                                 \
    if (!C2(HAVE_ATOMIC_LDST_,N)) break;                        \
    if (!C2(MAYBE_HAVE_ATOMIC_CAS_,N)) break;                   \
    r = (uintptr_t)mptr & (N - 1);                              \
    a = (uintptr_t)mptr & -N;                                   \
    if (r + n <= N)                                             \
      {                                                         \
        pre_barrier (smodel);                                   \
        u.C2(i,N) = __atomic_load_n (PTR(N,a), __ATOMIC_RELAXED); \
        do {                                                    \
          if (memcmp (u.b + r, eptr, n) != 0) goto Lfail;       \
          v = u; memcpy (v.b + r, dptr, n);                     \
        } while (!(C2(HAVE_ATOMIC_CAS_,N)                       \
                   ? __atomic_compare_exchange_n (PTR(N,a),     \
                        &u.C2(i,N), v.C2(i,N), true,            \
                        __ATOMIC_RELAXED, __ATOMIC_RELAXED)     \
                   : C3(local_,compare_exchange_,N) (PTR(N,a),  \
                        &u.C2(i,N), v.C2(i,N),                  \
                        __ATOMIC_RELAXED, __ATOMIC_RELAXED)));  \
        goto Lsucc;                                             \
      }                                                         \
  } while (0)
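
/* The generic compare-and-swap: dispatch on the size, trying an
   exact-width CAS first, then a CAS on the wider word containing the
   object, and finally the lock-protected path at the end of the
   function.  */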
bool
libat_compare_exchange (size_t n, void *mptr, void *eptr, void *dptr,
                        int smodel, int fmodel)
{
  union max_size_u u, v;
  uintptr_t r, a;
  bool ret;

  switch (n)
    {
    case 0:                             return true;
    case 1:             EXACT(1);       goto L4;
    case 2:             EXACT(2);       goto L4;
    case 4:             EXACT(4);       goto L8;
    case 8:             EXACT(8);       goto L16;
    case 16:            EXACT(16);      break;

    case 3: L4:         LARGER(4);      /* FALLTHRU */
    case 5 ... 7: L8:   LARGER(8);      /* FALLTHRU */
    case 9 ... 15: L16: LARGER(16);     break;
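
    /* The LARGER expansions above jump here once the wide CAS succeeds,
       or as soon as the embedded bytes fail to match the expected value.  */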
    Lsucc:
      post_barrier (smodel);
      return true;
    Lfail:
      post_barrier (fmodel);
      memcpy (eptr, u.b + r, n);
      return false;
    }
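
  /* No usable hardware path applies; protect the operation with
     libatomic's locks instead.  */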
  pre_seq_barrier (smodel);
  libat_lock_n (mptr, n);

  ret = memcmp (mptr, eptr, n) == 0;
  memcpy ((ret ? mptr : eptr), (ret ? dptr : mptr), n);

  libat_unlock_n (mptr, n);
  post_seq_barrier (ret ? smodel : fmodel);

  return ret;
}

EXPORT_ALIAS (compare_exchange);
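
/* Usage sketch (illustrative only; the struct and variable names below
   are hypothetical, not part of libatomic): a compare-exchange on an
   object whose size has no inline expansion is emitted by the compiler
   as a call to the generic entry point exported above, with the size
   passed explicitly:

     struct rgb { unsigned char r, g, b; };     // 3 bytes: no native CAS
     struct rgb pixel, expected, desired;
     ...
     // The generic builtin below lowers to roughly
     //   __atomic_compare_exchange (3, &pixel, &expected, &desired,
     //                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST),
     // i.e. libat_compare_exchange with n == 3, which lands in the
     // LARGER(4) case above (or in the lock-based fallback).
     __atomic_compare_exchange (&pixel, &expected, &desired, false,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
*/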