[nvptx] Add support for CUDA 9
[official-gcc.git] / libatomic / fop_n.c
blob556dbc38616e138460a9c2734c38c4b6b97db45e
1 /* Copyright (C) 2012-2018 Free Software Foundation, Inc.
2 Contributed by Richard Henderson <rth@redhat.com>.
4 This file is part of the GNU Atomic Library (libatomic).
6 Libatomic is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 Libatomic is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13 FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
16 Under Section 7 of GPL version 3, you are granted additional
17 permissions described in the GCC Runtime Library Exception, version
18 3.1, as published by the Free Software Foundation.
20 You should have received a copy of the GNU General Public License and
21 a copy of the GCC Runtime Library Exception along with this program;
22 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 <http://www.gnu.org/licenses/>. */
25 #include <libatomic_i.h>
/* This file is included multiple times with required defines:
     NAME	the name of the operation that we're implementing;
     OP		a two-operand functional macro that implements the operation.  */
34 /* If we support the builtin, just use it. */
35 #if !DONE && SIZE(HAVE_ATOMIC_FETCH_OP)
36 UTYPE
37 SIZE(C2(libat_fetch_,NAME)) (UTYPE *mptr, UTYPE opval, int smodel)
39 if (maybe_specialcase_relaxed(smodel))
40 return C2(__atomic_fetch_,NAME) (mptr, opval, __ATOMIC_RELAXED);
41 else if (maybe_specialcase_acqrel(smodel))
42 return C2(__atomic_fetch_,NAME) (mptr, opval, __ATOMIC_ACQ_REL);
43 else
44 return C2(__atomic_fetch_,NAME) (mptr, opval, __ATOMIC_SEQ_CST);
47 UTYPE
48 SIZE(C3(libat_,NAME,_fetch)) (UTYPE *mptr, UTYPE opval, int smodel)
50 if (maybe_specialcase_relaxed(smodel))
51 return C3(__atomic_,NAME,_fetch) (mptr, opval, __ATOMIC_RELAXED);
52 else if (maybe_specialcase_acqrel(smodel))
53 return C3(__atomic_,NAME,_fetch) (mptr, opval, __ATOMIC_ACQ_REL);
54 else
55 return C3(__atomic_,NAME,_fetch) (mptr, opval, __ATOMIC_SEQ_CST);
58 #define DONE 1
59 #endif /* HAVE_ATOMIC_FETCH_OP */
#if !DONE && defined(atomic_compare_exchange_n)
/* No direct fetch-op builtin; synthesize it with a compare-and-swap loop
   on the naturally sized object.  The CAS itself runs relaxed; the memory
   model SMODEL is honored by the explicit pre/post barriers.  */

/* Atomically perform *MPTR = OP (*MPTR, OPVAL); return the OLD value.  */
UTYPE
SIZE(C2(libat_fetch_,NAME)) (UTYPE *mptr, UTYPE opval, int smodel)
{
  UTYPE oldval, t;

  pre_barrier (smodel);

  oldval = *mptr;
  do
    {
      t = OP(oldval, opval);
    }
  /* On failure, atomic_compare_exchange_n refreshes OLDVAL with the
     current contents of *MPTR and we recompute.  */
  while (!atomic_compare_exchange_n (mptr, &oldval, t, true,
				     __ATOMIC_RELAXED, __ATOMIC_RELAXED));

  post_barrier (smodel);
  return oldval;
}

/* Atomically perform *MPTR = OP (*MPTR, OPVAL); return the NEW value.  */
UTYPE
SIZE(C3(libat_,NAME,_fetch)) (UTYPE *mptr, UTYPE opval, int smodel)
{
  UTYPE oldval, t;

  pre_barrier (smodel);

  oldval = *mptr;
  do
    {
      t = OP(oldval, opval);
    }
  while (!atomic_compare_exchange_n (mptr, &oldval, t, true,
				     __ATOMIC_RELAXED, __ATOMIC_RELAXED));

  post_barrier (smodel);
  return t;
}

#define DONE 1
#endif /* atomic_compare_exchange_n */
105 /* If this type is no larger than word-sized, fall back to a word-sized
106 compare-and-swap loop. */
/* If this type is no larger than word-sized, fall back to a word-sized
   compare-and-swap loop.  */
#if !DONE && N < WORDSIZE && defined(atomic_compare_exchange_w)
/* Operate on the aligned word containing the N-byte object: isolate the
   object's lane with MASK/SHIFT, apply OP within that lane, and CAS the
   whole word.  SIZE(INVERT_MASK) adjusts the bit offset for the target's
   byte order -- see the mask definitions in libatomic_i.h.  */

/* Atomically perform *MPTR = OP (*MPTR, OPVAL); return the OLD value.  */
UTYPE
SIZE(C2(libat_fetch_,NAME)) (UTYPE *mptr, UTYPE opval, int smodel)
{
  UWORD mask, shift, woldval, wopval, t, *wptr;

  pre_barrier (smodel);

  /* Round MPTR down to the containing aligned word and compute the bit
     position of the object within it.  */
  wptr = (UWORD *)((uintptr_t)mptr & -WORDSIZE);
  shift = (((uintptr_t)mptr % WORDSIZE) * CHAR_BIT) ^ SIZE(INVERT_MASK);
  mask = SIZE(MASK) << shift;

  wopval = (UWORD)opval << shift;
  woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
  do
    {
      /* Update only the bits covered by MASK; bytes outside the object
	 are carried through unchanged.  */
      t = (woldval & ~mask) | (OP(woldval, wopval) & mask);
    }
  while (!atomic_compare_exchange_w (wptr, &woldval, t, true,
				     __ATOMIC_RELAXED, __ATOMIC_RELAXED));

  post_barrier (smodel);
  return woldval >> shift;
}

/* Atomically perform *MPTR = OP (*MPTR, OPVAL); return the NEW value.  */
UTYPE
SIZE(C3(libat_,NAME,_fetch)) (UTYPE *mptr, UTYPE opval, int smodel)
{
  UWORD mask, shift, woldval, wopval, t, *wptr;

  pre_barrier (smodel);

  wptr = (UWORD *)((uintptr_t)mptr & -WORDSIZE);
  shift = (((uintptr_t)mptr % WORDSIZE) * CHAR_BIT) ^ SIZE(INVERT_MASK);
  mask = SIZE(MASK) << shift;

  wopval = (UWORD)opval << shift;
  woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
  do
    {
      t = (woldval & ~mask) | (OP(woldval, wopval) & mask);
    }
  while (!atomic_compare_exchange_w (wptr, &woldval, t, true,
				     __ATOMIC_RELAXED, __ATOMIC_RELAXED));

  post_barrier (smodel);
  return t >> shift;
}

#define DONE 1
#endif /* atomic_compare_exchange_w */
160 /* Otherwise, fall back to some sort of protection mechanism. */
161 #if !DONE
162 UTYPE
163 SIZE(C2(libat_fetch_,NAME)) (UTYPE *mptr, UTYPE opval, int smodel UNUSED)
165 UTYPE ret;
166 UWORD magic;
168 pre_seq_barrier (smodel);
169 magic = protect_start (mptr);
171 ret = *mptr;
172 *mptr = OP(ret, opval);
174 protect_end (mptr, magic);
175 post_seq_barrier (smodel);
177 return ret;
180 UTYPE
181 SIZE(C3(libat_,NAME,_fetch)) (UTYPE *mptr, UTYPE opval, int smodel UNUSED)
183 UTYPE ret;
184 UWORD magic;
186 pre_seq_barrier (smodel);
187 magic = protect_start (mptr);
189 ret = OP (*mptr, opval);
190 *mptr = ret;
192 protect_end (mptr, magic);
193 post_seq_barrier (smodel);
195 return ret;
197 #endif
199 EXPORT_ALIAS (SIZE(C2(fetch_,NAME)));
200 EXPORT_ALIAS (SIZE(C2(NAME,_fetch)));