3 // Copyright (C) 2007, 2008 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the terms
7 // of the GNU General Public License as published by the Free Software
// Foundation; either version 2, or (at your option) any later version.
11 // This library is distributed in the hope that it will be useful, but
12 // WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 // General Public License for more details.
16 // You should have received a copy of the GNU General Public License
17 // along with this library; see the file COPYING. If not, write to
18 // the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
19 // MA 02111-1307, USA.
21 // As a special exception, you may use this file as part of a free
22 // software library without restriction. Specifically, if other files
23 // instantiate templates or use macros or inline functions from this
24 // file, or you compile this file and link it with other files to
25 // produce an executable, this file does not by itself cause the
26 // resulting executable to be covered by the GNU General Public
27 // License. This exception does not however invalidate any other
// reasons why the executable file might be covered by the GNU General Public License.
31 /** @file parallel/compatibility.h
32 * @brief Compatibility layer, mostly concerned with atomic operations.
33 * This file is a GNU parallel extension to the Standard C++ Library.
36 // Written by Felix Putze.
38 #ifndef _GLIBCXX_PARALLEL_COMPATIBILITY_H
39 #define _GLIBCXX_PARALLEL_COMPATIBILITY_H 1
41 #include <parallel/types.h>
42 #include <parallel/base.h>
44 #if defined(__SUNPRO_CC) && defined(__sparc)
45 #include <sys/atomic.h>
48 #if !defined(_WIN32) || defined (__CYGWIN__)
60 // Including <windows.h> will drag in all the windows32 names. Since
61 // that can cause user code portability problems, we just declare the
62 // one needed function here.
64 __attribute((dllimport
)) void __attribute__((stdcall)) Sleep (unsigned long);
67 namespace __gnu_parallel
70 template<typename must_be_int
= int>
71 int32
faa32(int32
* x
, int32 inc
)
73 asm volatile("lock xadd %0,%1"
74 : "=r" (inc
), "=m" (*x
)
80 template<typename must_be_int
= int>
81 int64
faa64(int64
* x
, int64 inc
)
83 asm volatile("lock xadd %0,%1"
84 : "=r" (inc
), "=m" (*x
)
92 // atomic functions only work on integers
94 /** @brief Add a value to a variable, atomically.
96 * Implementation is heavily platform-dependent.
97 * @param ptr Pointer to a 32-bit signed integer.
98 * @param addend Value to add.
101 fetch_and_add_32(volatile int32
* ptr
, int32 addend
)
103 #if defined(__ICC) //x86 version
104 return _InterlockedExchangeAdd((void*)ptr
, addend
);
105 #elif defined(__ECC) //IA-64 version
106 return _InterlockedExchangeAdd((void*)ptr
, addend
);
107 #elif defined(__ICL) || defined(_MSC_VER)
108 return _InterlockedExchangeAdd(reinterpret_cast<volatile long*>(ptr
),
110 #elif defined(__GNUC__)
111 return __sync_fetch_and_add(ptr
, addend
);
112 #elif defined(__SUNPRO_CC) && defined(__sparc)
113 volatile int32 before
, after
;
117 after
= before
+ addend
;
118 } while (atomic_cas_32((volatile unsigned int*)ptr
, before
,
121 #else //fallback, slow
122 #pragma message("slow fetch_and_add_32")
133 /** @brief Add a value to a variable, atomically.
135 * Implementation is heavily platform-dependent.
136 * @param ptr Pointer to a 64-bit signed integer.
137 * @param addend Value to add.
140 fetch_and_add_64(volatile int64
* ptr
, int64 addend
)
142 #if defined(__ICC) && defined(__x86_64) //x86 version
143 return faa64
<int>((int64
*)ptr
, addend
);
144 #elif defined(__ECC) //IA-64 version
145 return _InterlockedExchangeAdd64((void*)ptr
, addend
);
146 #elif defined(__ICL) || defined(_MSC_VER)
148 _GLIBCXX_PARALLEL_ASSERT(false); //not available in this case
151 return _InterlockedExchangeAdd64(ptr
, addend
);
153 #elif defined(__GNUC__) && defined(__x86_64)
154 return __sync_fetch_and_add(ptr
, addend
);
155 #elif defined(__GNUC__) && defined(__i386) && \
156 (defined(__i686) || defined(__pentium4) || defined(__athlon))
157 return __sync_fetch_and_add(ptr
, addend
);
158 #elif defined(__SUNPRO_CC) && defined(__sparc)
159 volatile int64 before
, after
;
163 after
= before
+ addend
;
164 } while (atomic_cas_64((volatile unsigned long long*)ptr
, before
,
167 #else //fallback, slow
168 #if defined(__GNUC__) && defined(__i386)
169 // XXX doesn't work with -march=native
170 //#warning "please compile with -march=i686 or better"
172 #pragma message("slow fetch_and_add_64")
183 /** @brief Add a value to a variable, atomically.
185 * Implementation is heavily platform-dependent.
186 * @param ptr Pointer to a signed integer.
187 * @param addend Value to add.
191 fetch_and_add(volatile T
* ptr
, T addend
)
193 if (sizeof(T
) == sizeof(int32
))
194 return (T
)fetch_and_add_32((volatile int32
*) ptr
, (int32
)addend
);
195 else if (sizeof(T
) == sizeof(int64
))
196 return (T
)fetch_and_add_64((volatile int64
*) ptr
, (int64
)addend
);
198 _GLIBCXX_PARALLEL_ASSERT(false);
204 template<typename must_be_int
= int>
206 cas32(volatile int32
* ptr
, int32 old
, int32 nw
)
209 __asm__
__volatile__("lock; cmpxchgl %1,%2"
211 : "q"(nw
), "m"(*(volatile long long*)(ptr
)), "0"(old
)
216 #if defined(__x86_64)
217 template<typename must_be_int
= int>
219 cas64(volatile int64
*ptr
, int64 old
, int64 nw
)
222 __asm__
__volatile__("lock; cmpxchgq %1,%2"
224 : "q"(nw
), "m"(*(volatile long long*)(ptr
)), "0"(old
)
232 /** @brief Compare @c *ptr and @c comparand. If equal, let @c
233 * *ptr=replacement and return @c true, return @c false otherwise.
235 * Implementation is heavily platform-dependent.
236 * @param ptr Pointer to 32-bit signed integer.
237 * @param comparand Compare value.
238 * @param replacement Replacement value.
241 compare_and_swap_32(volatile int32
* ptr
, int32 comparand
, int32 replacement
)
243 #if defined(__ICC) //x86 version
244 return _InterlockedCompareExchange((void*)ptr
, replacement
,
245 comparand
) == comparand
;
246 #elif defined(__ECC) //IA-64 version
247 return _InterlockedCompareExchange((void*)ptr
, replacement
,
248 comparand
) == comparand
;
249 #elif defined(__ICL) || defined(_MSC_VER)
250 return _InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr
),
251 replacement
, comparand
) == comparand
;
252 #elif defined(__GNUC__)
253 return __sync_bool_compare_and_swap(ptr
, comparand
, replacement
);
254 #elif defined(__SUNPRO_CC) && defined(__sparc)
255 return atomic_cas_32((volatile unsigned int*)ptr
, comparand
,
256 replacement
) == comparand
;
258 #pragma message("slow compare_and_swap_32")
262 if (*ptr
== comparand
)
272 /** @brief Compare @c *ptr and @c comparand. If equal, let @c
273 * *ptr=replacement and return @c true, return @c false otherwise.
275 * Implementation is heavily platform-dependent.
276 * @param ptr Pointer to 64-bit signed integer.
277 * @param comparand Compare value.
278 * @param replacement Replacement value.
281 compare_and_swap_64(volatile int64
* ptr
, int64 comparand
, int64 replacement
)
283 #if defined(__ICC) && defined(__x86_64) //x86 version
284 return cas64
<int>(ptr
, comparand
, replacement
) == comparand
;
285 #elif defined(__ECC) //IA-64 version
286 return _InterlockedCompareExchange64((void*)ptr
, replacement
,
287 comparand
) == comparand
;
288 #elif defined(__ICL) || defined(_MSC_VER)
290 _GLIBCXX_PARALLEL_ASSERT(false); //not available in this case
293 return _InterlockedCompareExchange64(ptr
, replacement
,
294 comparand
) == comparand
;
297 #elif defined(__GNUC__) && defined(__x86_64)
298 return __sync_bool_compare_and_swap(ptr
, comparand
, replacement
);
299 #elif defined(__GNUC__) && defined(__i386) && \
300 (defined(__i686) || defined(__pentium4) || defined(__athlon))
301 return __sync_bool_compare_and_swap(ptr
, comparand
, replacement
);
302 #elif defined(__SUNPRO_CC) && defined(__sparc)
303 return atomic_cas_64((volatile unsigned long long*)ptr
,
304 comparand
, replacement
) == comparand
;
306 #if defined(__GNUC__) && defined(__i386)
308 //#warning "please compile with -march=i686 or better"
310 #pragma message("slow compare_and_swap_64")
314 if (*ptr
== comparand
)
324 /** @brief Compare @c *ptr and @c comparand. If equal, let @c
325 * *ptr=replacement and return @c true, return @c false otherwise.
327 * Implementation is heavily platform-dependent.
328 * @param ptr Pointer to signed integer.
329 * @param comparand Compare value.
330 * @param replacement Replacement value. */
333 compare_and_swap(volatile T
* ptr
, T comparand
, T replacement
)
335 if (sizeof(T
) == sizeof(int32
))
336 return compare_and_swap_32((volatile int32
*) ptr
, (int32
)comparand
, (int32
)replacement
);
337 else if (sizeof(T
) == sizeof(int64
))
338 return compare_and_swap_64((volatile int64
*) ptr
, (int64
)comparand
, (int64
)replacement
);
340 _GLIBCXX_PARALLEL_ASSERT(false);
343 /** @brief Yield the control to another thread, without waiting for
344 the end to the time slice. */
348 #if defined (_WIN32) && !defined (__CYGWIN__)