msysgit.git: mingw/lib/gcc/mingw32/4.3.3/include/c++/parallel/compatibility.h
// -*- C++ -*-

// Copyright (C) 2007, 2008 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 2, or (at your option) any later
// version.

// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this library; see the file COPYING.  If not, write to
// the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
// MA 02111-1307, USA.

// As a special exception, you may use this file as part of a free
// software library without restriction.  Specifically, if other files
// instantiate templates or use macros or inline functions from this
// file, or you compile this file and link it with other files to
// produce an executable, this file does not by itself cause the
// resulting executable to be covered by the GNU General Public
// License.  This exception does not however invalidate any other
// reasons why the executable file might be covered by the GNU General
// Public License.

/** @file parallel/compatibility.h
 *  @brief Compatibility layer, mostly concerned with atomic operations.
 *  This file is a GNU parallel extension to the Standard C++ Library. */

// Written by Felix Putze.

#ifndef _GLIBCXX_PARALLEL_COMPATIBILITY_H
#define _GLIBCXX_PARALLEL_COMPATIBILITY_H 1

#include <parallel/types.h>
#include <parallel/base.h>

#if defined(__SUNPRO_CC) && defined(__sparc)
#include <sys/atomic.h>
#endif

#if !defined(_WIN32) || defined (__CYGWIN__)
#include <sched.h>
#endif

#if defined(_MSC_VER)
#include <Windows.h>
#include <intrin.h>
#undef max
#undef min
#endif

#ifdef __MINGW32__
// Including <windows.h> will drag in all the windows32 names.  Since
// that can cause user code portability problems, we just declare the
// one needed function here.
extern "C"
__attribute((dllimport)) void __attribute__((stdcall)) Sleep(unsigned long);
#endif

namespace __gnu_parallel
{
#if defined(__ICC)
  template<typename must_be_int = int>
  int32 faa32(int32* x, int32 inc)
  {
    asm volatile("lock xadd %0,%1"
                 : "=r" (inc), "=m" (*x)
                 : "0" (inc)
                 : "memory");
    return inc;
  }
#if defined(__x86_64)
  template<typename must_be_int = int>
  int64 faa64(int64* x, int64 inc)
  {
    asm volatile("lock xadd %0,%1"
                 : "=r" (inc), "=m" (*x)
                 : "0" (inc)
                 : "memory");
    return inc;
  }
#endif
#endif
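  // The "lock xadd" instruction atomically exchanges its register operand
  // with the memory operand and stores their sum back to memory, so `inc'
  // holds the previous value of *x on return -- the same fetch-then-add
  // contract as the __sync_fetch_and_add builtin used on the GCC paths
  // below.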
  // atomic functions only work on integers

  /** @brief Add a value to a variable, atomically.
   *
   *  Implementation is heavily platform-dependent.
   *  @param ptr Pointer to a 32-bit signed integer.
   *  @param addend Value to add. */
  inline int32
  fetch_and_add_32(volatile int32* ptr, int32 addend)
  {
#if defined(__ICC)      //x86 version
    return _InterlockedExchangeAdd((void*)ptr, addend);
#elif defined(__ECC)    //IA-64 version
    return _InterlockedExchangeAdd((void*)ptr, addend);
#elif defined(__ICL) || defined(_MSC_VER)
    return _InterlockedExchangeAdd(reinterpret_cast<volatile long*>(ptr),
                                   addend);
#elif defined(__GNUC__)
    return __sync_fetch_and_add(ptr, addend);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    volatile int32 before, after;
    do
      {
        before = *ptr;
        after = before + addend;
      } while (atomic_cas_32((volatile unsigned int*)ptr, before,
                             after) != before);
    return before;
#else   //fallback, slow
#pragma message("slow fetch_and_add_32")
    int32 res;
#pragma omp critical
    {
      res = *ptr;
      *(ptr) += addend;
    }
    return res;
#endif
  }

  /** @brief Add a value to a variable, atomically.
   *
   *  Implementation is heavily platform-dependent.
   *  @param ptr Pointer to a 64-bit signed integer.
   *  @param addend Value to add. */
  inline int64
  fetch_and_add_64(volatile int64* ptr, int64 addend)
  {
#if defined(__ICC) && defined(__x86_64) //x86 version
    return faa64<int>((int64*)ptr, addend);
#elif defined(__ECC)    //IA-64 version
    return _InterlockedExchangeAdd64((void*)ptr, addend);
#elif defined(__ICL) || defined(_MSC_VER)
#ifndef _WIN64
    _GLIBCXX_PARALLEL_ASSERT(false);    //not available in this case
    return 0;
#else
    return _InterlockedExchangeAdd64(ptr, addend);
#endif
#elif defined(__GNUC__) && defined(__x86_64)
    return __sync_fetch_and_add(ptr, addend);
#elif defined(__GNUC__) && defined(__i386) && \
  (defined(__i686) || defined(__pentium4) || defined(__athlon))
    return __sync_fetch_and_add(ptr, addend);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    volatile int64 before, after;
    do
      {
        before = *ptr;
        after = before + addend;
      } while (atomic_cas_64((volatile unsigned long long*)ptr, before,
                             after) != before);
    return before;
#else   //fallback, slow
#if defined(__GNUC__) && defined(__i386)
    // XXX doesn't work with -march=native
    //#warning "please compile with -march=i686 or better"
#endif
#pragma message("slow fetch_and_add_64")
    int64 res;
#pragma omp critical
    {
      res = *ptr;
      *(ptr) += addend;
    }
    return res;
#endif
  }

  /** @brief Add a value to a variable, atomically.
   *
   *  Implementation is heavily platform-dependent.
   *  @param ptr Pointer to a signed integer.
   *  @param addend Value to add. */
  template<typename T>
  inline T
  fetch_and_add(volatile T* ptr, T addend)
  {
    if (sizeof(T) == sizeof(int32))
      return (T)fetch_and_add_32((volatile int32*) ptr, (int32)addend);
    else if (sizeof(T) == sizeof(int64))
      return (T)fetch_and_add_64((volatile int64*) ptr, (int64)addend);
    else
      _GLIBCXX_PARALLEL_ASSERT(false);
  }
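  // Usage sketch (illustrative; `next_item' is a hypothetical variable, not
  // part of this header): threads claim distinct work items by atomically
  // post-incrementing a shared counter, each caller receiving the counter's
  // previous value.
  //
  //   volatile int32 next_item = 0;
  //   ...
  //   int32 mine = fetch_and_add(&next_item, int32(1));
  //   // `mine' is unique per call; next_item has advanced by 1.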
#if defined(__ICC)

  template<typename must_be_int = int>
  inline int32
  cas32(volatile int32* ptr, int32 old, int32 nw)
  {
    int32 before;
    __asm__ __volatile__("lock; cmpxchgl %1,%2"
                         : "=a"(before)
                         : "q"(nw), "m"(*(volatile long long*)(ptr)), "0"(old)
                         : "memory");
    return before;
  }

#if defined(__x86_64)
  template<typename must_be_int = int>
  inline int64
  cas64(volatile int64 *ptr, int64 old, int64 nw)
  {
    int64 before;
    __asm__ __volatile__("lock; cmpxchgq %1,%2"
                         : "=a"(before)
                         : "q"(nw), "m"(*(volatile long long*)(ptr)), "0"(old)
                         : "memory");
    return before;
  }
#endif

#endif
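  // With "lock; cmpxchg" the accumulator register (constraint "=a") receives
  // the value that was in memory, so cas32/cas64 return the previous *ptr;
  // the swap took place exactly when that return value equals `old'.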
  /** @brief Compare @c *ptr and @c comparand. If equal, let @c
   *  *ptr=replacement and return @c true, return @c false otherwise.
   *
   *  Implementation is heavily platform-dependent.
   *  @param ptr Pointer to 32-bit signed integer.
   *  @param comparand Compare value.
   *  @param replacement Replacement value. */
  inline bool
  compare_and_swap_32(volatile int32* ptr, int32 comparand, int32 replacement)
  {
#if defined(__ICC)      //x86 version
    return _InterlockedCompareExchange((void*)ptr, replacement,
                                       comparand) == comparand;
#elif defined(__ECC)    //IA-64 version
    return _InterlockedCompareExchange((void*)ptr, replacement,
                                       comparand) == comparand;
#elif defined(__ICL) || defined(_MSC_VER)
    return _InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr),
                                       replacement, comparand) == comparand;
#elif defined(__GNUC__)
    return __sync_bool_compare_and_swap(ptr, comparand, replacement);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    return atomic_cas_32((volatile unsigned int*)ptr, comparand,
                         replacement) == comparand;
#else
#pragma message("slow compare_and_swap_32")
    bool res = false;
#pragma omp critical
    {
      if (*ptr == comparand)
        {
          *ptr = replacement;
          res = true;
        }
    }
    return res;
#endif
  }

  /** @brief Compare @c *ptr and @c comparand. If equal, let @c
   *  *ptr=replacement and return @c true, return @c false otherwise.
   *
   *  Implementation is heavily platform-dependent.
   *  @param ptr Pointer to 64-bit signed integer.
   *  @param comparand Compare value.
   *  @param replacement Replacement value. */
  inline bool
  compare_and_swap_64(volatile int64* ptr, int64 comparand, int64 replacement)
  {
#if defined(__ICC) && defined(__x86_64) //x86 version
    return cas64<int>(ptr, comparand, replacement) == comparand;
#elif defined(__ECC)    //IA-64 version
    return _InterlockedCompareExchange64((void*)ptr, replacement,
                                         comparand) == comparand;
#elif defined(__ICL) || defined(_MSC_VER)
#ifndef _WIN64
    _GLIBCXX_PARALLEL_ASSERT(false);    //not available in this case
    return 0;
#else
    return _InterlockedCompareExchange64(ptr, replacement,
                                         comparand) == comparand;
#endif

#elif defined(__GNUC__) && defined(__x86_64)
    return __sync_bool_compare_and_swap(ptr, comparand, replacement);
#elif defined(__GNUC__) && defined(__i386) && \
  (defined(__i686) || defined(__pentium4) || defined(__athlon))
    return __sync_bool_compare_and_swap(ptr, comparand, replacement);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    return atomic_cas_64((volatile unsigned long long*)ptr,
                         comparand, replacement) == comparand;
#else
#if defined(__GNUC__) && defined(__i386)
    // XXX -march=native
    //#warning "please compile with -march=i686 or better"
#endif
#pragma message("slow compare_and_swap_64")
    bool res = false;
#pragma omp critical
    {
      if (*ptr == comparand)
        {
          *ptr = replacement;
          res = true;
        }
    }
    return res;
#endif
  }

  /** @brief Compare @c *ptr and @c comparand. If equal, let @c
   *  *ptr=replacement and return @c true, return @c false otherwise.
   *
   *  Implementation is heavily platform-dependent.
   *  @param ptr Pointer to signed integer.
   *  @param comparand Compare value.
   *  @param replacement Replacement value. */
  template<typename T>
  inline bool
  compare_and_swap(volatile T* ptr, T comparand, T replacement)
  {
    if (sizeof(T) == sizeof(int32))
      return compare_and_swap_32((volatile int32*) ptr, (int32)comparand,
                                 (int32)replacement);
    else if (sizeof(T) == sizeof(int64))
      return compare_and_swap_64((volatile int64*) ptr, (int64)comparand,
                                 (int64)replacement);
    else
      _GLIBCXX_PARALLEL_ASSERT(false);
  }
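  // Usage sketch (illustrative; `atomic_max' is a hypothetical helper, not
  // part of this header): the classic CAS retry loop, here updating a shared
  // maximum.  The store is retried whenever another thread modified *ptr
  // between the read and the compare_and_swap.
  //
  //   inline void
  //   atomic_max(volatile int32* ptr, int32 val)
  //   {
  //     int32 old;
  //     do
  //       {
  //         old = *ptr;
  //         if (old >= val)
  //           return;             // already at least val; nothing to do
  //       }
  //     while (!compare_and_swap(ptr, old, val));
  //   }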
  /** @brief Yield the control to another thread, without waiting for
   *  the end of the time slice. */
  inline void
  yield()
  {
#if defined (_WIN32) && !defined (__CYGWIN__)
    Sleep(0);
#else
    sched_yield();
#endif
  }
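  // Usage sketch (illustrative; `ready' is a hypothetical flag, not part of
  // this header): a polite spin-wait that gives up the processor instead of
  // burning its whole time slice while the flag is still unset.
  //
  //   volatile int32 ready = 0;
  //   ...
  //   while (ready == 0)
  //     yield();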
} // end namespace

#endif