Implement the use of PBuffers for offscreen textures.
[wine/wine64.git] / libs / port / interlocked.c
blob18abe1b7af764e0b311b0e54e68ed25a13061d22
1 /*
2 * interlocked functions
4 * Copyright 1996 Alexandre Julliard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include "config.h"
22 #include "wine/port.h"
24 #ifdef __i386__
26 #ifdef __GNUC__
/* long interlocked_cmpxchg( long *dest, long xchg, long compare );
 * Atomically: if (*dest == compare) *dest = xchg; always returns the old *dest.
 * cdecl stack layout: 4(%esp)=dest, 8(%esp)=xchg, 12(%esp)=compare.
 * cmpxchg compares %eax (=compare) with (%edx) and leaves the old value in
 * %eax either way, which is exactly the required return value. */
__ASM_GLOBAL_FUNC(interlocked_cmpxchg,
                  "movl 12(%esp),%eax\n\t"
                  "movl 8(%esp),%ecx\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; cmpxchgl %ecx,(%edx)\n\t"
                  "ret");
/* void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare );
 * Identical to the long version: pointers are 32 bits on i386, so the same
 * 32-bit cmpxchgl sequence works for both. */
__ASM_GLOBAL_FUNC(interlocked_cmpxchg_ptr,
                  "movl 12(%esp),%eax\n\t"
                  "movl 8(%esp),%ecx\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; cmpxchgl %ecx,(%edx)\n\t"
                  "ret");
/* long interlocked_xchg( long *dest, long val );
 * Atomically stores val into *dest and returns the previous value.
 * xchg with a memory operand is implicitly locked; the explicit lock prefix
 * is redundant but harmless. */
__ASM_GLOBAL_FUNC(interlocked_xchg,
                  "movl 8(%esp),%eax\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; xchgl %eax,(%edx)\n\t"
                  "ret");
/* void *interlocked_xchg_ptr( void **dest, void *val );
 * Same sequence as interlocked_xchg: 32-bit pointers on i386. */
__ASM_GLOBAL_FUNC(interlocked_xchg_ptr,
                  "movl 8(%esp),%eax\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; xchgl %eax,(%edx)\n\t"
                  "ret");
/* long interlocked_xchg_add( long *dest, long incr );
 * Atomically adds incr to *dest; xadd leaves the pre-add value in %eax,
 * which is the required return value. */
__ASM_GLOBAL_FUNC(interlocked_xchg_add,
                  "movl 8(%esp),%eax\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; xaddl %eax,(%edx)\n\t"
                  "ret");
56 #elif defined(_MSC_VER)
/* Atomic compare-exchange (MSVC naked-function version of the GNU asm
 * above): if (*dest == compare) *dest = xchg; returns the old *dest. */
__declspec(naked) long interlocked_cmpxchg( long *dest, long xchg, long compare )
{
    __asm mov eax, 12[esp];
    __asm mov ecx, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock cmpxchg [edx], ecx;
    __asm ret;
}
/* Pointer-sized compare-exchange; identical to the long version since
 * pointers are 32 bits on i386. */
__declspec(naked) void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare )
{
    __asm mov eax, 12[esp];
    __asm mov ecx, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock cmpxchg [edx], ecx;
    __asm ret;
}
/* Atomic exchange: stores val, returns the previous *dest.  xchg with a
 * memory operand is implicitly locked; the lock prefix is redundant. */
__declspec(naked) long interlocked_xchg( long *dest, long val )
{
    __asm mov eax, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock xchg [edx], eax;
    __asm ret;
}
/* Pointer-sized atomic exchange; same sequence as interlocked_xchg. */
__declspec(naked) void *interlocked_xchg_ptr( void **dest, void *val )
{
    __asm mov eax, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock xchg [edx], eax;
    __asm ret;
}
/* Atomic fetch-and-add: xadd leaves the pre-add value in eax (the return
 * value) and stores the sum in *dest. */
__declspec(naked) long interlocked_xchg_add( long *dest, long incr )
{
    __asm mov eax, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock xadd [edx], eax;
    __asm ret;
}
100 #else
101 # error You must implement the interlocked* functions for your compiler
102 #endif
104 #elif defined(__x86_64__)
106 #ifdef __GNUC__
/* long interlocked_cmpxchg( long *dest, long xchg, long compare );
 * SysV AMD64 args: %rdi = dest, %esi = xchg, %edx = compare.
 * NOTE(review): 32-bit cmpxchgl on a 'long' — on LP64 this only operates on
 * the low 32 bits; presumably 'long' here means the Windows 32-bit LONG.
 * Confirm against the header that declares these functions. */
__ASM_GLOBAL_FUNC(interlocked_cmpxchg,
                  "mov %edx, %eax\n\t"
                  "lock cmpxchgl %esi,(%rdi)\n\t"
                  "ret");
/* void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare );
 * Full 64-bit compare-exchange (cmpxchgq) for pointer-sized operands. */
__ASM_GLOBAL_FUNC(interlocked_cmpxchg_ptr,
                  "mov %rdx, %rax\n\t"
                  "lock cmpxchgq %rsi,(%rdi)\n\t"
                  "ret");
/* long interlocked_xchg( long *dest, long val );
 * Atomic 32-bit exchange; xchg is implicitly locked, the prefix is
 * redundant but harmless. */
__ASM_GLOBAL_FUNC(interlocked_xchg,
                  "mov %esi, %eax\n\t"
                  "lock xchgl %eax, (%rdi)\n\t"
                  "ret");
/* void *interlocked_xchg_ptr( void **dest, void *val );
 * 64-bit atomic exchange for pointer-sized operands. */
__ASM_GLOBAL_FUNC(interlocked_xchg_ptr,
                  "mov %rsi, %rax\n\t"
                  "lock xchgq %rax,(%rdi)\n\t"
                  "ret");
/* long interlocked_xchg_add( long *dest, long incr );
 * Atomic 32-bit fetch-and-add; xadd leaves the pre-add value in %eax. */
__ASM_GLOBAL_FUNC(interlocked_xchg_add,
                  "mov %esi, %eax\n\t"
                  "lock xaddl %eax, (%rdi)\n\t"
                  "ret");
129 #else
130 # error You must implement the interlocked* functions for your compiler
131 #endif
133 #elif defined(__powerpc__)
/* Atomic compare-exchange via PowerPC load-linked/store-conditional:
 * lwarx reserves *dest; if it differs from 'compare' we bail out, otherwise
 * stwcx. stores 'xchg' and the loop retries on reservation loss.
 * isync orders subsequent loads (acquire barrier).  Always returns the
 * value *dest held before the operation.
 * NOTE(review): 32-bit lwarx/stwcx. on a pointer — assumes this path is
 * only built for 32-bit PowerPC; confirm. */
void* interlocked_cmpxchg_ptr( void **dest, void* xchg, void* compare)
{
    long ret = 0;
    long scratch;
    __asm__ __volatile__(
        "0: lwarx %0,0,%2\n"
        " xor. %1,%4,%0\n"
        " bne 1f\n"
        " stwcx. %3,0,%2\n"
        " bne- 0b\n"
        " isync\n"
        "1: "
        : "=&r"(ret), "=&r"(scratch)
        : "r"(dest), "r"(xchg), "r"(compare)
        : "cr0","memory");
    return (void*)ret;
}
/* Atomic compare-exchange on a long; same LL/SC loop as the pointer
 * version above.  The xor. sets cr0 from (old ^ compare), so bne skips the
 * store when the values differ; returns the old *dest either way. */
long interlocked_cmpxchg( long *dest, long xchg, long compare)
{
    long ret = 0;
    long scratch;
    __asm__ __volatile__(
        "0: lwarx %0,0,%2\n"
        " xor. %1,%4,%0\n"
        " bne 1f\n"
        " stwcx. %3,0,%2\n"
        " bne- 0b\n"
        " isync\n"
        "1: "
        : "=&r"(ret), "=&r"(scratch)
        : "r"(dest), "r"(xchg), "r"(compare)
        : "cr0","memory","r0");
    return ret;
}
/* Atomic fetch-and-add via LL/SC: the loop stores old+incr, then
 * ret-incr recovers the pre-add value for the return.  'zero' supplies a
 * zero index register for the lwarx/stwcx. addressing mode. */
long interlocked_xchg_add( long *dest, long incr )
{
    long ret = 0;
    long zero = 0;
    __asm__ __volatile__(
        "0: lwarx %0, %3, %1\n"
        " add %0, %2, %0\n"
        " stwcx. %0, %3, %1\n"
        " bne- 0b\n"
        " isync\n"
        : "=&r" (ret)
        : "r"(dest), "r"(incr), "r"(zero)
        : "cr0", "memory", "r0"
    );
    return ret-incr;
}
/* Atomic exchange via LL/SC: retries until stwcx. succeeds; returns the
 * previous *dest.  isync provides the acquire barrier. */
long interlocked_xchg( long* dest, long val )
{
    long ret = 0;
    __asm__ __volatile__(
        "0: lwarx %0,0,%1\n"
        " stwcx. %2,0,%1\n"
        " bne- 0b\n"
        " isync\n"
        : "=&r"(ret)
        : "r"(dest), "r"(val)
        : "cr0","memory","r0");
    return ret;
}
/* Pointer-sized atomic exchange; same LL/SC loop as interlocked_xchg.
 * NOTE(review): 32-bit lwarx/stwcx. on a pointer — assumes 32-bit
 * PowerPC only; confirm. */
void* interlocked_xchg_ptr( void** dest, void* val )
{
    void *ret = NULL;
    __asm__ __volatile__(
        "0: lwarx %0,0,%1\n"
        " stwcx. %2,0,%1\n"
        " bne- 0b \n"
        " isync\n"
        : "=&r"(ret)
        : "r"(dest), "r"(val)
        : "cr0","memory","r0");
    return ret;
}
215 #elif defined(__sparc__) && defined(__sun__)
/*
 * As the earlier Sparc processors lack the necessary atomic instructions,
 * I'm simply falling back to the library-provided _lwp_mutex routines
 * to ensure mutual exclusion in a way appropriate for the current
 * architecture.
 *
 * FIXME: If we have the compare-and-swap instruction (Sparc v9 and above)
 * we could use it to speed up the Interlocked operations ...
 */
#include <synch.h>

/* Single process-wide LWP mutex serializing every interlocked_* emulation
 * in this Sparc fallback path. */
static lwp_mutex_t interlocked_mutex = DEFAULTMUTEX;
229 long interlocked_cmpxchg( long *dest, long xchg, long compare )
231 _lwp_mutex_lock( &interlocked_mutex );
232 if (*dest == compare) *dest = xchg;
233 else compare = *dest;
234 _lwp_mutex_unlock( &interlocked_mutex );
235 return compare;
238 void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare )
240 _lwp_mutex_lock( &interlocked_mutex );
241 if (*dest == compare) *dest = xchg;
242 else compare = *dest;
243 _lwp_mutex_unlock( &interlocked_mutex );
244 return compare;
247 long interlocked_xchg( long *dest, long val )
249 long retv;
250 _lwp_mutex_lock( &interlocked_mutex );
251 retv = *dest;
252 *dest = val;
253 _lwp_mutex_unlock( &interlocked_mutex );
254 return retv;
257 void *interlocked_xchg_ptr( void **dest, void *val )
259 long retv;
260 _lwp_mutex_lock( &interlocked_mutex );
261 retv = *dest;
262 *dest = val;
263 _lwp_mutex_unlock( &interlocked_mutex );
264 return retv;
267 long interlocked_xchg_add( long *dest, long incr )
269 long retv;
270 _lwp_mutex_lock( &interlocked_mutex );
271 retv = *dest;
272 *dest += incr;
273 _lwp_mutex_unlock( &interlocked_mutex );
274 return retv;
277 #elif defined(__ALPHA__) && defined(__GNUC__)
279 __ASM_GLOBAL_FUNC(interlocked_cmpxchg,
280 "L0cmpxchg:\n\t"
281 "ldl_l $0,0($16)\n\t"
282 "cmpeq $0,$18,$1\n\t"
283 "beq $1,L1cmpxchg\n\t"
284 "mov $17,$0\n\t"
285 "stl_c $0,0($16)\n\t"
286 "beq $0,L0cmpxchg\n\t"
287 "mov $18,$0\n"
288 "L1cmpxchg:\n\t"
289 "mb");
291 __ASM_GLOBAL_FUNC(interlocked_cmpxchg_ptr,
292 "L0cmpxchg_ptr:\n\t"
293 "ldq_l $0,0($16)\n\t"
294 "cmpeq $0,$18,$1\n\t"
295 "beq $1,L1cmpxchg_ptr\n\t"
296 "mov $17,$0\n\t"
297 "stq_c $0,0($16)\n\t"
298 "beq $0,L0cmpxchg_ptr\n\t"
299 "mov $18,$0\n"
300 "L1cmpxchg_ptr:\n\t"
301 "mb");
303 __ASM_GLOBAL_FUNC(interlocked_xchg,
304 "L0xchg:\n\t"
305 "ldl_l $0,0($16)\n\t"
306 "mov $17,$1\n\t"
307 "stl_c $1,0($16)\n\t"
308 "beq $1,L0xchg\n\t"
309 "mb");
311 __ASM_GLOBAL_FUNC(interlocked_xchg_ptr,
312 "L0xchg_ptr:\n\t"
313 "ldq_l $0,0($16)\n\t"
314 "mov $17,$1\n\t"
315 "stq_c $1,0($16)\n\t"
316 "beq $1,L0xchg_ptr\n\t"
317 "mb");
319 __ASM_GLOBAL_FUNC(interlocked_xchg_add,
320 "L0xchg_add:\n\t"
321 "ldl_l $0,0($16)\n\t"
322 "addl $0,$17,$1\n\t"
323 "stl_c $1,0($16)\n\t"
324 "beq $1,L0xchg_add\n\t"
325 "mb");
327 #else
328 # error You must implement the interlocked* functions for your CPU
329 #endif