/* libgcc/config/m68k/linux-atomic.c (from official-gcc.git).  */
/* Linux-specific atomic operations for m68k Linux.
   Copyright (C) 2011-2013 Free Software Foundation, Inc.
   Based on code contributed by CodeSourcery for ARM EABI Linux.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Coldfire dropped the CAS instruction from the base M68K ISA.

   GCC automatically issues an asm memory barrier when it encounters
   a __sync_synchronize builtin.  Thus, we do not need to define this
   builtin.

   We implement byte, short and int versions of each atomic operation
   using the kernel helper defined below.  There is no support for
   64-bit operations yet.  */
#include <asm/unistd.h>
#include <stdbool.h>
39 #ifndef __NR_atomic_cmpxchg_32
40 #define __NR_atomic_cmpxchg_32 335
41 #endif
43 /* Kernel helper for compare-and-exchange a 32-bit value. */
44 static inline unsigned
45 __kernel_cmpxchg (unsigned *mem, unsigned oldval, unsigned newval)
47 register unsigned *a0 asm("a0") = mem;
48 register unsigned d2 asm("d2") = oldval;
49 register unsigned d1 asm("d1") = newval;
50 register unsigned d0 asm("d0") = __NR_atomic_cmpxchg_32;
52 asm volatile ("trap #0"
53 : "=r"(d0), "=r"(d1), "=r"(a0)
54 : "r"(d0), "r"(d1), "r"(d2), "r"(a0)
55 : "memory", "a1");
57 return d0;
/* Keep these entry points out of the shared library's dynamic export
   table; they are implementation details of libgcc.  */
#define HIDDEN __attribute__ ((visibility ("hidden")))

/* Big endian masks: left-shift amount that places a 1- or 2-byte value
   at its position within the containing aligned 32-bit word, derived
   from the low address bits (see SUBWORD_SYNC_OP below).  */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

/* Value masks for the two sub-word widths.  */
#define MASK_1 0xffu
#define MASK_2 0xffffu

/* Build the public __sync_* symbol name from the operation and operand
   width; the oldval/newval selector distinguishes fetch-and-op from
   op-and-fetch.  */
#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH
/* Define a 32-bit __sync primitive: compute PFX_OP (old INF_OP val)
   (PFX_OP is an optional prefix operator, e.g. `~' for nand) and
   install it with the kernel cmpxchg helper, retrying until no other
   writer intervened.  RETURN (oldval or newval) selects which value
   the function returns.  */
#define WORD_SYNC_OP(OP, PFX_OP, INF_OP, RETURN)			\
  unsigned HIDDEN							\
  NAME##_##RETURN (OP, 4) (unsigned *ptr, unsigned val)			\
  {									\
    unsigned oldval, newval, cmpval = *ptr;				\
									\
    do {								\
      oldval = cmpval;							\
      newval = PFX_OP (oldval INF_OP val);				\
      /* cmpval becomes the freshly observed value on failure.  */	\
      cmpval = __kernel_cmpxchg (ptr, oldval, newval);			\
    } while (__builtin_expect (oldval != cmpval, 0));			\
									\
    return RETURN;							\
  }
/* Define a sub-word (1- or 2-byte) __sync primitive.  The kernel
   helper only works on 32-bit words, so the operation is performed on
   the aligned word containing *PTR: shift the sub-word operand into
   position, apply the operation, then splice the result back into the
   unchanged bytes of the word before the cmpxchg.  RETURN (oldval or
   newval) selects which value is shifted back down and returned.  */
#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN)	\
  TYPE HIDDEN								\
  NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE sval)			\
  {									\
    unsigned *wordptr = (unsigned *) ((unsigned long) ptr & ~3);	\
    unsigned int mask, shift, oldval, newval, cmpval, wval;		\
									\
    /* Big-endian: low address bits map to high bits in the word.  */	\
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
    wval = (sval & MASK_##WIDTH) << shift;				\
									\
    cmpval = *wordptr;							\
    do {								\
      oldval = cmpval;							\
      newval = PFX_OP (oldval INF_OP wval);				\
      /* Keep the bytes outside the operand untouched.  */		\
      newval = (newval & mask) | (oldval & ~mask);			\
      cmpval = __kernel_cmpxchg (wordptr, oldval, newval);		\
    } while (__builtin_expect (oldval != cmpval, 0));			\
									\
    return (RETURN >> shift) & MASK_##WIDTH;				\
  }
109 WORD_SYNC_OP (add, , +, oldval)
110 WORD_SYNC_OP (sub, , -, oldval)
111 WORD_SYNC_OP (or, , |, oldval)
112 WORD_SYNC_OP (and, , &, oldval)
113 WORD_SYNC_OP (xor, , ^, oldval)
114 WORD_SYNC_OP (nand, ~, &, oldval)
116 SUBWORD_SYNC_OP (add, , +, unsigned short, 2, oldval)
117 SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, oldval)
118 SUBWORD_SYNC_OP (or, , |, unsigned short, 2, oldval)
119 SUBWORD_SYNC_OP (and, , &, unsigned short, 2, oldval)
120 SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, oldval)
121 SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, oldval)
123 SUBWORD_SYNC_OP (add, , +, unsigned char, 1, oldval)
124 SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, oldval)
125 SUBWORD_SYNC_OP (or, , |, unsigned char, 1, oldval)
126 SUBWORD_SYNC_OP (and, , &, unsigned char, 1, oldval)
127 SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, oldval)
128 SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, oldval)
130 WORD_SYNC_OP (add, , +, newval)
131 WORD_SYNC_OP (sub, , -, newval)
132 WORD_SYNC_OP (or, , |, newval)
133 WORD_SYNC_OP (and, , &, newval)
134 WORD_SYNC_OP (xor, , ^, newval)
135 WORD_SYNC_OP (nand, ~, &, newval)
137 SUBWORD_SYNC_OP (add, , +, unsigned short, 2, newval)
138 SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, newval)
139 SUBWORD_SYNC_OP (or, , |, unsigned short, 2, newval)
140 SUBWORD_SYNC_OP (and, , &, unsigned short, 2, newval)
141 SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, newval)
142 SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, newval)
144 SUBWORD_SYNC_OP (add, , +, unsigned char, 1, newval)
145 SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, newval)
146 SUBWORD_SYNC_OP (or, , |, unsigned char, 1, newval)
147 SUBWORD_SYNC_OP (and, , &, unsigned char, 1, newval)
148 SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, newval)
149 SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, newval)
151 unsigned HIDDEN
152 __sync_val_compare_and_swap_4 (unsigned *ptr, unsigned oldval, unsigned newval)
154 return __kernel_cmpxchg (ptr, oldval, newval);
157 bool HIDDEN
158 __sync_bool_compare_and_swap_4 (unsigned *ptr, unsigned oldval,
159 unsigned newval)
161 return __kernel_cmpxchg (ptr, oldval, newval) == oldval;
164 #define SUBWORD_VAL_CAS(TYPE, WIDTH) \
165 TYPE HIDDEN \
166 __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE soldval, \
167 TYPE snewval) \
169 unsigned *wordptr = (unsigned *)((unsigned long) ptr & ~3); \
170 unsigned int mask, shift, woldval, wnewval; \
171 unsigned oldval, newval, cmpval; \
173 shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
174 mask = MASK_##WIDTH << shift; \
175 woldval = (soldval & MASK_##WIDTH) << shift; \
176 wnewval = (snewval & MASK_##WIDTH) << shift; \
177 cmpval = *wordptr; \
179 do { \
180 oldval = cmpval; \
181 if ((oldval & mask) != woldval) \
182 break; \
183 newval = (oldval & ~mask) | wnewval; \
184 cmpval = __kernel_cmpxchg (wordptr, oldval, newval); \
185 } while (__builtin_expect (oldval != cmpval, 0)); \
187 return (oldval >> shift) & MASK_##WIDTH; \
190 SUBWORD_VAL_CAS (unsigned short, 2)
191 SUBWORD_VAL_CAS (unsigned char, 1)
193 #define SUBWORD_BOOL_CAS(TYPE, WIDTH) \
194 bool HIDDEN \
195 __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
196 TYPE newval) \
198 return (__sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval) \
199 == oldval); \
202 SUBWORD_BOOL_CAS (unsigned short, 2)
203 SUBWORD_BOOL_CAS (unsigned char, 1)
205 #undef NAME_oldval
206 #define NAME_oldval(OP, WIDTH) __sync_lock_##OP##_##WIDTH
207 #define COMMA ,
209 WORD_SYNC_OP (test_and_set, , COMMA, oldval)
210 SUBWORD_SYNC_OP (test_and_set, , COMMA, unsigned char, 1, oldval)
211 SUBWORD_SYNC_OP (test_and_set, , COMMA, unsigned short, 2, oldval)