[tinycc.git] / lib / stdatomic.c
// for libtcc1, avoid including files that are not part of tcc
// #include <stdint.h>
#define uint8_t unsigned char
#define uint16_t unsigned short
#define uint32_t unsigned int
#define uint64_t unsigned long long
#define bool _Bool
#define false 0
#define true 1
#define __ATOMIC_RELAXED 0
#define __ATOMIC_CONSUME 1
#define __ATOMIC_ACQUIRE 2
#define __ATOMIC_RELEASE 3
#define __ATOMIC_ACQ_REL 4
#define __ATOMIC_SEQ_CST 5
typedef __SIZE_TYPE__ size_t;

void __atomic_thread_fence(int memorder);
#define MemoryBarrier(memorder) __atomic_thread_fence(memorder)
#if defined __i386__ || defined __x86_64__
/* lock cmpxchg: the accumulator ("a"/"0") holds the expected value and
   receives the old one; the memory operand is updated only if they match. */
#define ATOMIC_COMPARE_EXCHANGE(TYPE, MODE, SUFFIX) \
    bool __atomic_compare_exchange_##MODE \
        (volatile void *atom, void *ref, TYPE xchg, \
         bool weak, int success_memorder, int failure_memorder) \
    { \
        TYPE rv; \
        TYPE cmp = *(TYPE *)ref; \
        __asm__ volatile( \
            "lock cmpxchg" SUFFIX " %2,%1\n" \
            : "=a" (rv), "+m" (*(TYPE *)atom) \
            : "q" (xchg), "0" (cmp) \
            : "memory" \
        ); \
        *(TYPE *)ref = rv; \
        return (rv == cmp); \
    }
#else
#define ATOMIC_COMPARE_EXCHANGE(TYPE, MODE, SUFFIX) \
    extern bool __atomic_compare_exchange_##MODE \
        (volatile void *atom, void *ref, TYPE xchg, \
         bool weak, int success_memorder, int failure_memorder);
#endif
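/*
 * Added illustrative note (not part of the upstream file): on i386/x86_64 the
 * macro above defines the sized entry points __atomic_compare_exchange_{1,2,4}
 * (and _8 on 64-bit targets). Approximate caller-side shape, using a
 * hypothetical counter for illustration:
 *
 *   unsigned int counter = 0;
 *   unsigned int expected = 0;
 *   // Stores 1 if counter still equals expected; otherwise expected is
 *   // reloaded with the current value of counter and false is returned.
 *   bool ok = __atomic_compare_exchange_4(&counter, &expected, 1, false,
 *                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
 */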
#define ATOMIC_LOAD(TYPE, MODE) \
    TYPE __atomic_load_##MODE(const volatile void *atom, int memorder) \
    { \
        MemoryBarrier(__ATOMIC_ACQUIRE); \
        return *(volatile TYPE *)atom; \
    }
#define ATOMIC_STORE(TYPE, MODE) \
    void __atomic_store_##MODE(volatile void *atom, TYPE value, int memorder) \
    { \
        *(volatile TYPE *)atom = value; \
        MemoryBarrier(__ATOMIC_ACQ_REL); \
    }
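/*
 * Added illustrative sketch (not part of the upstream file): for MODE == 4 the
 * two macros above expand roughly to
 *
 *   uint32_t __atomic_load_4(const volatile void *atom, int memorder)
 *   {
 *       MemoryBarrier(__ATOMIC_ACQUIRE);
 *       return *(volatile uint32_t *)atom;
 *   }
 *
 *   void __atomic_store_4(volatile void *atom, uint32_t value, int memorder)
 *   {
 *       *(volatile uint32_t *)atom = value;
 *       MemoryBarrier(__ATOMIC_ACQ_REL);
 *   }
 *
 * i.e. a volatile access bracketed by the full __atomic_thread_fence barrier,
 * regardless of the memorder argument.
 */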
#define ATOMIC_GEN_OP(TYPE, MODE, NAME, OP, RET) \
    TYPE __atomic_##NAME##_##MODE(volatile void *atom, TYPE value, int memorder) \
    { \
        TYPE xchg, cmp; \
        __atomic_load((TYPE *)atom, (TYPE *)&cmp, __ATOMIC_RELAXED); \
        do { \
            xchg = (OP); \
        } while (!__atomic_compare_exchange((TYPE *)atom, &cmp, &xchg, true, \
                                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)); \
        return RET; \
    }
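/*
 * Added illustrative sketch (not part of the upstream file): every
 * read-modify-write helper below is a compare-and-swap retry loop built on
 * this macro. For example, ATOMIC_FETCH_ADD(uint32_t, 4) expands roughly to
 *
 *   uint32_t __atomic_fetch_add_4(volatile void *atom, uint32_t value, int memorder)
 *   {
 *       uint32_t xchg, cmp;
 *       __atomic_load((uint32_t *)atom, (uint32_t *)&cmp, __ATOMIC_RELAXED);
 *       do {
 *           xchg = (cmp + value);
 *       } while (!__atomic_compare_exchange((uint32_t *)atom, &cmp, &xchg, true,
 *                                           __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
 *       return cmp;  // fetch_add returns the previous value
 *   }
 */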
#define ATOMIC_EXCHANGE(TYPE, MODE) \
    ATOMIC_GEN_OP(TYPE, MODE, exchange, value, cmp)
#define ATOMIC_ADD_FETCH(TYPE, MODE) \
    ATOMIC_GEN_OP(TYPE, MODE, add_fetch, (cmp + value), xchg)
#define ATOMIC_SUB_FETCH(TYPE, MODE) \
    ATOMIC_GEN_OP(TYPE, MODE, sub_fetch, (cmp - value), xchg)
#define ATOMIC_AND_FETCH(TYPE, MODE) \
    ATOMIC_GEN_OP(TYPE, MODE, and_fetch, (cmp & value), xchg)
#define ATOMIC_OR_FETCH(TYPE, MODE) \
    ATOMIC_GEN_OP(TYPE, MODE, or_fetch, (cmp | value), xchg)
#define ATOMIC_XOR_FETCH(TYPE, MODE) \
    ATOMIC_GEN_OP(TYPE, MODE, xor_fetch, (cmp ^ value), xchg)
#define ATOMIC_NAND_FETCH(TYPE, MODE) \
    ATOMIC_GEN_OP(TYPE, MODE, nand_fetch, ~(cmp & value), xchg)
#define ATOMIC_FETCH_ADD(TYPE, MODE) \
    ATOMIC_GEN_OP(TYPE, MODE, fetch_add, (cmp + value), cmp)
#define ATOMIC_FETCH_SUB(TYPE, MODE) \
    ATOMIC_GEN_OP(TYPE, MODE, fetch_sub, (cmp - value), cmp)
#define ATOMIC_FETCH_AND(TYPE, MODE) \
    ATOMIC_GEN_OP(TYPE, MODE, fetch_and, (cmp & value), cmp)
#define ATOMIC_FETCH_OR(TYPE, MODE) \
    ATOMIC_GEN_OP(TYPE, MODE, fetch_or, (cmp | value), cmp)
#define ATOMIC_FETCH_XOR(TYPE, MODE) \
    ATOMIC_GEN_OP(TYPE, MODE, fetch_xor, (cmp ^ value), cmp)
#define ATOMIC_FETCH_NAND(TYPE, MODE) \
    ATOMIC_GEN_OP(TYPE, MODE, fetch_nand, ~(cmp & value), cmp)
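/*
 * Added note: the *_FETCH wrappers pass RET == xchg and therefore return the
 * updated value, while the FETCH_* wrappers pass RET == cmp and return the
 * previous value, mirroring the GCC __atomic_OP_fetch / __atomic_fetch_OP
 * builtin pairs.
 */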
#define ATOMIC_GEN(TYPE, SIZE, SUFFIX) \
    ATOMIC_STORE(TYPE, SIZE) \
    ATOMIC_LOAD(TYPE, SIZE) \
    ATOMIC_COMPARE_EXCHANGE(TYPE, SIZE, SUFFIX) \
    ATOMIC_EXCHANGE(TYPE, SIZE) \
    ATOMIC_ADD_FETCH(TYPE, SIZE) \
    ATOMIC_SUB_FETCH(TYPE, SIZE) \
    ATOMIC_AND_FETCH(TYPE, SIZE) \
    ATOMIC_OR_FETCH(TYPE, SIZE) \
    ATOMIC_XOR_FETCH(TYPE, SIZE) \
    ATOMIC_NAND_FETCH(TYPE, SIZE) \
    ATOMIC_FETCH_ADD(TYPE, SIZE) \
    ATOMIC_FETCH_SUB(TYPE, SIZE) \
    ATOMIC_FETCH_AND(TYPE, SIZE) \
    ATOMIC_FETCH_OR(TYPE, SIZE) \
    ATOMIC_FETCH_XOR(TYPE, SIZE) \
    ATOMIC_FETCH_NAND(TYPE, SIZE)
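/*
 * Added note: each ATOMIC_GEN(TYPE, SIZE, SUFFIX) instantiation below emits
 * the complete helper set for one access size, e.g. ATOMIC_GEN(uint32_t, 4, "l")
 * produces __atomic_store_4, __atomic_load_4, __atomic_compare_exchange_4,
 * __atomic_exchange_4, the six __atomic_*_fetch_4 helpers and the six
 * __atomic_fetch_*_4 helpers.
 */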
ATOMIC_GEN(uint8_t, 1, "b")
ATOMIC_GEN(uint16_t, 2, "w")
ATOMIC_GEN(uint32_t, 4, "l")
#if defined __x86_64__ || defined __aarch64__ || defined __riscv
ATOMIC_GEN(uint64_t, 8, "q")
#endif
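/*
 * Added note: 8-byte helpers are only generated for the 64-bit targets listed
 * above, consistent with __atomic_is_lock_free() further down reporting size 8
 * as not lock-free on the other targets.
 */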
/* use aliases to allow building with gcc/clang */
#ifdef __TINYC__
#define ATOMIC(x) __atomic_##x
#else
#define ATOMIC(x) __tcc_atomic_##x
#endif
void ATOMIC(signal_fence) (int memorder)
{
}

void ATOMIC(thread_fence) (int memorder)
{
#if defined __i386__
    __asm__ volatile("lock orl $0, (%esp)");
#elif defined __x86_64__
    __asm__ volatile("lock orq $0, (%rsp)");
#elif defined __arm__
    __asm__ volatile(".int 0xee070fba"); // mcr p15, 0, r0, c7, c10, 5
#elif defined __aarch64__
    __asm__ volatile(".int 0xd5033bbf"); // dmb ish
#elif defined __riscv
    __asm__ volatile(".int 0x0ff0000f"); // fence iorw,iorw
#endif
}
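/*
 * Added note: when built with tcc, MemoryBarrier() in the load/store helpers
 * above resolves to this __atomic_thread_fence, so they all funnel through one
 * full barrier. The raw .int values are the encodings of the barrier
 * instructions named in the trailing comments (presumably emitted as data so
 * the file assembles even without assembler support for those mnemonics).
 */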
bool ATOMIC(is_lock_free) (unsigned long size, const volatile void *ptr)
{
    bool ret;

    switch (size) {
    case 1: ret = true; break;
    case 2: ret = true; break;
    case 4: ret = true; break;
#if defined __x86_64__ || defined __aarch64__ || defined __riscv
    case 8: ret = true; break;
#else
    case 8: ret = false; break;
#endif
    default: ret = false; break;
    }
    return ret;
}
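/*
 * Added illustrative note (not part of the upstream file): C11's
 * atomic_is_lock_free() is expected to map onto this helper, roughly as
 *
 *   _Atomic unsigned int v;                          // hypothetical object
 *   bool lf = __atomic_is_lock_free(sizeof v, &v);   // true for sizes 1/2/4,
 *                                                    // and 8 only on 64-bit targets
 */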
#ifndef __TINYC__
void __atomic_signal_fence(int memorder) __attribute__((alias("__tcc_atomic_signal_fence")));
void __atomic_thread_fence(int memorder) __attribute__((alias("__tcc_atomic_thread_fence")));
bool __atomic_is_lock_free(unsigned long size, const volatile void *ptr) __attribute__((alias("__tcc_atomic_is_lock_free")));
#endif