/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/Alignment.h"
#include "mozilla/Assertions.h"

#include "jit/AtomicOperations.h"
#include "jsapi-tests/tests.h"
#include "vm/ArrayBufferObject.h"
#include "vm/SharedMem.h"
#include "vm/Uint8Clamped.h"
#include "wasm/WasmFeatures.h"

using namespace js;

// Machinery to disguise pointer addresses to the C++ compiler -- quite possibly
// not thread-safe.

extern void setHiddenPointer(void* p);
extern void* getHiddenPointer();

void* hidePointerValue(void* p) {
  setHiddenPointer(p);
  return getHiddenPointer();
}
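
// The out-of-line definitions of setHiddenPointer/getHiddenPointer live in a
// separate translation unit so the compiler cannot see through the calls and
// constant-fold the pointer away.  A minimal sketch of what they might look
// like (names illustrative; the real definitions live elsewhere in the
// jsapi-tests sources):
//
//   static void* gHiddenPointer;
//   void setHiddenPointer(void* p) { gHiddenPointer = p; }
//   void* getHiddenPointer() { return gHiddenPointer; }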

//////////////////////////////////////////////////////////////////////

// Lock-freedom predicates

BEGIN_REUSABLE_TEST(testAtomicLockFree8) {
  // isLockfree8() must not return true if there are no 8-byte atomics.

  CHECK(!jit::AtomicOperations::isLockfree8() ||
        jit::AtomicOperations::hasAtomic8());

  // We must have lock-free 8-byte atomics on every platform where we support
  // wasm, but we don't care otherwise.

  CHECK(!wasm::HasSupport(cx) || jit::AtomicOperations::isLockfree8());
  return true;
}
END_TEST(testAtomicLockFree8)

// The JS spec requires specific behavior for all sizes but 1, 2, and 8.

BEGIN_REUSABLE_TEST(testAtomicLockFreeJS) {
  static_assert(jit::AtomicOperations::isLockfreeJS(1) ==
                true);  // false is allowed by spec but not in SpiderMonkey
  static_assert(jit::AtomicOperations::isLockfreeJS(2) == true);   // ditto
  static_assert(jit::AtomicOperations::isLockfreeJS(8) == true);   // ditto
  static_assert(jit::AtomicOperations::isLockfreeJS(3) == false);  // required
  static_assert(jit::AtomicOperations::isLockfreeJS(4) == true);   // required
  static_assert(jit::AtomicOperations::isLockfreeJS(5) == false);  // required
  static_assert(jit::AtomicOperations::isLockfreeJS(6) == false);  // required
  static_assert(jit::AtomicOperations::isLockfreeJS(7) == false);  // required
  return true;
}
END_TEST(testAtomicLockFreeJS)

//////////////////////////////////////////////////////////////////////

// Fence

// This only tests that fenceSeqCst is defined and that it doesn't crash if we
// call it, but it has no return value and its effect is not observable here.
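//
// For illustration only: the fence matters in cross-thread patterns such as
// the classic store-buffering test, which cannot be exercised from this
// single-threaded jsapi-test.  With two shared atomic flags x and y, both
// initially 0, and one thread per column, the SeqCst fences forbid the
// outcome r1 == 0 && r2 == 0:
//
//   // Thread 1:           // Thread 2:
//   x = 1;                 y = 1;
//   fenceSeqCst();         fenceSeqCst();
//   r1 = y;                r2 = x;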

BEGIN_REUSABLE_TEST(testAtomicFence) {
  jit::AtomicOperations::fenceSeqCst();
  return true;
}
END_TEST(testAtomicFence)

//////////////////////////////////////////////////////////////////////

// Memory access primitives

// These tests for the atomic load and store primitives ascertain that the
// primitives are defined and that they load and store the values they should,
// but not that the primitives are actually atomic with respect to the memory
// subsystem.

// Memory for testing atomics. This must be aligned to the natural alignment of
// the type we're testing; for now, use 8-byte alignment for all.

MOZ_ALIGNED_DECL(8, static uint8_t atomicMem[8]);
MOZ_ALIGNED_DECL(8, static uint8_t atomicMem2[8]);

// T is the primitive type we're testing, and A and B are references to
// constant bindings holding values of that type.
//
// No bytes of A and B should be 0 or FF. A+B and A-B must not overflow.

#define ATOMIC_TESTS(T, A, B) \
  T* q = (T*)hidePointerValue((void*)atomicMem); \
  *q = A; \
  SharedMem<T*> p = \
      SharedMem<T*>::shared((T*)hidePointerValue((T*)atomicMem)); \
  CHECK(*q == A); \
  CHECK(jit::AtomicOperations::loadSeqCst(p) == A); \
  CHECK(*q == A); \
  jit::AtomicOperations::storeSeqCst(p, B); \
  CHECK(*q == B); \
  CHECK(jit::AtomicOperations::exchangeSeqCst(p, A) == B); \
  CHECK(*q == A); \
  CHECK(jit::AtomicOperations::compareExchangeSeqCst(p, (T)0, (T)1) == \
        A); /*failure*/ \
  CHECK(*q == A); \
  CHECK(jit::AtomicOperations::compareExchangeSeqCst(p, A, B) == \
        A); /*success*/ \
  CHECK(*q == B); \
  *q = A; \
  CHECK(jit::AtomicOperations::fetchAddSeqCst(p, B) == A); \
  CHECK(*q == A + B); \
  *q = A; \
  CHECK(jit::AtomicOperations::fetchSubSeqCst(p, B) == A); \
  CHECK(*q == A - B); \
  *q = A; \
  CHECK(jit::AtomicOperations::fetchAndSeqCst(p, B) == A); \
  CHECK(*q == (A & B)); \
  *q = A; \
  CHECK(jit::AtomicOperations::fetchOrSeqCst(p, B) == A); \
  CHECK(*q == (A | B)); \
  *q = A; \
  CHECK(jit::AtomicOperations::fetchXorSeqCst(p, B) == A); \
  CHECK(*q == (A ^ B)); \
  *q = A; \
  CHECK(jit::AtomicOperations::loadSafeWhenRacy(p) == A); \
  jit::AtomicOperations::storeSafeWhenRacy(p, B); \
  CHECK(*q == B); \
  T* q2 = (T*)hidePointerValue((void*)atomicMem2); \
  SharedMem<T*> p2 = \
      SharedMem<T*>::shared((T*)hidePointerValue((void*)atomicMem2)); \
  *q = A; \
  *q2 = B; \
  jit::AtomicOperations::memcpySafeWhenRacy(p2, p, sizeof(T)); \
  CHECK(*q2 == A); \
  *q = A; \
  *q2 = B; \
  jit::AtomicOperations::memcpySafeWhenRacy(p2, p.unwrap(), sizeof(T)); \
  CHECK(*q2 == A); \
  *q = A; \
  *q2 = B; \
  jit::AtomicOperations::memcpySafeWhenRacy(p2.unwrap(), p, sizeof(T)); \
  CHECK(*q2 == A); \
  *q = A; \
  *q2 = B; \
  jit::AtomicOperations::memmoveSafeWhenRacy(p2, p, sizeof(T)); \
  CHECK(*q2 == A); \
  *q = A; \
  *q2 = B; \
  jit::AtomicOperations::podCopySafeWhenRacy(p2, p, 1); \
  CHECK(*q2 == A); \
  *q = A; \
  *q2 = B; \
  jit::AtomicOperations::podMoveSafeWhenRacy(p2, p, 1); \
  CHECK(*q2 == A); \
  return true

BEGIN_REUSABLE_TEST(testAtomicOperationsU8) {
  const uint8_t A = 0xab;
  const uint8_t B = 0x37;
  ATOMIC_TESTS(uint8_t, A, B);
}
END_TEST(testAtomicOperationsU8)

BEGIN_REUSABLE_TEST(testAtomicOperationsI8) {
  const int8_t A = 0x3b;
  const int8_t B = 0x27;
  ATOMIC_TESTS(int8_t, A, B);
}
END_TEST(testAtomicOperationsI8)

BEGIN_REUSABLE_TEST(testAtomicOperationsU16) {
  const uint16_t A = 0xabdc;
  const uint16_t B = 0x3789;
  ATOMIC_TESTS(uint16_t, A, B);
}
END_TEST(testAtomicOperationsU16)

BEGIN_REUSABLE_TEST(testAtomicOperationsI16) {
  const int16_t A = 0x3bdc;
  const int16_t B = 0x2737;
  ATOMIC_TESTS(int16_t, A, B);
}
END_TEST(testAtomicOperationsI16)

BEGIN_REUSABLE_TEST(testAtomicOperationsU32) {
  const uint32_t A = 0xabdc0588;
  const uint32_t B = 0x37891942;
  ATOMIC_TESTS(uint32_t, A, B);
}
END_TEST(testAtomicOperationsU32)

BEGIN_REUSABLE_TEST(testAtomicOperationsI32) {
  const int32_t A = 0x3bdc0588;
  const int32_t B = 0x27371843;
  ATOMIC_TESTS(int32_t, A, B);
}
END_TEST(testAtomicOperationsI32)

BEGIN_REUSABLE_TEST(testAtomicOperationsU64) {
  if (!jit::AtomicOperations::hasAtomic8()) {
    return true;
  }

  const uint64_t A(0x9aadf00ddeadbeef);
  const uint64_t B(0x4eedbead1337f001);
  ATOMIC_TESTS(uint64_t, A, B);
}
END_TEST(testAtomicOperationsU64)

BEGIN_REUSABLE_TEST(testAtomicOperationsI64) {
  if (!jit::AtomicOperations::hasAtomic8()) {
    return true;
  }

  const int64_t A(0x2aadf00ddeadbeef);
  const int64_t B(0x4eedbead1337f001);
  ATOMIC_TESTS(int64_t, A, B);
}
END_TEST(testAtomicOperationsI64)

// T is the primitive float type we're testing, and A and B are references to
// constant bindings holding values of that type.
//
// Stay away from 0, NaN, infinities, and denormals; in particular, a NaN would
// defeat the equality CHECKs below, since NaN != NaN.

#define ATOMIC_FLOAT_TESTS(T, A, B) \
  T* q = (T*)hidePointerValue((void*)atomicMem); \
  *q = A; \
  SharedMem<T*> p = \
      SharedMem<T*>::shared((T*)hidePointerValue((T*)atomicMem)); \
  CHECK(*q == A); \
  CHECK(jit::AtomicOperations::loadSafeWhenRacy(p) == A); \
  jit::AtomicOperations::storeSafeWhenRacy(p, B); \
  CHECK(*q == B); \
  T* q2 = (T*)hidePointerValue((void*)atomicMem2); \
  SharedMem<T*> p2 = \
      SharedMem<T*>::shared((T*)hidePointerValue((void*)atomicMem2)); \
  *q = A; \
  *q2 = B; \
  jit::AtomicOperations::memcpySafeWhenRacy(p2, p, sizeof(T)); \
  CHECK(*q2 == A); \
  *q = A; \
  *q2 = B; \
  jit::AtomicOperations::memcpySafeWhenRacy(p2, p.unwrap(), sizeof(T)); \
  CHECK(*q2 == A); \
  *q = A; \
  *q2 = B; \
  jit::AtomicOperations::memcpySafeWhenRacy(p2.unwrap(), p, sizeof(T)); \
  CHECK(*q2 == A); \
  *q = A; \
  *q2 = B; \
  jit::AtomicOperations::memmoveSafeWhenRacy(p2, p, sizeof(T)); \
  CHECK(*q2 == A); \
  *q = A; \
  *q2 = B; \
  jit::AtomicOperations::podCopySafeWhenRacy(p2, p, 1); \
  CHECK(*q2 == A); \
  *q = A; \
  *q2 = B; \
  jit::AtomicOperations::podMoveSafeWhenRacy(p2, p, 1); \
  CHECK(*q2 == A); \
  return true

BEGIN_REUSABLE_TEST(testAtomicOperationsF32) {
  const float A(123.25);
  const float B(-987.75);
  ATOMIC_FLOAT_TESTS(float, A, B);
}
END_TEST(testAtomicOperationsF32)

BEGIN_REUSABLE_TEST(testAtomicOperationsF64) {
  const double A(123.25);
  const double B(-987.75);
  ATOMIC_FLOAT_TESTS(double, A, B);
}
END_TEST(testAtomicOperationsF64)

#define ATOMIC_CLAMPED_TESTS(T, A, B) \
  T* q = (T*)hidePointerValue((void*)atomicMem); \
  *q = A; \
  SharedMem<T*> p = \
      SharedMem<T*>::shared((T*)hidePointerValue((T*)atomicMem)); \
  CHECK(*q == A); \
  CHECK(jit::AtomicOperations::loadSafeWhenRacy(p) == A); \
  jit::AtomicOperations::storeSafeWhenRacy(p, B); \
  CHECK(*q == B); \
  return true

BEGIN_REUSABLE_TEST(testAtomicOperationsU8Clamped) {
  const uint8_clamped A(0xab);
  const uint8_clamped B(0x37);
  ATOMIC_CLAMPED_TESTS(uint8_clamped, A, B);
}
END_TEST(testAtomicOperationsU8Clamped)

#undef ATOMIC_TESTS
#undef ATOMIC_FLOAT_TESTS
#undef ATOMIC_CLAMPED_TESTS