/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <cstdlib>

#include "gc/Memory.h"
#include "jsapi-tests/tests.h"

#if defined(XP_WIN)
#  include "util/WindowsWrapper.h"
#  include <psapi.h>
#elif defined(__wasi__)
// Nothing.
#else
#  include <algorithm>
#  include <errno.h>
#  include <sys/mman.h>
#  include <sys/resource.h>
#  include <sys/stat.h>
#  include <sys/types.h>
#  include <unistd.h>
#endif

BEGIN_TEST(testGCAllocator) {
#ifdef JS_64BIT
  // If we're using the scattershot allocator, this test does not apply.
  if (js::gc::UsingScattershotAllocator()) {
    return true;
  }
#endif

  size_t PageSize = js::gc::SystemPageSize();

  /* Finish any ongoing background free activity. */
  js::gc::FinishGC(cx);

  bool growUp = false;
  CHECK(addressesGrowUp(&growUp));

  if (growUp) {
    return testGCAllocatorUp(PageSize);
  } else {
    return testGCAllocatorDown(PageSize);
  }
}

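// Layout parameters for the tests below: the allocation under test is two
// Chunk-sized regions aligned to Alignment (also two chunks); StagingSize
// (16 chunks) is the size of the contiguous window that each pattern string
// in positionIsCorrect describes, and MaxTempChunks bounds the number of
// throwaway mappings used to fill the address space around that window.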
static const size_t Chunk = 512 * 1024;
static const size_t Alignment = 2 * Chunk;
static const int MaxTempChunks = 4096;
static const size_t StagingSize = 16 * Chunk;

bool addressesGrowUp(bool* resultOut) {
  /*
   * Try to detect whether the OS allocates memory in increasing or decreasing
   * address order by making several allocations and comparing the addresses.
   */

  static const unsigned ChunksToTest = 20;
  static const int ThresholdCount = 15;

  void* chunks[ChunksToTest];
  for (unsigned i = 0; i < ChunksToTest; i++) {
    chunks[i] = mapMemory(2 * Chunk);
    CHECK(chunks[i]);
  }

  int upCount = 0;
  int downCount = 0;

  for (unsigned i = 0; i < ChunksToTest - 1; i++) {
    if (chunks[i] < chunks[i + 1]) {
      upCount++;
    } else {
      downCount++;
    }
  }

  for (unsigned i = 0; i < ChunksToTest; i++) {
    unmapPages(chunks[i], 2 * Chunk);
  }

  /* Check results were mostly consistent. */
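  /* With ChunksToTest == 20 there are 19 adjacent comparisons, so a
   * difference of at least ThresholdCount (15) means at least 17 of them
   * agreed on a direction. */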
  CHECK(abs(upCount - downCount) >= ThresholdCount);

  *resultOut = upCount > downCount;

  return true;
}

size_t offsetFromAligned(void* p) { return uintptr_t(p) % Alignment; }

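// UseNormalAllocator exercises js::gc::MapAlignedPages directly;
// UseLastDitchAllocator exercises the slower fallback path via
// js::gc::TestMapAlignedPagesLastDitch (see positionIsCorrect below).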
enum AllocType { UseNormalAllocator, UseLastDitchAllocator };

bool testGCAllocatorUp(const size_t PageSize) {
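  // Over-allocate by (Alignment - PageSize) bytes so that an Alignment-aligned
  // window of StagingSize bytes is guaranteed to fit inside the mapping.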
  const size_t UnalignedSize = StagingSize + Alignment - PageSize;
  void* chunkPool[MaxTempChunks];
  // Allocate a contiguous chunk that we can partition for testing.
  void* stagingArea = mapMemory(UnalignedSize);
  if (!stagingArea) {
    return false;
  }
  // Ensure that the staging area is aligned.
  unmapPages(stagingArea, UnalignedSize);
  if (offsetFromAligned(stagingArea)) {
    const size_t Offset = offsetFromAligned(stagingArea);
    // Place the area at the lowest aligned address.
    stagingArea = (void*)(uintptr_t(stagingArea) + (Alignment - Offset));
  }
  mapMemoryAt(stagingArea, StagingSize);
  // Make sure there are no available chunks below the staging area.
  int tempChunks;
  if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, false)) {
    return false;
  }
  // Unmap the staging area so we can set it up for testing.
  unmapPages(stagingArea, StagingSize);
  // Check that the first chunk is used if it is aligned.
  CHECK(positionIsCorrect("xxooxxx---------", stagingArea, chunkPool,
                          tempChunks));
  // Check that the first chunk is used if it can be aligned.
  CHECK(positionIsCorrect("x-ooxxx---------", stagingArea, chunkPool,
                          tempChunks));
  // Check that an aligned chunk after a single unalignable chunk is used.
  CHECK(positionIsCorrect("x--xooxxx-------", stagingArea, chunkPool,
                          tempChunks));
  // Check that we fall back to the slow path after two unalignable chunks.
  CHECK(positionIsCorrect("x--xx--xoo--xxx-", stagingArea, chunkPool,
                          tempChunks));
  // Check that we also fall back after an unalignable and an alignable chunk.
  CHECK(positionIsCorrect("x--xx---x-oo--x-", stagingArea, chunkPool,
                          tempChunks));
  // Check that the last ditch allocator works as expected.
  CHECK(positionIsCorrect("x--xx--xx-oox---", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));
  // Check that the last ditch allocator can deal with naturally aligned chunks.
  CHECK(positionIsCorrect("x--xx--xoo------", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));

  // Clean up.
  while (--tempChunks >= 0) {
    unmapPages(chunkPool[tempChunks], 2 * Chunk);
  }
  return true;
}

bool testGCAllocatorDown(const size_t PageSize) {
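  // Mirror image of testGCAllocatorUp: the staging window is aligned to the
  // top of the over-allocated region, since addresses grow downward here.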
  const size_t UnalignedSize = StagingSize + Alignment - PageSize;
  void* chunkPool[MaxTempChunks];
  // Allocate a contiguous chunk that we can partition for testing.
  void* stagingArea = mapMemory(UnalignedSize);
  if (!stagingArea) {
    return false;
  }
  // Ensure that the staging area is aligned.
  unmapPages(stagingArea, UnalignedSize);
  if (offsetFromAligned(stagingArea)) {
    void* stagingEnd = (void*)(uintptr_t(stagingArea) + UnalignedSize);
    const size_t Offset = offsetFromAligned(stagingEnd);
    // Place the area at the highest aligned address.
    stagingArea = (void*)(uintptr_t(stagingEnd) - Offset - StagingSize);
  }
  mapMemoryAt(stagingArea, StagingSize);
  // Make sure there are no available chunks above the staging area.
  int tempChunks;
  if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, true)) {
    return false;
  }
  // Unmap the staging area so we can set it up for testing.
  unmapPages(stagingArea, StagingSize);
  // Check that the first chunk is used if it is aligned.
  CHECK(positionIsCorrect("---------xxxooxx", stagingArea, chunkPool,
                          tempChunks));
  // Check that the first chunk is used if it can be aligned.
  CHECK(positionIsCorrect("---------xxxoo-x", stagingArea, chunkPool,
                          tempChunks));
  // Check that an aligned chunk after a single unalignable chunk is used.
  CHECK(positionIsCorrect("-------xxxoox--x", stagingArea, chunkPool,
                          tempChunks));
  // Check that we fall back to the slow path after two unalignable chunks.
  CHECK(positionIsCorrect("-xxx--oox--xx--x", stagingArea, chunkPool,
                          tempChunks));
  // Check that we also fall back after an unalignable and an alignable chunk.
  CHECK(positionIsCorrect("-x--oo-x---xx--x", stagingArea, chunkPool,
                          tempChunks));
  // Check that the last ditch allocator works as expected.
  CHECK(positionIsCorrect("---xoo-xx--xx--x", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));
  // Check that the last ditch allocator can deal with naturally aligned chunks.
  CHECK(positionIsCorrect("------oox--xx--x", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));

  // Clean up.
  while (--tempChunks >= 0) {
    unmapPages(chunkPool[tempChunks], 2 * Chunk);
  }
  return true;
}

bool fillSpaceBeforeStagingArea(int& tempChunks, void* stagingArea,
                                void** chunkPool, bool addressesGrowDown) {
  // Make sure there are no available chunks before the staging area.
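  // Keep mapping 2 * Chunk regions until one lands on the far side of the
  // staging area: (p < stagingArea) ^ addressesGrowDown is true while p is
  // still on the near side, given the detected allocation direction.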
  tempChunks = 0;
  chunkPool[tempChunks++] = mapMemory(2 * Chunk);
  while (tempChunks < MaxTempChunks && chunkPool[tempChunks - 1] &&
         (chunkPool[tempChunks - 1] < stagingArea) ^ addressesGrowDown) {
    chunkPool[tempChunks++] = mapMemory(2 * Chunk);
    if (!chunkPool[tempChunks - 1]) {
      break;  // We already have our staging area, so OOM here is okay.
    }
    if ((chunkPool[tempChunks - 1] < chunkPool[tempChunks - 2]) ^
        addressesGrowDown) {
      break;  // The address growth direction is inconsistent!
    }
  }
  // OOM also means success in this case.
  if (!chunkPool[tempChunks - 1]) {
    --tempChunks;
    return true;
  }
  // Bail if we can't guarantee the right address space layout.
  if ((chunkPool[tempChunks - 1] < stagingArea) ^ addressesGrowDown ||
      (tempChunks > 1 &&
       (chunkPool[tempChunks - 1] < chunkPool[tempChunks - 2]) ^
           addressesGrowDown)) {
    while (--tempChunks >= 0) {
      unmapPages(chunkPool[tempChunks], 2 * Chunk);
    }
    unmapPages(stagingArea, StagingSize);
    return false;
  }
  return true;
}

bool positionIsCorrect(const char* str, void* base, void** chunkPool,
                       int tempChunks,
                       AllocType allocator = UseNormalAllocator) {
  // str represents a region of memory, with each character representing a
  // region of Chunk bytes. str should contain only x, o and -, where
  // x = mapped by the test to set up the initial conditions,
  // o = mapped by the GC allocator, and
  // - = unmapped.
  // base should point to a region of contiguous free memory
  // large enough to hold strlen(str) chunks of Chunk bytes.
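  // For example, "x-ooxxx---------" pre-maps chunks 0 and 4-6, leaves the
  // rest unmapped, and expects the allocator to return base + 2 * Chunk,
  // i.e. the two chunks marked 'o'.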
  int len = strlen(str);
  int i;
  // Find the index of the desired address.
  for (i = 0; i < len && str[i] != 'o'; ++i)
    ;
  void* desired = (void*)(uintptr_t(base) + i * Chunk);
  // Map the regions indicated by str.
  for (i = 0; i < len; ++i) {
    if (str[i] == 'x') {
      mapMemoryAt((void*)(uintptr_t(base) + i * Chunk), Chunk);
    }
  }
  // Allocate using the GC's allocator.
  void* result;
  if (allocator == UseNormalAllocator) {
    result = js::gc::MapAlignedPages(2 * Chunk, Alignment);
  } else {
    result = js::gc::TestMapAlignedPagesLastDitch(2 * Chunk, Alignment);
  }
  // Clean up the mapped regions.
  if (result) {
    js::gc::UnmapPages(result, 2 * Chunk);
  }
  for (--i; i >= 0; --i) {
    if (str[i] == 'x') {
      js::gc::UnmapPages((void*)(uintptr_t(base) + i * Chunk), Chunk);
    }
  }
  // CHECK returns, so clean up on failure.
  if (result != desired) {
    while (--tempChunks >= 0) {
      js::gc::UnmapPages(chunkPool[tempChunks], 2 * Chunk);
    }
  }
  return result == desired;
}

#if defined(XP_WIN)

void* mapMemoryAt(void* desired, size_t length) {
  return VirtualAlloc(desired, length, MEM_COMMIT | MEM_RESERVE,
                      PAGE_READWRITE);
}

void* mapMemory(size_t length) {
  return VirtualAlloc(nullptr, length, MEM_COMMIT | MEM_RESERVE,
                      PAGE_READWRITE);
}

void unmapPages(void* p, size_t size) {
  MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
}

#elif defined(__wasi__)

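// WASI has no virtual-memory mapping primitives, so fixed-address mapping
// always fails here and mapMemory falls back to a zeroed, page-aligned heap
// allocation.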
void* mapMemoryAt(void* desired, size_t length) { return nullptr; }

void* mapMemory(size_t length) {
  void* addr = nullptr;
  if (int err = posix_memalign(&addr, js::gc::SystemPageSize(), length)) {
    MOZ_ASSERT(err == ENOMEM);
  }
  MOZ_ASSERT(addr);
  memset(addr, 0, length);
  return addr;
}

void unmapPages(void* p, size_t size) { free(p); }

#else

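// mmap() treats the desired address only as a hint; if the kernel places the
// mapping elsewhere, undo it and report failure rather than return an address
// the caller did not ask for.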
void* mapMemoryAt(void* desired, size_t length) {
  void* region = mmap(desired, length, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANON, -1, 0);
  if (region == MAP_FAILED) {
    return nullptr;
  }
  if (region != desired) {
    if (munmap(region, length)) {
      MOZ_RELEASE_ASSERT(errno == ENOMEM);
    }
    return nullptr;
  }
  return region;
}

void* mapMemory(size_t length) {
  int prot = PROT_READ | PROT_WRITE;
  int flags = MAP_PRIVATE | MAP_ANON;
  int fd = -1;
  off_t offset = 0;
  void* region = mmap(nullptr, length, prot, flags, fd, offset);
  if (region == MAP_FAILED) {
    return nullptr;
  }
  return region;
}

void unmapPages(void* p, size_t size) {
  if (munmap(p, size)) {
    MOZ_RELEASE_ASSERT(errno == ENOMEM);
  }
}

#endif

END_TEST(testGCAllocator)