/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "gc/Memory.h"
#include "jsapi-tests/tests.h"

#if defined(XP_WIN)
#  include "util/WindowsWrapper.h"
#elif defined(__wasi__)
// No platform-specific headers are needed.
#else
#  include <errno.h>
#  include <sys/mman.h>
#  include <sys/resource.h>
#  include <sys/stat.h>
#  include <sys/types.h>
#  include <unistd.h>
#endif
BEGIN_TEST(testGCAllocator) {
#ifdef JS_64BIT
  // If we're using the scattershot allocator, this test does not apply.
  if (js::gc::UsingScattershotAllocator()) {
    return true;
  }
#endif

  size_t PageSize = js::gc::SystemPageSize();

  /* Finish any ongoing background free activity. */
  js::gc::FinishGC(cx);

  bool growUp = false;
  CHECK(addressesGrowUp(&growUp));

  if (growUp) {
    return testGCAllocatorUp(PageSize);
  }
  return testGCAllocatorDown(PageSize);
}
static const size_t Chunk = 512 * 1024;
static const size_t Alignment = 2 * Chunk;
static const int MaxTempChunks = 4096;
static const size_t StagingSize = 16 * Chunk;
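
// For concreteness: Chunk is 512 KiB, so Alignment is 1 MiB and StagingSize
// is 8 MiB, exactly the 16 chunk-sized slots described by each layout string
// passed to positionIsCorrect() below. An illustrative compile-time sanity
// check of that relationship (added as an aside, not load-bearing):
static_assert(StagingSize % Alignment == 0,
              "staging area covers a whole number of aligned regions");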
bool addressesGrowUp(bool* resultOut) {
  /*
   * Try to detect whether the OS allocates memory in increasing or decreasing
   * address order by making several allocations and comparing the addresses.
   */

  static const unsigned ChunksToTest = 20;
  static const int ThresholdCount = 15;

  void* chunks[ChunksToTest];
  for (unsigned i = 0; i < ChunksToTest; i++) {
    chunks[i] = mapMemory(2 * Chunk);
    CHECK(chunks[i]);
  }

  int upCount = 0;
  int downCount = 0;

  for (unsigned i = 0; i < ChunksToTest - 1; i++) {
    if (chunks[i] < chunks[i + 1]) {
      upCount++;
    } else {
      downCount++;
    }
  }

  for (unsigned i = 0; i < ChunksToTest; i++) {
    unmapPages(chunks[i], 2 * Chunk);
  }

  /* Check results were mostly consistent. */
  CHECK(abs(upCount - downCount) >= ThresholdCount);

  *resultOut = upCount > downCount;

  return true;
}
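
// Worked numbers for the threshold above: 20 mappings give 19 adjacent
// comparisons, and |upCount - downCount| >= 15 with upCount + downCount == 19
// means at least 17 of the 19 must agree. The test only trusts a growth
// direction when the OS is about 90% consistent; anything noisier fails the
// CHECK rather than guessing.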
size_t offsetFromAligned(void* p) { return uintptr_t(p) % Alignment; }
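
// For example, with Alignment = 2 * Chunk = 1 MiB, a pointer at 0x20080000
// gives offsetFromAligned() == 0x80000, its distance past the previous
// aligned boundary; an already-aligned pointer gives 0. (Illustrative note.)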
enum AllocType { UseNormalAllocator, UseLastDitchAllocator };
bool testGCAllocatorUp(const size_t PageSize) {
  const size_t UnalignedSize = StagingSize + Alignment - PageSize;
  void* chunkPool[MaxTempChunks];
  // Allocate a contiguous chunk that we can partition for testing.
  void* stagingArea = mapMemory(UnalignedSize);
  if (!stagingArea) {
    return false;
  }
  // Ensure that the staging area is aligned.
  unmapPages(stagingArea, UnalignedSize);
  if (offsetFromAligned(stagingArea)) {
    const size_t Offset = offsetFromAligned(stagingArea);
    // Place the area at the lowest aligned address.
    stagingArea = (void*)(uintptr_t(stagingArea) + (Alignment - Offset));
  }
  mapMemoryAt(stagingArea, StagingSize);
  // Make sure there are no available chunks below the staging area.
  int tempChunks;
  if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, false)) {
    return false;
  }
  // Unmap the staging area so we can set it up for testing.
  unmapPages(stagingArea, StagingSize);
  // Check that the first chunk is used if it is aligned.
  CHECK(positionIsCorrect("xxooxxx---------", stagingArea, chunkPool,
                          tempChunks));
  // Check that the first chunk is used if it can be aligned.
  CHECK(positionIsCorrect("x-ooxxx---------", stagingArea, chunkPool,
                          tempChunks));
  // Check that an aligned chunk after a single unalignable chunk is used.
  CHECK(positionIsCorrect("x--xooxxx-------", stagingArea, chunkPool,
                          tempChunks));
  // Check that we fall back to the slow path after two unalignable chunks.
  CHECK(positionIsCorrect("x--xx--xoo--xxx-", stagingArea, chunkPool,
                          tempChunks));
  // Check that we also fall back after an unalignable and an alignable chunk.
  CHECK(positionIsCorrect("x--xx---x-oo--x-", stagingArea, chunkPool,
                          tempChunks));
  // Check that the last ditch allocator works as expected.
  CHECK(positionIsCorrect("x--xx--xx-oox---", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));
  // Check that the last ditch allocator can deal with naturally aligned chunks.
  CHECK(positionIsCorrect("x--xx--xoo------", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));

  // Clean up.
  while (--tempChunks >= 0) {
    unmapPages(chunkPool[tempChunks], 2 * Chunk);
  }
  return true;
}
bool testGCAllocatorDown(const size_t PageSize) {
  const size_t UnalignedSize = StagingSize + Alignment - PageSize;
  void* chunkPool[MaxTempChunks];
  // Allocate a contiguous chunk that we can partition for testing.
  void* stagingArea = mapMemory(UnalignedSize);
  if (!stagingArea) {
    return false;
  }
  // Ensure that the staging area is aligned.
  unmapPages(stagingArea, UnalignedSize);
  if (offsetFromAligned(stagingArea)) {
    void* stagingEnd = (void*)(uintptr_t(stagingArea) + UnalignedSize);
    const size_t Offset = offsetFromAligned(stagingEnd);
    // Place the area at the highest aligned address.
    stagingArea = (void*)(uintptr_t(stagingEnd) - Offset - StagingSize);
  }
  mapMemoryAt(stagingArea, StagingSize);
  // Make sure there are no available chunks above the staging area.
  int tempChunks;
  if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, true)) {
    return false;
  }
  // Unmap the staging area so we can set it up for testing.
  unmapPages(stagingArea, StagingSize);
  // Check that the first chunk is used if it is aligned.
  CHECK(positionIsCorrect("---------xxxooxx", stagingArea, chunkPool,
                          tempChunks));
  // Check that the first chunk is used if it can be aligned.
  CHECK(positionIsCorrect("---------xxxoo-x", stagingArea, chunkPool,
                          tempChunks));
  // Check that an aligned chunk after a single unalignable chunk is used.
  CHECK(positionIsCorrect("-------xxxoox--x", stagingArea, chunkPool,
                          tempChunks));
  // Check that we fall back to the slow path after two unalignable chunks.
  CHECK(positionIsCorrect("-xxx--oox--xx--x", stagingArea, chunkPool,
                          tempChunks));
  // Check that we also fall back after an unalignable and an alignable chunk.
  CHECK(positionIsCorrect("-x--oo-x---xx--x", stagingArea, chunkPool,
                          tempChunks));
  // Check that the last ditch allocator works as expected.
  CHECK(positionIsCorrect("---xoo-xx--xx--x", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));
  // Check that the last ditch allocator can deal with naturally aligned chunks.
  CHECK(positionIsCorrect("------oox--xx--x", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));

  // Clean up.
  while (--tempChunks >= 0) {
    unmapPages(chunkPool[tempChunks], 2 * Chunk);
  }
  return true;
}
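
// Note: testGCAllocatorDown is the mirror image of testGCAllocatorUp for
// systems that hand out addresses from high to low. The staging area is
// aligned to its top end, and the layout strings cluster the pre-mapped 'x'
// regions and expected 'o' results toward the right, since new mappings
// appear at successively lower addresses.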
bool fillSpaceBeforeStagingArea(int& tempChunks, void* stagingArea,
                                void** chunkPool, bool addressesGrowDown) {
  // Make sure there are no available chunks before the staging area.
  tempChunks = 0;
  chunkPool[tempChunks++] = mapMemory(2 * Chunk);
  while (tempChunks < MaxTempChunks && chunkPool[tempChunks - 1] &&
         (chunkPool[tempChunks - 1] < stagingArea) ^ addressesGrowDown) {
    chunkPool[tempChunks++] = mapMemory(2 * Chunk);
    if (!chunkPool[tempChunks - 1]) {
      break;  // We already have our staging area, so OOM here is okay.
    }
    if ((chunkPool[tempChunks - 1] < chunkPool[tempChunks - 2]) ^
        addressesGrowDown) {
      break;  // The address growth direction is inconsistent!
    }
  }
  // OOM also means success in this case.
  if (!chunkPool[tempChunks - 1]) {
    --tempChunks;
    return true;
  }
  // Bail if we can't guarantee the right address space layout.
  if ((chunkPool[tempChunks - 1] < stagingArea) ^ addressesGrowDown ||
      (tempChunks > 1 &&
       (chunkPool[tempChunks - 1] < chunkPool[tempChunks - 2]) ^
           addressesGrowDown)) {
    while (--tempChunks >= 0) {
      unmapPages(chunkPool[tempChunks], 2 * Chunk);
    }
    unmapPages(stagingArea, StagingSize);
    return false;
  }
  return true;
}
bool positionIsCorrect(const char* str, void* base, void** chunkPool,
                       int tempChunks,
                       AllocType allocator = UseNormalAllocator) {
  // str represents a region of memory, with each character representing a
  // region of Chunk bytes. str should contain only x, o and -, where
  // x = mapped by the test to set up the initial conditions,
  // o = mapped by the GC allocator, and
  // - = unmapped.
  // base should point to a region of contiguous free memory
  // large enough to hold strlen(str) chunks of Chunk bytes.
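  // Worked example (illustrative, using a string from the callers above):
  // for "x-ooxxx---------", chunk 0 and chunks 4-6 are pre-mapped, chunk 1
  // is left free, and the expected result is base + 2 * Chunk, the position
  // of the first 'o', since that is the first spot where an Alignment-aligned
  // two-chunk region fits.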
  int len = strlen(str);
  int i;
  // Find the index of the desired address.
  for (i = 0; i < len && str[i] != 'o'; ++i)
    ;
  void* desired = (void*)(uintptr_t(base) + i * Chunk);
  // Map the regions indicated by str.
  for (i = 0; i < len; ++i) {
    if (str[i] == 'x') {
      mapMemoryAt((void*)(uintptr_t(base) + i * Chunk), Chunk);
    }
  }
  // Allocate using the GC's allocator.
  void* result;
  if (allocator == UseNormalAllocator) {
    result = js::gc::MapAlignedPages(2 * Chunk, Alignment);
  } else {
    result = js::gc::TestMapAlignedPagesLastDitch(2 * Chunk, Alignment);
  }
  // Clean up the mapped regions.
  if (result) {
    js::gc::UnmapPages(result, 2 * Chunk);
  }
  for (--i; i >= 0; --i) {
    if (str[i] == 'x') {
      js::gc::UnmapPages((void*)(uintptr_t(base) + i * Chunk), Chunk);
    }
  }
  // CHECK returns, so clean up on failure.
  if (result != desired) {
    while (--tempChunks >= 0) {
      js::gc::UnmapPages(chunkPool[tempChunks], 2 * Chunk);
    }
  }
  return result == desired;
}
#if defined(XP_WIN)

void* mapMemoryAt(void* desired, size_t length) {
  return VirtualAlloc(desired, length, MEM_COMMIT | MEM_RESERVE,
                      PAGE_READWRITE);
}

void* mapMemory(size_t length) {
  return VirtualAlloc(nullptr, length, MEM_COMMIT | MEM_RESERVE,
                      PAGE_READWRITE);
}

void unmapPages(void* p, size_t size) {
  MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
}
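
// Note on the VirtualFree call above: with MEM_RELEASE the size argument
// must be 0 and the pointer must be the exact base address returned by
// VirtualAlloc, which is why unmapPages ignores its size parameter on
// Windows.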
#elif defined(__wasi__)

void* mapMemoryAt(void* desired, size_t length) { return nullptr; }

void* mapMemory(size_t length) {
  void* addr = nullptr;
  if (int err = posix_memalign(&addr, js::gc::SystemPageSize(), length)) {
    MOZ_ASSERT(err == ENOMEM);
    return nullptr;
  }
  memset(addr, 0, length);
  return addr;
}

void unmapPages(void* p, size_t size) { free(p); }
#else

void* mapMemoryAt(void* desired, size_t length) {
  void* region = mmap(desired, length, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANON, -1, 0);
  if (region == MAP_FAILED) {
    return nullptr;
  }
  if (region != desired) {
    if (munmap(region, length)) {
      MOZ_RELEASE_ASSERT(errno == ENOMEM);
    }
    return nullptr;
  }
  return region;
}
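
// Note on mapMemoryAt above: without MAP_FIXED, mmap treats the desired
// address purely as a hint and may place the mapping anywhere. The helper
// therefore checks the returned address and backs out (munmap + nullptr)
// when the hint was not honored, rather than silently using a different
// region.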
void* mapMemory(size_t length) {
  int prot = PROT_READ | PROT_WRITE;
  int flags = MAP_PRIVATE | MAP_ANON;
  int fd = -1;
  off_t offset = 0;
  void* region = mmap(nullptr, length, prot, flags, fd, offset);
  if (region == MAP_FAILED) {
    return nullptr;
  }
  return region;
}
void unmapPages(void* p, size_t size) {
  if (munmap(p, size)) {
    MOZ_RELEASE_ASSERT(errno == ENOMEM);
  }
}
#endif

END_TEST(testGCAllocator)