1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=4 sw=4 et tw=99 ft=cpp:
4 * ***** BEGIN LICENSE BLOCK *****
5 * Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
8 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
12 * notice(s), this list of conditions and the following disclaimer as
13 * the first lines of this file unmodified other than the possible
14 * addition of one or more copyright notices.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice(s), this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
27 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
28 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
29 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * ***** END LICENSE BLOCK ***** */
37 #include "jsgcchunk.h"
43 # pragma warning( disable: 4267 4996 4146 )
46 #elif defined(XP_MACOSX) || defined(DARWIN)
48 # include <libkern/OSAtomic.h>
49 # include <mach/mach_error.h>
50 # include <mach/mach_init.h>
51 # include <mach/vm_map.h>
52 # include <malloc/malloc.h>
54 #elif defined(XP_UNIX) || defined(XP_BEOS)
57 # include <sys/mman.h>
68 * On Windows CE < 6 we must use separated MEM_RESERVE and MEM_COMMIT
69 * VirtualAlloc calls and we cannot use MEM_RESERVE to allocate at the given
70 * address. So we use a workaround based on oversized allocation.
72 # if defined(WINCE) && !defined(MOZ_MEMORY_WINCE6)
74 # define JS_GC_HAS_MAP_ALIGN
77 UnmapPagesAtBase(void *p
)
79 JS_ALWAYS_TRUE(VirtualFree(p
, 0, MEM_RELEASE
));
83 MapAlignedPages(size_t size
, size_t alignment
)
85 JS_ASSERT(size
% alignment
== 0);
86 JS_ASSERT(size
>= alignment
);
88 void *reserve
= VirtualAlloc(NULL
, size
, MEM_RESERVE
, PAGE_NOACCESS
);
92 void *p
= VirtualAlloc(reserve
, size
, MEM_COMMIT
, PAGE_READWRITE
);
93 JS_ASSERT(p
== reserve
);
95 size_t mask
= alignment
- 1;
96 size_t offset
= (uintptr_t) p
& mask
;
100 /* Try to extend the initial allocation. */
101 UnmapPagesAtBase(reserve
);
102 reserve
= VirtualAlloc(NULL
, size
+ alignment
- offset
, MEM_RESERVE
,
106 if (offset
== ((uintptr_t) reserve
& mask
)) {
107 void *aligned
= (void *) ((uintptr_t) reserve
+ alignment
- offset
);
108 p
= VirtualAlloc(aligned
, size
, MEM_COMMIT
, PAGE_READWRITE
);
109 JS_ASSERT(p
== aligned
);
113 /* over allocate to ensure we have an aligned region */
114 UnmapPagesAtBase(reserve
);
115 reserve
= VirtualAlloc(NULL
, size
+ alignment
, MEM_RESERVE
, PAGE_NOACCESS
);
119 offset
= (uintptr_t) reserve
& mask
;
120 void *aligned
= (void *) ((uintptr_t) reserve
+ alignment
- offset
);
121 p
= VirtualAlloc(aligned
, size
, MEM_COMMIT
, PAGE_READWRITE
);
122 JS_ASSERT(p
== aligned
);
128 UnmapPages(void *p
, size_t size
)
130 if (VirtualFree(p
, 0, MEM_RELEASE
))
133 /* We could have used the over allocation. */
134 JS_ASSERT(GetLastError() == ERROR_INVALID_PARAMETER
);
135 MEMORY_BASIC_INFORMATION info
;
136 VirtualQuery(p
, &info
, sizeof(info
));
138 UnmapPagesAtBase(info
.AllocationBase
);
144 MapPages(void *addr
, size_t size
)
146 void *p
= VirtualAlloc(addr
, size
, MEM_COMMIT
|MEM_RESERVE
, PAGE_READWRITE
);
147 JS_ASSERT_IF(p
&& addr
, p
== addr
);
152 UnmapPages(void *addr
, size_t size
)
154 JS_ALWAYS_TRUE(VirtualFree(addr
, 0, MEM_RELEASE
));
159 #elif defined(XP_MACOSX) || defined(DARWIN)
162 MapPages(void *addr
, size_t size
)
167 p
= (vm_address_t
) addr
;
170 flags
= VM_FLAGS_ANYWHERE
;
173 kern_return_t err
= vm_allocate((vm_map_t
) mach_task_self(),
174 &p
, (vm_size_t
) size
, flags
);
175 if (err
!= KERN_SUCCESS
)
179 JS_ASSERT_IF(addr
, p
== (vm_address_t
) addr
);
184 UnmapPages(void *addr
, size_t size
)
186 JS_ALWAYS_TRUE(vm_deallocate((vm_map_t
) mach_task_self(),
192 #elif defined(XP_UNIX) || defined(XP_BEOS)
194 /* Required on Solaris 10. Might improve performance elsewhere. */
195 # if defined(SOLARIS) && defined(MAP_ALIGN)
196 # define JS_GC_HAS_MAP_ALIGN
199 MapAlignedPages(size_t size
, size_t alignment
)
202 * We don't use MAP_FIXED here, because it can cause the *replacement*
203 * of existing mappings, and we only want to create new mappings.
206 void *p
= mmap((caddr_t
) alignment
, size
, PROT_READ
| PROT_WRITE
,
207 MAP_PRIVATE
| MAP_NOSYNC
| MAP_ALIGN
| MAP_ANON
, -1, 0);
209 void *p
= mmap((void *) alignment
, size
, PROT_READ
| PROT_WRITE
,
210 MAP_PRIVATE
| MAP_NOSYNC
| MAP_ALIGN
| MAP_ANON
, -1, 0);
217 # else /* JS_GC_HAS_MAP_ALIGN */
220 MapPages(void *addr
, size_t size
)
223 * We don't use MAP_FIXED here, because it can cause the *replacement*
224 * of existing mappings, and we only want to create new mappings.
226 void *p
= mmap(addr
, size
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
| MAP_ANON
,
230 if (addr
&& p
!= addr
) {
231 /* We succeeded in mapping memory, but not in the right place. */
232 JS_ALWAYS_TRUE(munmap(p
, size
) == 0);
238 # endif /* !JS_GC_HAS_MAP_ALIGN */
241 UnmapPages(void *addr
, size_t size
)
244 JS_ALWAYS_TRUE(munmap((caddr_t
) addr
, size
) == 0);
246 JS_ALWAYS_TRUE(munmap(addr
, size
) == 0);
254 GCChunkAllocator defaultGCChunkAllocator
;
257 FindChunkStart(void *p
)
259 jsuword addr
= reinterpret_cast<jsuword
>(p
);
260 addr
= (addr
+ GC_CHUNK_MASK
) & ~GC_CHUNK_MASK
;
261 return reinterpret_cast<void *>(addr
);
264 JS_FRIEND_API(void *)
269 #ifdef JS_GC_HAS_MAP_ALIGN
270 p
= MapAlignedPages(GC_CHUNK_SIZE
, GC_CHUNK_SIZE
);
275 * Windows requires that there be a 1:1 mapping between VM allocation
276 * and deallocation operations. Therefore, take care here to acquire the
277 * final result via one mapping operation. This means unmapping any
278 * preliminary result that is not correctly aligned.
280 p
= MapPages(NULL
, GC_CHUNK_SIZE
);
284 if (reinterpret_cast<jsuword
>(p
) & GC_CHUNK_MASK
) {
285 UnmapPages(p
, GC_CHUNK_SIZE
);
286 p
= MapPages(FindChunkStart(p
), GC_CHUNK_SIZE
);
289 * Over-allocate in order to map a memory region that is
290 * definitely large enough then deallocate and allocate again the
291 * correct size, within the over-sized mapping.
293 p
= MapPages(NULL
, GC_CHUNK_SIZE
* 2);
296 UnmapPages(p
, GC_CHUNK_SIZE
* 2);
297 p
= MapPages(FindChunkStart(p
), GC_CHUNK_SIZE
);
300 * Failure here indicates a race with another thread, so
305 #endif /* !JS_GC_HAS_MAP_ALIGN */
307 JS_ASSERT(!(reinterpret_cast<jsuword
>(p
) & GC_CHUNK_MASK
));
315 JS_ASSERT(!(reinterpret_cast<jsuword
>(p
) & GC_CHUNK_MASK
));
316 UnmapPages(p
, GC_CHUNK_SIZE
);