Merge mozilla-central and tracemonkey. (a=blockers)
[mozilla-central.git] / js / src / jsarena.cpp
blobdd1b8a650d134c2a70f996c2ad962e9397bb4148
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
3 * ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
14 * License.
16 * The Original Code is Mozilla Communicator client code, released
17 * March 31, 1998.
19 * The Initial Developer of the Original Code is
20 * Netscape Communications Corporation.
21 * Portions created by the Initial Developer are Copyright (C) 1998
22 * the Initial Developer. All Rights Reserved.
24 * Contributor(s):
26 * Alternatively, the contents of this file may be used under the terms of
27 * either of the GNU General Public License Version 2 or later (the "GPL"),
28 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 * in which case the provisions of the GPL or the LGPL are applicable instead
30 * of those above. If you wish to allow use of your version of this file only
31 * under the terms of either the GPL or the LGPL, and not to allow others to
32 * use your version of this file under the terms of the MPL, indicate your
33 * decision by deleting the provisions above and replace them with the notice
34 * and other provisions required by the GPL or the LGPL. If you do not delete
35 * the provisions above, a recipient may use your version of this file under
36 * the terms of any one of the MPL, the GPL or the LGPL.
38 * ***** END LICENSE BLOCK ***** */
/*
 * Lifetime-based fast allocation, inspired by much prior art, including
 * "Fast Allocation and Deallocation of Memory Based on Object Lifetimes"
 * David R. Hanson, Software -- Practice and Experience, Vol. 20(1).
 */
45 #include <stdlib.h>
46 #include <string.h>
47 #include "jstypes.h"
48 #include "jsstdint.h"
49 #include "jsbit.h"
50 #include "jsarena.h"
51 #include "jsprvtd.h"
#ifdef JS_ARENAMETER
/* Head of the global chain of per-pool statistics (see JS_DumpArenaStats). */
static JSArenaStats *arena_stats_list;

/* Bump a named counter in pool->stats; compiled away when metering is off. */
#define COUNT(pool,what)  (pool)->stats.what++
#else
#define COUNT(pool,what)  /* nothing */
#endif

/* Default allocation alignment when JS_InitArenaPool is passed align == 0. */
#define JS_ARENA_DEFAULT_ALIGN  sizeof(double)
63 JS_PUBLIC_API(void)
64 JS_InitArenaPool(JSArenaPool *pool, const char *name, size_t size,
65 size_t align, size_t *quotap)
67 if (align == 0)
68 align = JS_ARENA_DEFAULT_ALIGN;
69 pool->mask = JS_BITMASK(JS_CeilingLog2(align));
70 pool->first.next = NULL;
71 pool->first.base = pool->first.avail = pool->first.limit =
72 JS_ARENA_ALIGN(pool, &pool->first + 1);
73 pool->current = &pool->first;
74 pool->arenasize = size;
75 pool->quotap = quotap;
76 #ifdef JS_ARENAMETER
77 memset(&pool->stats, 0, sizeof pool->stats);
78 pool->stats.name = strdup(name);
79 pool->stats.next = arena_stats_list;
80 arena_stats_list = &pool->stats;
81 #endif
/*
 * An allocation that consumes more than pool->arenasize also has a header
 * pointing back to its previous arena's next member. This header is not
 * included in [a->base, a->limit), so its space can't be wrongly claimed.
 *
 * As the header is a pointer, it must be well-aligned. If pool->mask is
 * greater than or equal to POINTER_MASK, the header just preceding a->base
 * for an oversized arena a is well-aligned, because a->base is well-aligned.
 * However, we may need to add more space to pad the JSArena ** back-pointer
 * so that it lies just behind a->base, because a might not be aligned such
 * that (jsuword)(a + 1) is on a pointer boundary.
 *
 * By how much must we pad? Let M be the alignment modulus for pool and P
 * the modulus for a pointer. Given M >= P, the base of an oversized arena
 * that satisfies M is well-aligned for P.
 *
 * On the other hand, if M < P, we must include enough space in the header
 * size to align the back-pointer on a P boundary so that it can be found by
 * subtracting P from a->base. This means a->base must be on a P boundary,
 * even though subsequent allocations from a may be aligned on a lesser (M)
 * boundary. Given powers of two M and P as above, the extra space needed
 * when M < P is P-M or POINTER_MASK - pool->mask.
 *
 * The size of a header including padding is given by the HEADER_SIZE macro,
 * below, for any pool (for any value of M).
 *
 * The mask to align a->base for any pool is (pool->mask | POINTER_MASK), or
 * HEADER_BASE_MASK(pool).
 *
 * PTR_TO_HEADER computes the address of the back-pointer, given an oversized
 * allocation at p. By definition, p must be a->base for the arena a that
 * contains p. GET_HEADER and SET_HEADER operate on an oversized arena a, in
 * the case of SET_HEADER with back-pointer ap.
 */
#define POINTER_MASK            ((jsuword)(JS_ALIGN_OF_POINTER - 1))
#define HEADER_SIZE(pool)       (sizeof(JSArena **)                           \
                                 + (((pool)->mask < POINTER_MASK)             \
                                    ? POINTER_MASK - (pool)->mask             \
                                    : 0))
#define HEADER_BASE_MASK(pool)  ((pool)->mask | POINTER_MASK)
#define PTR_TO_HEADER(pool,p)   (JS_ASSERT(((jsuword)(p)                      \
                                            & HEADER_BASE_MASK(pool))         \
                                           == 0),                             \
                                 (JSArena ***)(p) - 1)
#define GET_HEADER(pool,a)      (*PTR_TO_HEADER(pool, (a)->base))
#define SET_HEADER(pool,a,ap)   (*PTR_TO_HEADER(pool, (a)->base) = (ap))
131 JS_PUBLIC_API(void *)
132 JS_ArenaAllocate(JSArenaPool *pool, size_t nb)
134 JSArena **ap, *a, *b;
135 jsuword extra, hdrsz, gross;
136 void *p;
139 * Search pool from current forward till we find or make enough space.
141 * NB: subtract nb from a->limit in the loop condition, instead of adding
142 * nb to a->avail, to avoid overflowing a 32-bit address space (possible
143 * when running a 32-bit program on a 64-bit system where the kernel maps
144 * the heap up against the top of the 32-bit address space).
146 * Thanks to Juergen Kreileder <jk@blackdown.de>, who brought this up in
147 * https://bugzilla.mozilla.org/show_bug.cgi?id=279273.
149 JS_ASSERT((nb & pool->mask) == 0);
150 for (a = pool->current; nb > a->limit || a->avail > a->limit - nb;
151 pool->current = a) {
152 ap = &a->next;
153 if (!*ap) {
154 /* Not enough space in pool, so we must malloc. */
155 extra = (nb > pool->arenasize) ? HEADER_SIZE(pool) : 0;
156 hdrsz = sizeof *a + extra + pool->mask;
157 gross = hdrsz + JS_MAX(nb, pool->arenasize);
158 if (gross < nb)
159 return NULL;
160 if (pool->quotap) {
161 if (gross > *pool->quotap)
162 return NULL;
163 b = (JSArena *) js_malloc(gross);
164 if (!b)
165 return NULL;
166 *pool->quotap -= gross;
167 } else {
168 b = (JSArena *) js_malloc(gross);
169 if (!b)
170 return NULL;
173 b->next = NULL;
174 b->limit = (jsuword)b + gross;
175 JS_COUNT_ARENA(pool,++);
176 COUNT(pool, nmallocs);
178 /* If oversized, store ap in the header, just before a->base. */
179 *ap = a = b;
180 JS_ASSERT(gross <= JS_UPTRDIFF(a->limit, a));
181 if (extra) {
182 a->base = a->avail =
183 ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
184 SET_HEADER(pool, a, ap);
185 } else {
186 a->base = a->avail = JS_ARENA_ALIGN(pool, a + 1);
188 continue;
190 a = *ap; /* move to next arena */
193 p = (void *)a->avail;
194 a->avail += nb;
195 JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
196 return p;
199 JS_PUBLIC_API(void *)
200 JS_ArenaRealloc(JSArenaPool *pool, void *p, size_t size, size_t incr)
202 JSArena **ap, *a, *b;
203 jsuword boff, aoff, extra, hdrsz, gross, growth;
206 * Use the oversized-single-allocation header to avoid searching for ap.
207 * See JS_ArenaAllocate, the SET_HEADER call.
209 if (size > pool->arenasize) {
210 ap = *PTR_TO_HEADER(pool, p);
211 a = *ap;
212 } else {
213 ap = &pool->first.next;
214 while ((a = *ap) != pool->current)
215 ap = &a->next;
218 JS_ASSERT(a->base == (jsuword)p);
219 boff = JS_UPTRDIFF(a->base, a);
220 aoff = JS_ARENA_ALIGN(pool, size + incr);
221 JS_ASSERT(aoff > pool->arenasize);
222 extra = HEADER_SIZE(pool); /* oversized header holds ap */
223 hdrsz = sizeof *a + extra + pool->mask; /* header and alignment slop */
224 gross = hdrsz + aoff;
225 JS_ASSERT(gross > aoff);
226 if (pool->quotap) {
227 growth = gross - (a->limit - (jsuword) a);
228 if (growth > *pool->quotap)
229 return NULL;
230 a = (JSArena *) js_realloc(a, gross);
231 if (!a)
232 return NULL;
233 *pool->quotap -= growth;
234 } else {
235 a = (JSArena *) js_realloc(a, gross);
236 if (!a)
237 return NULL;
239 #ifdef JS_ARENAMETER
240 pool->stats.nreallocs++;
241 #endif
243 if (a != *ap) {
244 /* Oops, realloc moved the allocation: update other pointers to a. */
245 if (pool->current == *ap)
246 pool->current = a;
247 b = a->next;
248 if (b && b->avail - b->base > pool->arenasize) {
249 JS_ASSERT(GET_HEADER(pool, b) == &(*ap)->next);
250 SET_HEADER(pool, b, &a->next);
253 /* Now update *ap, the next link of the arena before a. */
254 *ap = a;
257 a->base = ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
258 a->limit = (jsuword)a + gross;
259 a->avail = a->base + aoff;
260 JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
262 /* Check whether realloc aligned differently, and copy if necessary. */
263 if (boff != JS_UPTRDIFF(a->base, a))
264 memmove((void *)a->base, (char *)a + boff, size);
266 /* Store ap in the oversized-load arena header. */
267 SET_HEADER(pool, a, ap);
268 return (void *)a->base;
271 JS_PUBLIC_API(void *)
272 JS_ArenaGrow(JSArenaPool *pool, void *p, size_t size, size_t incr)
274 void *newp;
277 * If p points to an oversized allocation, it owns an entire arena, so we
278 * can simply realloc the arena.
280 if (size > pool->arenasize)
281 return JS_ArenaRealloc(pool, p, size, incr);
283 JS_ARENA_ALLOCATE(newp, pool, size + incr);
284 if (newp)
285 memcpy(newp, p, size);
286 return newp;
290 * Free tail arenas linked after head, which may not be the true list head.
291 * Reset pool->current to point to head in case it pointed at a tail arena.
293 static void
294 FreeArenaList(JSArenaPool *pool, JSArena *head)
296 JSArena **ap, *a;
298 ap = &head->next;
299 a = *ap;
300 if (!a)
301 return;
303 #ifdef DEBUG
304 do {
305 JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
306 a->avail = a->base;
307 JS_CLEAR_UNUSED(a);
308 } while ((a = a->next) != NULL);
309 a = *ap;
310 #endif
312 do {
313 *ap = a->next;
314 if (pool->quotap)
315 *pool->quotap += a->limit - (jsuword) a;
316 JS_CLEAR_ARENA(a);
317 JS_COUNT_ARENA(pool,--);
318 js_free(a);
319 } while ((a = *ap) != NULL);
321 pool->current = head;
324 JS_PUBLIC_API(void)
325 JS_ArenaRelease(JSArenaPool *pool, char *mark)
327 JSArena *a;
329 for (a = &pool->first; a; a = a->next) {
330 JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
332 if (JS_ARENA_MARK_MATCH(a, mark)) {
333 a->avail = JS_ARENA_ALIGN(pool, mark);
334 JS_ASSERT(a->avail <= a->limit);
335 FreeArenaList(pool, a);
336 return;
341 JS_PUBLIC_API(void)
342 JS_FreeArenaPool(JSArenaPool *pool)
344 FreeArenaList(pool, &pool->first);
345 COUNT(pool, ndeallocs);
348 JS_PUBLIC_API(void)
349 JS_FinishArenaPool(JSArenaPool *pool)
351 FreeArenaList(pool, &pool->first);
352 #ifdef JS_ARENAMETER
354 JSArenaStats *stats, **statsp;
356 if (pool->stats.name) {
357 js_free(pool->stats.name);
358 pool->stats.name = NULL;
360 for (statsp = &arena_stats_list; (stats = *statsp) != 0;
361 statsp = &stats->next) {
362 if (stats == &pool->stats) {
363 *statsp = stats->next;
364 return;
368 #endif
371 JS_PUBLIC_API(void)
372 JS_ArenaFinish()
376 JS_PUBLIC_API(void)
377 JS_ArenaShutDown(void)
381 #ifdef JS_ARENAMETER
382 JS_PUBLIC_API(void)
383 JS_ArenaCountAllocation(JSArenaPool *pool, size_t nb)
385 pool->stats.nallocs++;
386 pool->stats.nbytes += nb;
387 if (nb > pool->stats.maxalloc)
388 pool->stats.maxalloc = nb;
389 pool->stats.variance += nb * nb;
392 JS_PUBLIC_API(void)
393 JS_ArenaCountInplaceGrowth(JSArenaPool *pool, size_t size, size_t incr)
395 pool->stats.ninplace++;
398 JS_PUBLIC_API(void)
399 JS_ArenaCountGrowth(JSArenaPool *pool, size_t size, size_t incr)
401 pool->stats.ngrows++;
402 pool->stats.nbytes += incr;
403 pool->stats.variance -= size * size;
404 size += incr;
405 if (size > pool->stats.maxalloc)
406 pool->stats.maxalloc = size;
407 pool->stats.variance += size * size;
410 JS_PUBLIC_API(void)
411 JS_ArenaCountRelease(JSArenaPool *pool, char *mark)
413 pool->stats.nreleases++;
416 JS_PUBLIC_API(void)
417 JS_ArenaCountRetract(JSArenaPool *pool, char *mark)
419 pool->stats.nfastrels++;
422 #include <stdio.h>
424 JS_PUBLIC_API(void)
425 JS_DumpArenaStats(FILE *fp)
427 JSArenaStats *stats;
428 double mean, sigma;
430 for (stats = arena_stats_list; stats; stats = stats->next) {
431 mean = JS_MeanAndStdDev(stats->nallocs, stats->nbytes, stats->variance,
432 &sigma);
434 fprintf(fp, "\n%s allocation statistics:\n", stats->name);
435 fprintf(fp, " number of arenas: %u\n", stats->narenas);
436 fprintf(fp, " number of allocations: %u\n", stats->nallocs);
437 fprintf(fp, " number of malloc calls: %u\n", stats->nmallocs);
438 fprintf(fp, " number of deallocations: %u\n", stats->ndeallocs);
439 fprintf(fp, " number of allocation growths: %u\n", stats->ngrows);
440 fprintf(fp, " number of in-place growths: %u\n", stats->ninplace);
441 fprintf(fp, " number of realloc'ing growths: %u\n", stats->nreallocs);
442 fprintf(fp, "number of released allocations: %u\n", stats->nreleases);
443 fprintf(fp, " number of fast releases: %u\n", stats->nfastrels);
444 fprintf(fp, " total bytes allocated: %u\n", stats->nbytes);
445 fprintf(fp, " mean allocation size: %g\n", mean);
446 fprintf(fp, " standard deviation: %g\n", sigma);
447 fprintf(fp, " maximum allocation size: %u\n", stats->maxalloc);
450 #endif /* JS_ARENAMETER */