/* js/src/jsarena.c (mozilla-1.9/m8) */
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Communicator client code, released
 * March 31, 1998.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
/*
 * Lifetime-based fast allocation, inspired by much prior art, including
 * "Fast Allocation and Deallocation of Memory Based on Object Lifetimes",
 * David R. Hanson, Software -- Practice and Experience, Vol. 20(1).
 */
45 #include "jsstddef.h"
46 #include <stdlib.h>
47 #include <string.h>
48 #include "jstypes.h"
49 #include "jsbit.h"
50 #include "jsarena.h" /* Added by JSIFY */
51 #include "jsutil.h" /* Added by JSIFY */
#ifdef JS_ARENAMETER
/* Head of the global list of per-pool statistics (metered builds only). */
static JSArenaStats *arena_stats_list;

/* Bump the named counter in pool->stats; compiles away when not metering. */
#define COUNT(pool,what)  (pool)->stats.what++
#else
#define COUNT(pool,what)  /* nothing */
#endif

/* Default allocation alignment when the caller passes align == 0. */
#define JS_ARENA_DEFAULT_ALIGN  sizeof(double)
63 JS_PUBLIC_API(void)
64 JS_INIT_NAMED_ARENA_POOL(JSArenaPool *pool, const char *name, size_t size,
65 size_t align, size_t *quotap)
67 if (align == 0)
68 align = JS_ARENA_DEFAULT_ALIGN;
69 pool->mask = JS_BITMASK(JS_CeilingLog2(align));
70 pool->first.next = NULL;
71 pool->first.base = pool->first.avail = pool->first.limit =
72 JS_ARENA_ALIGN(pool, &pool->first + 1);
73 pool->current = &pool->first;
74 pool->arenasize = size;
75 pool->quotap = quotap;
76 #ifdef JS_ARENAMETER
77 memset(&pool->stats, 0, sizeof pool->stats);
78 pool->stats.name = strdup(name);
79 pool->stats.next = arena_stats_list;
80 arena_stats_list = &pool->stats;
81 #endif
/*
 * An allocation that consumes more than pool->arenasize also has a header
 * pointing back to its previous arena's next member.  This header is not
 * included in [a->base, a->limit), so its space can't be wrongly claimed.
 *
 * As the header is a pointer, it must be well-aligned.  If pool->mask is
 * greater than or equal to POINTER_MASK, the header just preceding a->base
 * for an oversized arena a is well-aligned, because a->base is well-aligned.
 * However, we may need to add more space to pad the JSArena ** back-pointer
 * so that it lies just behind a->base, because a might not be aligned such
 * that (jsuword)(a + 1) is on a pointer boundary.
 *
 * By how much must we pad?  Let M be the alignment modulus for pool and P
 * the modulus for a pointer.  Given M >= P, the base of an oversized arena
 * that satisfies M is well-aligned for P.
 *
 * On the other hand, if M < P, we must include enough space in the header
 * size to align the back-pointer on a P boundary so that it can be found by
 * subtracting P from a->base.  This means a->base must be on a P boundary,
 * even though subsequent allocations from a may be aligned on a lesser (M)
 * boundary.  Given powers of two M and P as above, the extra space needed
 * when M < P is P-M or POINTER_MASK - pool->mask.
 *
 * The size of a header including padding is given by the HEADER_SIZE macro,
 * below, for any pool (for any value of M).
 *
 * The mask to align a->base for any pool is (pool->mask | POINTER_MASK), or
 * HEADER_BASE_MASK(pool).
 *
 * PTR_TO_HEADER computes the address of the back-pointer, given an oversized
 * allocation at p.  By definition, p must be a->base for the arena a that
 * contains p.  GET_HEADER and SET_HEADER operate on an oversized arena a, in
 * the case of SET_HEADER with back-pointer ap.
 */
#define POINTER_MASK            ((jsuword)(JS_ALIGN_OF_POINTER - 1))
#define HEADER_SIZE(pool)       (sizeof(JSArena **)                           \
                                 + (((pool)->mask < POINTER_MASK)             \
                                    ? POINTER_MASK - (pool)->mask             \
                                    : 0))
#define HEADER_BASE_MASK(pool)  ((pool)->mask | POINTER_MASK)
#define PTR_TO_HEADER(pool,p)   (JS_ASSERT(((jsuword)(p)                      \
                                            & HEADER_BASE_MASK(pool))         \
                                           == 0),                             \
                                 (JSArena ***)(p) - 1)
#define GET_HEADER(pool,a)      (*PTR_TO_HEADER(pool, (a)->base))
#define SET_HEADER(pool,a,ap)   (*PTR_TO_HEADER(pool, (a)->base) = (ap))
131 JS_PUBLIC_API(void *)
132 JS_ArenaAllocate(JSArenaPool *pool, size_t nb)
134 JSArena **ap, *a, *b;
135 jsuword extra, hdrsz, gross;
136 void *p;
139 * Search pool from current forward till we find or make enough space.
141 * NB: subtract nb from a->limit in the loop condition, instead of adding
142 * nb to a->avail, to avoid overflowing a 32-bit address space (possible
143 * when running a 32-bit program on a 64-bit system where the kernel maps
144 * the heap up against the top of the 32-bit address space).
146 * Thanks to Juergen Kreileder <jk@blackdown.de>, who brought this up in
147 * https://bugzilla.mozilla.org/show_bug.cgi?id=279273.
149 JS_ASSERT((nb & pool->mask) == 0);
150 for (a = pool->current; nb > a->limit || a->avail > a->limit - nb;
151 pool->current = a) {
152 ap = &a->next;
153 if (!*ap) {
154 /* Not enough space in pool, so we must malloc. */
155 extra = (nb > pool->arenasize) ? HEADER_SIZE(pool) : 0;
156 hdrsz = sizeof *a + extra + pool->mask;
157 gross = hdrsz + JS_MAX(nb, pool->arenasize);
158 if (gross < nb)
159 return NULL;
160 if (pool->quotap) {
161 if (gross > *pool->quotap)
162 return NULL;
163 b = (JSArena *) malloc(gross);
164 if (!b)
165 return NULL;
166 *pool->quotap -= gross;
167 } else {
168 b = (JSArena *) malloc(gross);
169 if (!b)
170 return NULL;
173 b->next = NULL;
174 b->limit = (jsuword)b + gross;
175 JS_COUNT_ARENA(pool,++);
176 COUNT(pool, nmallocs);
178 /* If oversized, store ap in the header, just before a->base. */
179 *ap = a = b;
180 JS_ASSERT(gross <= JS_UPTRDIFF(a->limit, a));
181 if (extra) {
182 a->base = a->avail =
183 ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
184 SET_HEADER(pool, a, ap);
185 } else {
186 a->base = a->avail = JS_ARENA_ALIGN(pool, a + 1);
188 continue;
190 a = *ap; /* move to next arena */
193 p = (void *)a->avail;
194 a->avail += nb;
195 JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
196 return p;
199 JS_PUBLIC_API(void *)
200 JS_ArenaRealloc(JSArenaPool *pool, void *p, size_t size, size_t incr)
202 JSArena **ap, *a, *b;
203 jsuword boff, aoff, extra, hdrsz, gross, growth;
206 * Use the oversized-single-allocation header to avoid searching for ap.
207 * See JS_ArenaAllocate, the SET_HEADER call.
209 if (size > pool->arenasize) {
210 ap = *PTR_TO_HEADER(pool, p);
211 a = *ap;
212 } else {
213 ap = &pool->first.next;
214 while ((a = *ap) != pool->current)
215 ap = &a->next;
218 JS_ASSERT(a->base == (jsuword)p);
219 boff = JS_UPTRDIFF(a->base, a);
220 aoff = JS_ARENA_ALIGN(pool, size + incr);
221 JS_ASSERT(aoff > pool->arenasize);
222 extra = HEADER_SIZE(pool); /* oversized header holds ap */
223 hdrsz = sizeof *a + extra + pool->mask; /* header and alignment slop */
224 gross = hdrsz + aoff;
225 JS_ASSERT(gross > aoff);
226 if (pool->quotap) {
227 growth = gross - (a->limit - (jsuword) a);
228 if (growth > *pool->quotap)
229 return NULL;
230 a = (JSArena *) realloc(a, gross);
231 if (!a)
232 return NULL;
233 *pool->quotap -= growth;
234 } else {
235 a = (JSArena *) realloc(a, gross);
236 if (!a)
237 return NULL;
239 #ifdef JS_ARENAMETER
240 pool->stats.nreallocs++;
241 #endif
243 if (a != *ap) {
244 /* Oops, realloc moved the allocation: update other pointers to a. */
245 if (pool->current == *ap)
246 pool->current = a;
247 b = a->next;
248 if (b && b->avail - b->base > pool->arenasize) {
249 JS_ASSERT(GET_HEADER(pool, b) == &(*ap)->next);
250 SET_HEADER(pool, b, &a->next);
253 /* Now update *ap, the next link of the arena before a. */
254 *ap = a;
257 a->base = ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
258 a->limit = (jsuword)a + gross;
259 a->avail = a->base + aoff;
260 JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
262 /* Check whether realloc aligned differently, and copy if necessary. */
263 if (boff != JS_UPTRDIFF(a->base, a))
264 memmove((void *)a->base, (char *)a + boff, size);
266 /* Store ap in the oversized-load arena header. */
267 SET_HEADER(pool, a, ap);
268 return (void *)a->base;
271 JS_PUBLIC_API(void *)
272 JS_ArenaGrow(JSArenaPool *pool, void *p, size_t size, size_t incr)
274 void *newp;
277 * If p points to an oversized allocation, it owns an entire arena, so we
278 * can simply realloc the arena.
280 if (size > pool->arenasize)
281 return JS_ArenaRealloc(pool, p, size, incr);
283 JS_ARENA_ALLOCATE(newp, pool, size + incr);
284 if (newp)
285 memcpy(newp, p, size);
286 return newp;
290 * Free tail arenas linked after head, which may not be the true list head.
291 * Reset pool->current to point to head in case it pointed at a tail arena.
293 static void
294 FreeArenaList(JSArenaPool *pool, JSArena *head)
296 JSArena **ap, *a;
298 ap = &head->next;
299 a = *ap;
300 if (!a)
301 return;
303 #ifdef DEBUG
304 do {
305 JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
306 a->avail = a->base;
307 JS_CLEAR_UNUSED(a);
308 } while ((a = a->next) != NULL);
309 a = *ap;
310 #endif
312 do {
313 *ap = a->next;
314 if (pool->quotap)
315 *pool->quotap += a->limit - (jsuword) a;
316 JS_CLEAR_ARENA(a);
317 JS_COUNT_ARENA(pool,--);
318 free(a);
319 } while ((a = *ap) != NULL);
321 pool->current = head;
324 JS_PUBLIC_API(void)
325 JS_ArenaRelease(JSArenaPool *pool, char *mark)
327 JSArena *a;
329 for (a = &pool->first; a; a = a->next) {
330 JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
332 if (JS_ARENA_MARK_MATCH(a, mark)) {
333 a->avail = JS_ARENA_ALIGN(pool, mark);
334 JS_ASSERT(a->avail <= a->limit);
335 FreeArenaList(pool, a);
336 return;
341 JS_PUBLIC_API(void)
342 JS_ArenaFreeAllocation(JSArenaPool *pool, void *p, size_t size)
344 JSArena **ap, *a, *b;
345 jsuword q;
348 * If the allocation is oversized, it consumes an entire arena, and it has
349 * a header just before the allocation pointing back to its predecessor's
350 * next member. Otherwise, we have to search pool for a.
352 if (size > pool->arenasize) {
353 ap = *PTR_TO_HEADER(pool, p);
354 a = *ap;
355 } else {
356 q = (jsuword)p + size;
357 q = JS_ARENA_ALIGN(pool, q);
358 ap = &pool->first.next;
359 while ((a = *ap) != NULL) {
360 JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
362 if (a->avail == q) {
364 * If a is consumed by the allocation at p, we can free it to
365 * the malloc heap.
367 if (a->base == (jsuword)p)
368 break;
371 * We can't free a, but we can "retract" its avail cursor --
372 * whether there are others after it in pool.
374 a->avail = (jsuword)p;
375 return;
377 ap = &a->next;
382 * At this point, a is doomed, so ensure that pool->current doesn't point
383 * at it. We must preserve LIFO order of mark/release cursors, so we use
384 * the oversized-allocation arena's back pointer (or if not oversized, we
385 * use the result of searching the entire pool) to compute the address of
386 * the arena that precedes a.
388 if (pool->current == a)
389 pool->current = (JSArena *) ((char *)ap - offsetof(JSArena, next));
392 * This is a non-LIFO deallocation, so take care to fix up a->next's back
393 * pointer in its header, if a->next is oversized.
395 *ap = b = a->next;
396 if (b && b->avail - b->base > pool->arenasize) {
397 JS_ASSERT(GET_HEADER(pool, b) == &a->next);
398 SET_HEADER(pool, b, ap);
400 if (pool->quotap)
401 *pool->quotap += a->limit - (jsuword) a;
402 JS_CLEAR_ARENA(a);
403 JS_COUNT_ARENA(pool,--);
404 free(a);
407 JS_PUBLIC_API(void)
408 JS_FreeArenaPool(JSArenaPool *pool)
410 FreeArenaList(pool, &pool->first);
411 COUNT(pool, ndeallocs);
414 JS_PUBLIC_API(void)
415 JS_FinishArenaPool(JSArenaPool *pool)
417 FreeArenaList(pool, &pool->first);
418 #ifdef JS_ARENAMETER
420 JSArenaStats *stats, **statsp;
422 if (pool->stats.name)
423 free(pool->stats.name);
424 for (statsp = &arena_stats_list; (stats = *statsp) != 0;
425 statsp = &stats->next) {
426 if (stats == &pool->stats) {
427 *statsp = stats->next;
428 return;
432 #endif
435 JS_PUBLIC_API(void)
436 JS_ArenaFinish()
440 JS_PUBLIC_API(void)
441 JS_ArenaShutDown(void)
445 #ifdef JS_ARENAMETER
446 JS_PUBLIC_API(void)
447 JS_ArenaCountAllocation(JSArenaPool *pool, size_t nb)
449 pool->stats.nallocs++;
450 pool->stats.nbytes += nb;
451 if (nb > pool->stats.maxalloc)
452 pool->stats.maxalloc = nb;
453 pool->stats.variance += nb * nb;
456 JS_PUBLIC_API(void)
457 JS_ArenaCountInplaceGrowth(JSArenaPool *pool, size_t size, size_t incr)
459 pool->stats.ninplace++;
462 JS_PUBLIC_API(void)
463 JS_ArenaCountGrowth(JSArenaPool *pool, size_t size, size_t incr)
465 pool->stats.ngrows++;
466 pool->stats.nbytes += incr;
467 pool->stats.variance -= size * size;
468 size += incr;
469 if (size > pool->stats.maxalloc)
470 pool->stats.maxalloc = size;
471 pool->stats.variance += size * size;
474 JS_PUBLIC_API(void)
475 JS_ArenaCountRelease(JSArenaPool *pool, char *mark)
477 pool->stats.nreleases++;
480 JS_PUBLIC_API(void)
481 JS_ArenaCountRetract(JSArenaPool *pool, char *mark)
483 pool->stats.nfastrels++;
#include <math.h>
#include <stdio.h>
489 JS_PUBLIC_API(void)
490 JS_DumpArenaStats(FILE *fp)
492 JSArenaStats *stats;
493 uint32 nallocs, nbytes;
494 double mean, variance, sigma;
496 for (stats = arena_stats_list; stats; stats = stats->next) {
497 nallocs = stats->nallocs;
498 if (nallocs != 0) {
499 nbytes = stats->nbytes;
500 mean = (double)nbytes / nallocs;
501 variance = stats->variance * nallocs - nbytes * nbytes;
502 if (variance < 0 || nallocs == 1)
503 variance = 0;
504 else
505 variance /= nallocs * (nallocs - 1);
506 sigma = sqrt(variance);
507 } else {
508 mean = variance = sigma = 0;
511 fprintf(fp, "\n%s allocation statistics:\n", stats->name);
512 fprintf(fp, " number of arenas: %u\n", stats->narenas);
513 fprintf(fp, " number of allocations: %u\n", stats->nallocs);
514 fprintf(fp, " number of malloc calls: %u\n", stats->nmallocs);
515 fprintf(fp, " number of deallocations: %u\n", stats->ndeallocs);
516 fprintf(fp, " number of allocation growths: %u\n", stats->ngrows);
517 fprintf(fp, " number of in-place growths: %u\n", stats->ninplace);
518 fprintf(fp, " number of realloc'ing growths: %u\n", stats->nreallocs);
519 fprintf(fp, "number of released allocations: %u\n", stats->nreleases);
520 fprintf(fp, " number of fast releases: %u\n", stats->nfastrels);
521 fprintf(fp, " total bytes allocated: %u\n", stats->nbytes);
522 fprintf(fp, " mean allocation size: %g\n", mean);
523 fprintf(fp, " standard deviation: %g\n", sigma);
524 fprintf(fp, " maximum allocation size: %u\n", stats->maxalloc);
527 #endif /* JS_ARENAMETER */
#ifdef DEBUG

/*
 * Return JS_TRUE iff mark does not precede guardMark in pool's LIFO order,
 * i.e. releasing to mark would not free the space guardMark protects.
 */
JSBool
js_GuardedArenaMark(JSArenaPool *pool, void *mark, void *guardMark)
{
    JSArena *a;

    /* Fast path: both marks land in the current arena. */
    a = pool->current;
    if (JS_ARENA_MARK_MATCH(a, mark)) {
        return !JS_ARENA_MARK_MATCH(a, guardMark) ||
               (uint8 *)guardMark <= (uint8 *)mark;
    }

    /* Walk from the first arena; if mark's arena precedes the guard's,
       mark is older than the guard. */
    for (a = &pool->first; !JS_ARENA_MARK_MATCH(a, guardMark); a = a->next) {
        if (JS_ARENA_MARK_MATCH(a, mark))
            return JS_FALSE;
    }

    /*
     * We found the guarded arena.  Mark follows the guard when it either
     * marks arenas after a or if it is greater or equal to the guard.
     */
    return !JS_ARENA_MARK_MATCH(a, mark) ||
           (uint8 *)guardMark <= (uint8 *)mark;
}

#endif