Import boehm-gc snapshot, taken from official-gcc.git: boehm-gc/mallocx.c
blob bf7f9f06013c86c5d0748266839515efbcd4a20e
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"
/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <stdio.h>
#include <string.h>

#ifdef MSWINCE
# ifndef WIN32_LEAN_AND_MEAN
#   define WIN32_LEAN_AND_MEAN 1
# endif
# define NOSERVICE
# include <windows.h>
#else
# include <errno.h>
#endif
/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
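
/* A minimal sketch of the sort of inlined allocator these pointers are  */
/* meant for, assuming (as elsewhere in this file) that the free lists   */
/* are indexed by granules and linked through the first word of each     */
/* object.  The exact granule size, locking, and GC_bytes_allocd         */
/* accounting are glossed over; guarded out of the build, illustrative   */
/* only.                                                                 */
#if 0
  /* EXAMPLE_GRANULE_BYTES is a placeholder, not the library's constant; */
  /* a real inline allocator would use the value from the GC headers,    */
  /* hold the allocation lock (or use a thread-local list), and report   */
  /* allocated bytes, e.g. via GC_incr_bytes_allocd below.               */
# define EXAMPLE_GRANULE_BYTES 16
  static void *example_inline_malloc(size_t lb)
  {
      size_t lg = (lb + EXAMPLE_GRANULE_BYTES - 1) / EXAMPLE_GRANULE_BYTES;
      void *op = GC_objfreelist_ptr[lg];

      if (op != 0) {
          GC_objfreelist_ptr[lg] = *(void **)op;   /* unlink the head     */
          *(void **)op = 0;                        /* clear the link word */
          return op;
      }
      return GC_malloc(lb);                        /* slow path           */
  }
#endif
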
STATIC void * GC_generic_or_special_malloc(size_t lb, int knd)
{
    switch(knd) {
#     ifdef STUBBORN_ALLOC
        case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#     endif
      case PTRFREE:
          return(GC_malloc_atomic((size_t)lb));
      case NORMAL:
          return(GC_malloc((size_t)lb));
      case UNCOLLECTABLE:
          return(GC_malloc_uncollectable((size_t)lb));
#     ifdef ATOMIC_UNCOLLECTABLE
        case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#     endif /* ATOMIC_UNCOLLECTABLE */
      default:
          return(GC_generic_malloc(lb, knd));
    }
}
/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old object.       */
/* Shrinking of large blocks is not implemented well.                  */
GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
{
    struct hblk * h;
    hdr * hhdr;
    size_t sz;       /* Current size in bytes    */
    size_t orig_sz;  /* Original sz in bytes     */
    int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
        word descr;

        sz = (sz + HBLKSIZE - 1) & (~HBLKMASK);
        hhdr -> hb_sz = sz;
        descr = GC_obj_kinds[obj_kind].ok_descriptor;
        if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
        hhdr -> hb_descr = descr;
#       ifdef MARK_BIT_PER_OBJ
          GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
#       else
          GC_ASSERT(hhdr -> hb_large_block &&
                    hhdr -> hb_map[ANY_INDEX] == 1);
#       endif
        if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
        /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
              if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
                /* Clear unneeded part of object to avoid bogus pointer */
                /* tracing.                                             */
                /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
            void * result =
                    GC_generic_or_special_malloc((word)lb, obj_kind);

            if (result == 0) return(0);
                /* Could also return original object.  But this     */
                /* gives the client warning of imminent disaster.   */
            BCOPY(p, result, lb);
#           ifndef IGNORE_FREE
              GC_free(p);
#           endif
            return(result);
        }
    } else {
        /* grow */
        void * result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

        if (result == 0) return(0);
        BCOPY(p, result, sz);
#       ifndef IGNORE_FREE
          GC_free(p);
#       endif
        return(result);
    }
}
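
/* A minimal usage sketch: as with realloc(), the caller must adopt the  */
/* returned pointer, since the object is quite likely to move; on        */
/* failure the original object is left intact.  Guarded out of the       */
/* build: illustrative only.                                             */
#if 0
  static int *example_grow_array(int *arr, size_t new_n)
  {
      /* The kind is preserved: an array allocated with GC_malloc        */
      /* (pointer-containing) stays pointer-containing after the resize. */
      int *bigger = GC_realloc(arr, new_n * sizeof(int));

      return bigger != 0 ? bigger : arr;
          /* Never reuse the old pointer after a successful resize.      */
  }
#endif
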
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here. */
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, GC_DBG_RA "unknown", 0)

void * realloc(void * p, size_t lb)
{
    return(REDIRECT_REALLOC(p, lb));
}

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */
/* Allocate memory such that only pointers to near the         */
/* beginning of the object are considered.                     */
/* We avoid holding the allocation lock while we clear memory. */
GC_INNER void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
    void *result;
    size_t lg;
    size_t lb_rounded;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lg = ROUNDED_UP_GRANULES(lb);
    lb_rounded = GRANULES_TO_BYTES(lg);
    if (lb_rounded < lb)
        return((*GC_get_oom_fn())(lb));
    n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
    init = GC_obj_kinds[k].ok_init;
    if (EXPECT(GC_have_errors, FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    LOCK();
    result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
              ((word *)result)[0] = 0;
              ((word *)result)[1] = 0;
              ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
              ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
#           endif
        }
    }
    GC_bytes_allocd += lb_rounded;
    if (0 == result) {
        GC_oom_func oom_fn = GC_oom_fn;
        UNLOCK();
        return((*oom_fn)(lb));
    } else {
        UNLOCK();
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}

GC_API void * GC_CALL GC_malloc_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

GC_API void * GC_CALL GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
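
/* A minimal usage sketch: since only pointers to near the beginning of  */
/* the object are considered, the caller keeps a pointer to the base of  */
/* the object reachable for as long as it must survive; interior         */
/* cursors far into the object are not enough on their own.  Guarded     */
/* out of the build: illustrative only.                                  */
#if 0
  static double *example_big_vector_base;  /* keeps the base reachable   */

  static double *example_alloc_big_vector(size_t n)
  {
      /* Pointer-free data, so the atomic variant avoids scanning it.    */
      example_big_vector_base =
              GC_malloc_atomic_ignore_off_page(n * sizeof(double));
      return example_big_vector_base;
  }
#endif
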
/* Increment GC_bytes_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                        */
GC_API void GC_CALL GC_incr_bytes_allocd(size_t n)
{
    GC_bytes_allocd += n;
}

/* The same for GC_bytes_freed. */
GC_API void GC_CALL GC_incr_bytes_freed(size_t n)
{
    GC_bytes_freed += n;
}

# ifdef PARALLEL_MARK
    STATIC volatile AO_t GC_bytes_allocd_tmp = 0;
                        /* Number of bytes of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
# endif /* PARALLEL_MARK */
/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                       */
/* We assume that the size is a multiple of GRANULE_BYTES.              */
/* We return the free list by assigning it to *result, since it is not  */
/* safe to return (e.g.) a linked list of pointer-free objects: the     */
/* collector would not retain the entire list if it were invoked just   */
/* as we were returning.                                                */
/* Note that the client should usually clear the link field.            */
GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
{
    void *op;
    void *p;
    void **opp;
    size_t lw;      /* Length in words.     */
    size_t lg;      /* Length in granules.  */
    signed_word my_bytes_allocd = 0;
    struct obj_kind * ok = &(GC_obj_kinds[k]);
    DCL_LOCK_STATE;

    GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if (EXPECT(0 != op, TRUE))
            obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = BYTES_TO_WORDS(lb);
    lg = BYTES_TO_GRANULES(lb);
    if (EXPECT(GC_have_errors, FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    LOCK();
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    /* Do our share of marking work */
    if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
    }
    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.                                                  */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lg;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            GC_ASSERT(hhdr -> hb_sz == lb);
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                signed_word my_bytes_allocd_tmp =
                                (signed_word)AO_load(&GC_bytes_allocd_tmp);
                GC_ASSERT(my_bytes_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.   */
                /* Thus we can't accidentally adjust it down in more */
                /* than one thread simultaneously.                   */

                if (my_bytes_allocd_tmp != 0) {
                  (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                         (AO_t)(-my_bytes_allocd_tmp));
                  GC_bytes_allocd += my_bytes_allocd_tmp;
                }
                GC_acquire_mark_lock();
                ++ GC_fl_builder_count;
                UNLOCK();
                GC_release_mark_lock();
              }
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lb,
                                    ok -> ok_init, 0, &my_bytes_allocd);
            if (op != 0) {
              /* We also reclaimed memory, so we need to adjust   */
              /* that count.                                      */
              /* This should be atomic, so the results may be     */
              /* inaccurate.                                      */
              GC_bytes_found += my_bytes_allocd;
#             ifdef PARALLEL_MARK
                if (GC_parallel) {
                  *result = op;
                  (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                         (AO_t)my_bytes_allocd);
                  GC_acquire_mark_lock();
                  -- GC_fl_builder_count;
                  if (GC_fl_builder_count == 0) GC_notify_all_builder();
                  GC_release_mark_lock();
                  (void) GC_clear_stack(0);
                  return;
                }
#             endif
              GC_bytes_allocd += my_bytes_allocd;
              goto out;
            }
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                LOCK();
                /* GC lock is needed for reclaim list access.  We     */
                /* must decrement fl_builder_count before reacquiring */
                /* the GC lock.  Hopefully this path is rare.         */
              }
#           endif
        }
    }
    /* Next try to use a prefix of the global free list if there is one. */
    /* We don't refill it, but we need to use it up before allocating    */
    /* a new block ourselves.                                            */
    opp = &(GC_obj_kinds[k].ok_freelist[lg]);
    if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_bytes_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
            my_bytes_allocd += lb;
            if ((word)my_bytes_allocd >= HBLKSIZE) {
                *opp = obj_link(p);
                obj_link(p) = 0;
                break;
            }
        }
        GC_bytes_allocd += my_bytes_allocd;
        goto out;
    }
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lb, k, 0);
        if (h != 0) {
            if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
            GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                GC_acquire_mark_lock();
                ++ GC_fl_builder_count;
                UNLOCK();
                GC_release_mark_lock();

                op = GC_build_fl(h, lw,
                                 (ok -> ok_init || GC_debugging_started), 0);

                *result = op;
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                (void) GC_clear_stack(0);
                return;
              }
#           endif
            op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);
            goto out;
        }
    }

    /* As a last attempt, try allocating a single object.  Note that */
    /* this may trigger a collection or expand the heap.             */
    op = GC_generic_malloc_inner(lb, k);
    if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    (void) GC_clear_stack(0);
}
/* Note that the "atomic" version of this would be unsafe, since the */
/* links would not be seen by the collector.                         */
GC_API void * GC_CALL GC_malloc_many(size_t lb)
{
    void *result;
    GC_generic_malloc_many((lb + EXTRA_BYTES + GRANULE_BYTES-1)
                           & ~(GRANULE_BYTES-1),
                           NORMAL, &result);
    return result;
}
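
/* A minimal sketch of the intended usage pattern described above: the   */
/* client keeps a (typically thread-local) free list of same-sized       */
/* objects, replenishes it with GC_malloc_many, and pops objects off it, */
/* clearing the link stored in the first word of each object.  Locking   */
/* and thread-local-storage details are omitted.  Guarded out of the     */
/* build: illustrative only.                                             */
#if 0
# define EXAMPLE_OBJ_BYTES 32           /* fixed object size in bytes     */

  static void *example_free_list;       /* would normally be thread-local */

  static void *example_fast_alloc(void)
  {
      void *op = example_free_list;

      if (op == 0) {
          /* Refill: GC_malloc_many returns one or more objects linked   */
          /* through their first word, or 0 on failure.                  */
          example_free_list = GC_malloc_many(EXAMPLE_OBJ_BYTES);
          op = example_free_list;
          if (op == 0) return 0;
      }
      example_free_list = *(void **)op;  /* unlink the head               */
      *(void **)op = 0;                  /* clear the link word           */
      return op;
  }
#endif
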
/* Not well tested nor integrated.                */
/* Debug version is tricky and currently missing. */
#include <limits.h>

GC_API void * GC_CALL GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

    if (align <= GRANULE_BYTES) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) {
            return (*GC_get_oom_fn())(LONG_MAX-1024); /* Fail */
        }
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned. */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
        /* It is ok not to check result for NULL as in that case    */
        /* GC_memalign returns NULL too since (0 + 0 % align) is 0. */
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (void *) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
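
/* A minimal usage sketch: request a collectable buffer with a stricter  */
/* alignment than the allocation granule, e.g. for SIMD loads.  As the   */
/* code above shows, alignments beyond HBLKSIZE are rejected via the     */
/* out-of-memory handler.  Guarded out of the build: illustrative only.  */
#if 0
  static float *example_aligned_buffer(size_t n)
  {
      /* 64-byte alignment, chosen purely for the sake of the example.   */
      return GC_memalign(64, n * sizeof(float));
  }
#endif
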
/* This one exists largely to redirect posix_memalign for leak finding. */
GC_API int GC_CALL GC_posix_memalign(void **memptr, size_t align, size_t lb)
{
    /* Check alignment properly. */
    if (((align - 1) & align) != 0 || align < sizeof(void *)) {
#     ifdef MSWINCE
        return ERROR_INVALID_PARAMETER;
#     else
        return EINVAL;
#     endif
    }

    if ((*memptr = GC_memalign(align, lb)) == NULL) {
#     ifdef MSWINCE
        return ERROR_NOT_ENOUGH_MEMORY;
#     else
        return ENOMEM;
#     endif
    }
    return 0;
}
#ifdef ATOMIC_UNCOLLECTABLE
  /* Allocate lb bytes of pointer-free, untraced, uncollectable data.  */
  /* This is normally roughly equivalent to the system malloc.         */
  /* But it may be useful if malloc is redefined.                      */
  GC_API void * GC_CALL GC_malloc_atomic_uncollectable(size_t lb)
  {
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
        GC_DBG_COLLECT_AT_MALLOC(lb);
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
            /* We don't need the extra byte, since this won't be */
            /* collected anyway.                                 */
        lg = GC_size_map[lb];
        opp = &(GC_auobjfreelist[lg]);
        LOCK();
        op = *opp;
        if (EXPECT(0 != op, TRUE)) {
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
        return((void *) op);
    } else {
        hdr * hhdr;

        op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        if (0 == op) return(0);

        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0);
        hhdr = HDR(op);

        LOCK();
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
#       ifndef THREADS
          GC_ASSERT(hhdr -> hb_n_marks == 0);
#       endif
        hhdr -> hb_n_marks = 1;
        UNLOCK();
        return((void *) op);
    }
  }
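
  /* A minimal usage sketch: uncollectable data is never reclaimed        */
  /* automatically, so if it is ever to be released it must be passed to  */
  /* GC_free explicitly, much like memory from the system malloc.         */
  /* Guarded out of the build: illustrative only.                         */
# if 0
    static unsigned char *example_io_buffer;

    static void example_make_buffer(size_t n)
    {
        /* Pointer-free and untraced: the collector neither scans it nor  */
        /* frees it behind the client's back, so it can serve as a        */
        /* long-lived buffer handed to code the collector cannot see.     */
        example_io_buffer = GC_malloc_atomic_uncollectable(n);
    }

    static void example_drop_buffer(void)
    {
        GC_free(example_io_buffer);    /* explicit release is required    */
        example_io_buffer = 0;
    }
# endif
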
#endif /* ATOMIC_UNCOLLECTABLE */
/* Provide a version of strdup() that uses the collector to allocate
   the copy of the string. */
GC_API char * GC_CALL GC_strdup(const char *s)
{
  char *copy;
  size_t lb;
  if (s == NULL) return NULL;
  lb = strlen(s) + 1;
  if ((copy = GC_malloc_atomic(lb)) == NULL) {
#   ifndef MSWINCE
      errno = ENOMEM;
#   endif
    return NULL;
  }
  BCOPY(s, copy, lb);
  return copy;
}

GC_API char * GC_CALL GC_strndup(const char *str, size_t size)
{
  char *copy;
  size_t len = strlen(str); /* str is expected to be non-NULL */
  if (len > size)
    len = size;
  copy = GC_malloc_atomic(len + 1);
  if (copy == NULL) {
#   ifndef MSWINCE
      errno = ENOMEM;
#   endif
    return NULL;
  }
  BCOPY(str, copy, len);
  copy[len] = '\0';
  return copy;
}
#ifdef GC_REQUIRE_WCSDUP
# include <wchar.h> /* for wcslen() */

  GC_API wchar_t * GC_CALL GC_wcsdup(const wchar_t *str)
  {
    size_t lb = (wcslen(str) + 1) * sizeof(wchar_t);
    wchar_t *copy = GC_malloc_atomic(lb);
    if (copy == NULL) {
#     ifndef MSWINCE
        errno = ENOMEM;
#     endif
      return NULL;
    }
    BCOPY(str, copy, lb);
    return copy;
  }
#endif /* GC_REQUIRE_WCSDUP */