/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */
#include <stdio.h>
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c */
GC_bool GC_alloc_reclaim_list();        /* in malloc.c */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h */
/* or introducing dependencies on internal data structure layouts. */
ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
GC_PTR GC_generic_or_special_malloc(lb, knd)
word lb;
int knd;
{
    switch(knd) {
#       ifdef STUBBORN_ALLOC
        case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#       endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
        case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb, knd));
    }
}
/* Change the size of the block pointed to by p to contain at least */
/* lb bytes.  The object may be (and quite likely will be) moved. */
/* The kind (e.g. atomic) is the same as that of the old object. */
/* Shrinking of large blocks is not implemented well. */
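/* A minimal usage sketch (hypothetical client code, not part of this */
/* file; buf, old_lb, and handle_oom are illustrative names).  On */
/* success the old pointer must not be used again, since the object */
/* may have moved: */
/*								       */
/*	buf = GC_realloc(buf, 2 * old_lb);			       */
/*	if (buf == 0) return handle_oom();			       */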
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p, lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;        /* Current size in bytes        */
register word orig_sz;   /* Original sz in bytes         */
int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
          register word descr;

          sz = (sz + HBLKSIZE - 1) & (~HBLKMASK);
          hhdr -> hb_sz = BYTES_TO_WORDS(sz);
          descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
                /* Clear unneeded part of object to avoid bogus pointer */
                /* tracing. */
                /* Safe for stubborn objects. */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
              GC_PTR result =
                        GC_generic_or_special_malloc((word)lb, obj_kind);

              if (result == 0) return(0);
                  /* Could also return the original object.  But this */
                  /* gives the client warning of imminent disaster. */
              BCOPY(p, result, lb);
#             ifndef IGNORE_FREE
                GC_free(p);
#             endif
              return(result);
        }
    } else {
        /* grow */
          GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

          if (result == 0) return(0);
          BCOPY(p, result, sz);
#         ifndef IGNORE_FREE
            GC_free(p);
#         endif
          return(result);
    }
}
# if defined(REDIRECT_MALLOC) || defined(REDIRECT_REALLOC)
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p, lb)
    GC_PTR p;
    size_t lb;
# endif
{
#   ifdef REDIRECT_REALLOC
        return(REDIRECT_REALLOC(p, lb));
#   else
        return(GC_realloc(p, lb));
#   endif
}
# endif /* REDIRECT_MALLOC || REDIRECT_REALLOC */
/* The same thing, except caller does not hold allocation lock. */
/* We avoid holding allocation lock while we clear memory. */
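/* ("Ignore off page" refers to the contract documented for */
/* GC_malloc_ignore_off_page in gc.h: the client promises that, while */
/* the object is live, a pointer to somewhere near its beginning will */
/* be maintained, so the collector need not track interior pointers */
/* far into the object.  See gc.h for the exact rule.) */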
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    word lw;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock. */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[lw-1] = 0;
                ((word *)result)[lw-2] = 0;
#           endif
        }
    }
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}
# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}
# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
/* Increment GC_words_allocd from code that doesn't have direct access */
/* to GC_arrays. */
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed. */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */
/* Analogous to the above, but assumes a small object size, and */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h. */
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
register ptr_t op;
register ptr_t *opp;
register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
        }
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}
/* Analogous to the above, but assumes a small object size, and */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h. */
#ifdef __STDC__
ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
ptr_t GC_generic_malloc_words_small(lw, k)
register word lw;
register int k;
#endif
{
register ptr_t op;
DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}
#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;   /* Protected by GC lock. */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
                        /* Number of words of memory allocated since */
                        /* we released the GC lock.  Instead of */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire */
                        /* the lock.  (Atomically adding it doesn't */
                        /* work, since we would have to atomically */
                        /* update it in GC_malloc, which is too */
                        /* expensive.) */
#endif /* PARALLEL_MARK */

/* See reclaim.c: */
extern ptr_t GC_reclaim_generic();
/* Return a list of 1 or more objects of the indicated size, linked */
/* through the first word in the object.  This has the advantage that */
/* it acquires the allocation lock only once, and may greatly reduce */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would */
/* keep its own free list in thread-local storage, and call */
/* GC_malloc_many or friends to replenish it.  (We do not round up */
/* object sizes, since a call indicates the intention to consume many */
/* objects of exactly this size.) */
/* Note that the client should usually clear the link field. */
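/* A minimal sketch of that usage pattern (hypothetical client code: */
/* tls_free_list and gc_local_alloc are illustrative names; GC_NEXT */
/* is the link-field accessor that gc.h pairs with GC_malloc_many): */
/*								       */
/*	static void * tls_free_list = 0;    one list per thread        */
/*								       */
/*	void * gc_local_alloc(size_t lb)			       */
/*	{							       */
/*	    void * op = tls_free_list;				       */
/*	    if (op == 0) {					       */
/*		op = GC_malloc_many(lb);			       */
/*		if (op == 0) return 0;				       */
/*	    }							       */
/*	    tls_free_list = GC_NEXT(op);			       */
/*	    GC_NEXT(op) = 0;	    clear the link field, as above     */
/*	    return op;						       */
/*	}							       */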
ptr_t GC_generic_malloc_many(lb, k)
register word lb;
register int k;
{
ptr_t op;
ptr_t p;
ptr_t *opp;
word lw;
word my_words_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;

#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if (0 != op) obj_link(op) = 0;
        return(op);
    }
    lw = ALIGNED_WORDS(lb);
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed. */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lw;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
#           ifdef PARALLEL_MARK
            {
                signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

                GC_ASSERT(my_words_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock. */
                /* Thus we can't accidentally adjust it down in more */
                /* than one thread simultaneously. */
                if (my_words_allocd_tmp != 0) {
                  (void)GC_atomic_add(
                                (volatile GC_word *)(&GC_words_allocd_tmp),
                                (GC_word)(-my_words_allocd_tmp));
                  GC_words_allocd += my_words_allocd_tmp;
                }
            }
            GC_acquire_mark_lock();
            ++ GC_fl_builder_count;
            UNLOCK();
            ENABLE_SIGNALS();
            GC_release_mark_lock();
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lw,
                                    ok -> ok_init, 0 COUNT_ARG);
            if (op != 0) {
#             ifdef NEED_TO_COUNT
                /* We are neither gathering statistics, nor marking in */
                /* parallel.  Thus GC_reclaim_generic doesn't count */
                /* for us. */
                for (p = op; p != 0; p = obj_link(p)) {
                  my_words_allocd += lw;
                }
#             endif
#             if defined(GATHERSTATS)
                /* We also reclaimed memory, so we need to adjust */
                /* that count. */
                /* This update should be atomic, but is not; the */
                /* result may therefore be slightly inaccurate. */
                GC_mem_found += my_words_allocd;
#             endif
#             ifdef PARALLEL_MARK
                (void)GC_atomic_add(
                                (volatile GC_word *)(&GC_words_allocd_tmp),
                                (GC_word)(my_words_allocd));
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                return GC_clear_stack(op);
#             else
                GC_words_allocd += my_words_allocd;
                goto out;
#             endif
            }
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              DISABLE_SIGNALS();
              LOCK();
              /* The GC lock is needed for reclaim list access.  We */
              /* must decrement fl_builder_count before reacquiring */
              /* the GC lock.  Hopefully this path is rare. */
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one. */
    /* We don't refill it, but we need to use it up before allocating */
    /* a new block ourselves. */
      opp = &(GC_obj_kinds[k].ok_freelist[lw]);
      if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_words_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
            my_words_allocd += lw;
            if (my_words_allocd >= BODY_SZ) {
                *opp = obj_link(p);
                obj_link(p) = 0;
                break;
            }
        }
        GC_words_allocd += my_words_allocd;
        goto out;
      }
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lw, k, 0);
        if (h != 0) {
            if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
            GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
                               - BYTES_TO_WORDS(HBLKSIZE) % lw;
#           ifdef PARALLEL_MARK
                GC_acquire_mark_lock();
                ++ GC_fl_builder_count;
                UNLOCK();
                ENABLE_SIGNALS();
                GC_release_mark_lock();
#           endif

            op = GC_build_fl(h, lw, ok -> ok_init, 0);
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              return GC_clear_stack(op);
#           else
              goto out;
#           endif
        }
    }

    /* As a last attempt, try allocating a single object.  Note that */
    /* this may trigger a collection or expand the heap. */
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;

  out:
    UNLOCK();
    ENABLE_SIGNALS();
    return(GC_clear_stack(op));
}
GC_PTR GC_malloc_many(size_t lb)
{
    return(GC_generic_malloc_many(lb, NORMAL));
}

/* Note that the "atomic" version of this would be unsafe, since the */
/* links would not be seen by the collector. */
# endif /* THREADS && !SRC_M3 */
/* Allocate lb bytes of pointerful, traced, but not collectable data. */
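/* Such objects behave as roots: the collector traces them for */
/* pointers but never reclaims them, so they must be released with */
/* GC_free.  A minimal sketch (hypothetical client code; struct node */
/* is an illustrative type with a pointer field named data): */
/*								       */
/*	struct node * r = GC_malloc_uncollectable(sizeof(struct node)); */
/*	r -> data = GC_malloc(100);    kept live because r is traced  */
/*	...							       */
/*	GC_free(r);		       must be freed explicitly        */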
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be */
                  /* collected anyway. */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See the comment on signals in malloc.c. */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be */
            /* cleared only temporarily during a collection, as a */
            /* result of the normal free list mark bit clearing. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised */
    /* pointer.  We do need to hold the lock while we adjust */
    /* mark bits. */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}
# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointer-free, untraced, uncollectable data. */
/* This is normally roughly equivalent to the system malloc. */
/* But it may be useful if malloc is redefined. */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be */
                  /* collected anyway. */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See the comment on signals in malloc.c. */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised */
    /* pointer.  We do need to hold the lock while we adjust */
    /* mark bits. */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}
#endif /* ATOMIC_UNCOLLECTABLE */