/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */
#include <stdio.h>
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c. */
GC_bool GC_alloc_reclaim_list();        /* in malloc.c */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
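
/* Illustrative sketch only, not part of the collector: an inline      */
/* allocator built on the pointers above might pop the head of the     */
/* NORMAL free list for a given word count roughly as follows.  The    */
/* function name is invented for this example, locking and             */
/* initialization checks are omitted, and the real protocol lives in   */
/* gc_inline.h.  Objects on these lists are linked through their       */
/* first word.                                                         */
#if 0
static void * example_inline_alloc(size_t n_words)
{
    void **flh = (void **)&GC_objfreelist_ptr[n_words];
    void *op = *flh;

    if (op == 0) {
        /* Free list empty: fall back to an out-of-line allocation. */
        return GC_malloc(n_words * sizeof(GC_word));
    }
    *flh = *(void **)op;                /* unlink the first object     */
    *(void **)op = 0;                   /* clear the link field        */
    GC_incr_words_allocd(n_words);      /* defined later in this file  */
    return op;
}
#endif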
GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#       ifdef STUBBORN_ALLOC
          case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#       endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
          case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb,knd));
    }
}
/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old.              */
/* Shrinking of large blocks is not implemented well.                  */
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;        /* Current size in bytes       */
register word orig_sz;   /* Original sz in bytes        */
int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
          register word descr;

          sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
          hhdr -> hb_sz = BYTES_TO_WORDS(sz);
          descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
              /* Clear unneeded part of object to avoid bogus pointer */
              /* tracing.                                             */
              /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
              GC_PTR result =
                        GC_generic_or_special_malloc((word)lb, obj_kind);

              if (result == 0) return(0);
                  /* Could also return original object.  But this     */
                  /* gives the client warning of imminent disaster.   */
              BCOPY(p, result, lb);
#             ifndef IGNORE_FREE
                GC_free(p);
#             endif
              return(result);
        }
    } else {
        /* grow */
          GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

          if (result == 0) return(0);
          BCOPY(p, result, sz);
#         ifndef IGNORE_FREE
            GC_free(p);
#         endif
          return(result);
    }
}
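
/* Illustrative sketch, not part of the collector: typical client use  */
/* of GC_realloc to grow a collectable buffer.  The helper name is     */
/* invented for this example.  As the code above shows, a failed       */
/* reallocation returns 0 before the old object is freed, so the       */
/* original buffer remains valid in that case.                         */
#if 0
static GC_PTR example_grow(GC_PTR buf, size_t new_size)
{
    GC_PTR p = GC_realloc(buf, new_size);

    if (p == 0) return buf;     /* old object untouched on failure */
    return p;                   /* contents copied; may have moved */
}
#endif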
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here.       */
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, RA "unknown", 0)

# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
    return(REDIRECT_REALLOC(p, lb));
}

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */
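
/* Illustrative sketch, not part of the collector: if the library is   */
/* built with REDIRECT_MALLOC and REDIRECT_REALLOC defined (a build    */
/* configuration, not something this file arranges), ordinary calls to */
/* the C library allocator end up in the collector via wrappers like   */
/* the one above.                                                      */
#if 0
static void example_redirected(void)
{
    char *s = (char *)malloc(16);       /* routed to the collector when */
                                        /* REDIRECT_MALLOC is defined   */
    s = (char *)realloc(s, 64);         /* reaches REDIRECT_REALLOC,    */
                                        /* i.e. GC_realloc by default   */
    s[0] = '\0';
}
#endif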
/* The same thing, except caller does not hold allocation lock.        */
/* We avoid holding allocation lock while we clear memory.             */
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    word lw;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[lw-1] = 0;
                ((word *)result)[lw-2] = 0;
#           endif
        }
    }
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}
# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
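
/* Illustrative sketch, not part of the collector: the _ignore_off_page */
/* entry points are meant for large objects for which the client keeps  */
/* a pointer to (or near) the beginning of the object, so the collector */
/* need not honor arbitrary interior pointers into them.  The names     */
/* below are invented for this example.                                 */
#if 0
static char *example_big_buffer;        /* always points at the start */

static void example_alloc_big(void)
{
    /* One megabyte of pointer-free storage, referenced by its start. */
    example_big_buffer = (char *)GC_malloc_atomic_ignore_off_page(1 << 20);
}
#endif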
/* Increment GC_words_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                        */
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed.                           */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */
/* Analogous to the above, but assumes a small object size, and        */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.               */
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
register ptr_t op;
register ptr_t *opp;
register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
        }
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}
/* Analogous to the above, but assumes a small object size, and        */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.               */
#ifdef __STDC__
     ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
     ptr_t GC_generic_malloc_words_small(lw, k)
     register word lw;
     register int k;
#endif
{
register ptr_t op;
DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}
#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;   /* Protected by GC lock.  */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
                        /* Number of words of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                   */
#endif /* PARALLEL_MARK */

/* See reclaim.c: */
extern ptr_t GC_reclaim_generic();
/* Return a list of 1 or more objects of the indicated size, linked    */
/* through the first word in the object.  This has the advantage that  */
/* it acquires the allocation lock only once, and may greatly reduce   */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would */
/* keep its own free list in thread-local storage, and call            */
/* GC_malloc_many or friends to replenish it.  (We do not round up     */
/* object sizes, since a call indicates the intention to consume many  */
/* objects of exactly this size.)                                      */
/* We return the free list by assigning it to *result, since it is     */
/* not safe to return, e.g. a linked list of pointer-free objects,     */
/* since the collector would not retain the entire list if it were     */
/* invoked just as we were returning.                                  */
/* Note that the client should usually clear the link field.           */
void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
ptr_t *result;
{
ptr_t op;
ptr_t p;
ptr_t *opp;
word lw;
word my_words_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;

#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = ALIGNED_WORDS(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be     */
    /* reclaimed.                                                      */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lw;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              {
                signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

                GC_ASSERT(my_words_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.     */
                /* Thus we can't accidentally adjust it down in more   */
                /* than one thread simultaneously.                     */
                if (my_words_allocd_tmp != 0) {
                  (void)GC_atomic_add(
                              (volatile GC_word *)(&GC_words_allocd_tmp),
                              (GC_word)(-my_words_allocd_tmp));
                  GC_words_allocd += my_words_allocd_tmp;
                }
              }
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              ENABLE_SIGNALS();
              GC_release_mark_lock();
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lw,
                                    ok -> ok_init, 0 COUNT_ARG);
            if (op != 0) {
#             ifdef NEED_TO_COUNT
                /* We are neither gathering statistics, nor marking in */
                /* parallel.  Thus GC_reclaim_generic doesn't count    */
                /* for us.                                             */
                for (p = op; p != 0; p = obj_link(p)) {
                  my_words_allocd += lw;
                }
#             endif
#             if defined(GATHERSTATS)
                /* We also reclaimed memory, so we need to adjust      */
                /* that count.                                         */
                /* This should be atomic, so the results may be        */
                /* inaccurate.                                         */
                GC_mem_found += my_words_allocd;
#             endif
#             ifdef PARALLEL_MARK
                *result = op;
                (void)GC_atomic_add(
                                (volatile GC_word *)(&GC_words_allocd_tmp),
                                (GC_word)(my_words_allocd));
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                (void) GC_clear_stack(0);
                return;
#             else
                GC_words_allocd += my_words_allocd;
                goto out;
#             endif
            }
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              DISABLE_SIGNALS();
              LOCK();
              /* GC lock is needed for reclaim list access.  We        */
              /* must decrement fl_builder_count before reacquiring GC */
              /* lock.  Hopefully this path is rare.                   */
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.     */
    /* We don't refill it, but we need to use it up before allocating  */
    /* a new block ourselves.                                          */
      opp = &(GC_obj_kinds[k].ok_freelist[lw]);
      if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_words_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_words_allocd += lw;
          if (my_words_allocd >= BODY_SZ) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
          }
        }
        GC_words_allocd += my_words_allocd;
        goto out;
      }
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lw, k, 0);
        if (h != 0) {
          if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
          GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
                               - BYTES_TO_WORDS(HBLKSIZE) % lw;
#         ifdef PARALLEL_MARK
            GC_acquire_mark_lock();
            ++ GC_fl_builder_count;
            UNLOCK();
            ENABLE_SIGNALS();
            GC_release_mark_lock();
#         endif

          op = GC_build_fl(h, lw, ok -> ok_init, 0);
#         ifdef PARALLEL_MARK
            *result = op;
            GC_acquire_mark_lock();
            -- GC_fl_builder_count;
            if (GC_fl_builder_count == 0) GC_notify_all_builder();
            GC_release_mark_lock();
            (void) GC_clear_stack(0);
            return;
#         else
            goto out;
#         endif
        }
    }
    /* As a last attempt, try allocating a single object.  Note that   */
    /* this may trigger a collection or expand the heap.               */
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    ENABLE_SIGNALS();
    (void) GC_clear_stack(0);
}
GC_PTR GC_malloc_many(size_t lb)
{
ptr_t result;
GC_generic_malloc_many(lb, NORMAL, &result);
return result;
}
/* Note that the "atomic" version of this would be unsafe, since the   */
/* links would not be seen by the collector.                           */
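
/* Illustrative sketch, not part of the collector: a client thread     */
/* might keep a private free list and refill it from GC_malloc_many,   */
/* as the comment before GC_generic_malloc_many describes.  The helper */
/* and the (notionally thread-local) list head are invented for this   */
/* example.                                                            */
#if 0
static void *example_free_list = 0;     /* really thread-local storage */

static void * example_alloc_obj(void)
{
    void *op = example_free_list;

    if (op == 0) {
        example_free_list = GC_malloc_many(16);
        op = example_free_list;
        if (op == 0) return 0;          /* allocation failed */
    }
    example_free_list = *(void **)op;   /* objects are linked through  */
                                        /* their first word            */
    *(void **)op = 0;                   /* clear the link field        */
    return op;
}
#endif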
# endif
/* Allocate lb bytes of pointerful, traced, but not collectable data   */
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be */
                  /* collected anyway.                                 */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.    */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be      */
            /* cleared only temporarily during a collection, as a      */
            /* result of the normal free list mark bit clearing.       */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}
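
/* Illustrative sketch, not part of the collector: uncollectable       */
/* objects are traced (pointers stored in them keep other objects      */
/* live) but are never reclaimed automatically, so the client releases */
/* them explicitly with GC_free.  The function name is invented for    */
/* this example.                                                       */
#if 0
static void example_uncollectable(void)
{
    void **root = (void **)GC_malloc_uncollectable(10 * sizeof(void *));

    root[0] = GC_malloc(100);   /* stays live while reachable from root */
    /* ... */
    GC_free(root);              /* must be freed explicitly             */
}
#endif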
#ifdef __STDC__
/* Not well tested nor integrated.      */
/* Debug version is tricky and currently missing.       */
#include <limits.h>

GC_PTR GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

#   ifdef ALIGN_DOUBLE
        if (align <= WORDS_TO_BYTES(2) && lb > align) return GC_malloc(lb);
#   endif
    if (align <= WORDS_TO_BYTES(1)) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned.        */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (GC_PTR) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
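
/* Illustrative sketch, not part of the collector: requesting          */
/* collectable storage whose address is a multiple of a chosen         */
/* alignment.  The 64-byte figure is just an example.                  */
#if 0
static void example_memalign(void)
{
    void *p = GC_memalign(64, 1000);

    GC_ASSERT((word)p % 64 == 0);
}
#endif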
#endif /* __STDC__ */

# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data      */
/* This is normally roughly equivalent to the system malloc.           */
/* But it may be useful if malloc is redefined.                        */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be */
                  /* collected anyway.                                 */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.    */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */