/****************************************************************************

Interface between g++ and Boehm GC

    Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.

    THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
    OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.

    Permission is hereby granted to copy this code for any purpose,
    provided the above notices are retained on all copies.

    Last modified on Sun Jul 16 23:21:14 PDT 1995 by ellis

This module provides runtime support for implementing the
Ellis/Detlefs GC proposal, "Safe, Efficient Garbage Collection for
C++", within g++, using its -fgc-keyword extension.  It defines
versions of __builtin_new, __builtin_new_gc, __builtin_vec_new,
__builtin_vec_new_gc, __builtin_delete, and __builtin_vec_delete that
invoke the Boehm GC.  It also implements the WeakPointer.h interface.

This module assumes the following configuration options of the Boehm GC:

    -DALL_INTERIOR_POINTERS
    -DDONT_ADD_BYTE_AT_END

This module adds its own required padding to the end of objects to
support C/C++ "one-past-the-object" pointer semantics.

****************************************************************************/

#include <stddef.h>
#include "gc.h"

#if defined(__STDC__)
#   define PROTO( args ) args
#else
#   define PROTO( args ) ()
#endif

#define BITSPERBYTE 8
    /* What's the portable way to do this? */


typedef void (*vfp) PROTO(( void ));
extern vfp __new_handler;
extern void __default_new_handler PROTO(( void ));

/* A destructor_proc is the compiler generated procedure representing a
   C++ destructor.  The "flag" argument is a hidden argument following some
   compiler convention. */

typedef void (*destructor_proc) PROTO(( void* this, int flag ));

/****************************************************************************

A BI_header is the header the compiler adds to the front of
new-allocated arrays of objects with destructors.  The header is
padded out to a double, because that's what the compiler does to
ensure proper alignment of array elements on some architectures.

int NUM_ARRAY_ELEMENTS (void* o)
    returns the number of array elements for array object o.

char* FIRST_ELEMENT_P (void* o)
    returns the address of the first element of array object o.

***************************************************************************/

typedef struct BI_header {
    int nelts;
    char padding [sizeof( double ) - sizeof( int )];
        /* Better way to do this? */
} BI_header;

#define NUM_ARRAY_ELEMENTS( o ) \
  (((BI_header*) o)->nelts)

#define FIRST_ELEMENT_P( o ) \
  ((char*) o + sizeof( BI_header ))

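/* Illustrative only (an assumed layout sketch, derived from the macros above
   and the Descriptor defined below): given the base pointer o of a
   new[]-allocated array of objects with destructors,

       o                    --> BI_header  { nelts, padding to a double }
       FIRST_ELEMENT_P( o ) --> element 0, element 1, ..., element nelts-1
       DESCRIPTOR( o )      --> trailing Descriptor word

   so NUM_ARRAY_ELEMENTS( o ) reads the element count stored at the front of
   the block, and FIRST_ELEMENT_P( o ) simply skips the header. */
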
/****************************************************************************

The __builtin_new routines add a descriptor word to the end of each
object.  The descriptor serves two purposes.

First, the descriptor acts as padding, implementing C/C++ pointer
semantics.  C and C++ allow a valid array pointer to be incremented
one past the end of an object.  The extra padding ensures that the
collector will recognize that such a pointer points to the object and
not the next object in memory.

Second, the descriptor stores three extra pieces of information:
whether an object has a registered finalizer (destructor), whether it
may have any weak pointers referencing it, and, for collectible arrays,
the element size of the array.  The element size is required for the
array's finalizer to iterate through the elements of the array.  (An
alternative design would have the compiler generate a finalizer
procedure for each different array type.  But given the overhead of
finalization, there isn't any efficiency to be gained by that.)

The descriptor must be added to non-collectible as well as collectible
objects, since the Ellis/Detlefs proposal allows a "pointer to gc T" to
be assigned to a "pointer to T", which could then be deleted.  Thus,
__builtin_delete must determine at runtime whether an object is
collectible, whether it has weak pointers referencing it, and whether
it may have a finalizer that needs unregistering.  Though
GC_REGISTER_FINALIZER doesn't care if you ask it to unregister a
finalizer for an object that doesn't have one, it is a non-trivial
procedure that does a hash look-up, etc.  The descriptor trades a
little extra space for a significant savings in time on the fast path
through delete.  (A similar argument applies to
GC_UNREGISTER_DISAPPEARING_LINK.)

For non-array types, the space for the descriptor could be shrunk to a
single byte for storing the "has finalizer" flag.  But this would save
space only on arrays of char (whose size is not a multiple of the word
size) and structs whose largest member is less than a word in size
(very infrequent).  And it would require that programmers actually
remember to call "delete[]" instead of "delete" (which they should,
but there are probably lots of buggy programs out there).  For the
moment, the space savings do not seem worthwhile, especially
considering that the Boehm GC is already quite space-competitive with
other mallocs.

Given a pointer o to the base of an object:

Descriptor* DESCRIPTOR (void* o)
    returns a pointer to the descriptor for o.

The implementation of descriptors relies on the fact that the GC
implementation allocates objects in units of the machine's natural
word size (e.g. 32 bits on a SPARC, 64 bits on an Alpha).

**************************************************************************/

typedef struct Descriptor {
    unsigned has_weak_pointers: 1;
    unsigned has_finalizer: 1;
    unsigned element_size: BITSPERBYTE * sizeof( unsigned ) - 2;
} Descriptor;

#define DESCRIPTOR( o ) \
  ((Descriptor*) ((char*)(o) + GC_size( o ) - sizeof( Descriptor )))

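/* Illustrative only: every __builtin_new* routine below asks the collector
   for size + sizeof( Descriptor ) bytes, so DESCRIPTOR( o ) always lands at
   the very end of the block the collector reports for o (GC_size), past the
   caller-visible bytes.  A sketch of a reader (mirroring __builtin_delete
   further down):

       Descriptor* desc = DESCRIPTOR( o );
       if (desc->has_finalizer) { ... unregister o's finalizer ... }
       if (! desc->has_weak_pointers) { ... safe to GC_FREE( o ) ... }
*/
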
/**************************************************************************

Implementations of global operator new() and operator delete()

***************************************************************************/

void* __builtin_new( size )
    size_t size;
    /*
    For non-gc non-array types, the compiler generates calls to
    __builtin_new, which allocates non-collected storage via
    GC_MALLOC_UNCOLLECTABLE.  This ensures that the non-collected
    storage will be part of the collector's root set, required by the
    Ellis/Detlefs semantics. */
{
    vfp handler = __new_handler ? __new_handler : __default_new_handler;

    while (1) {
        void* o = GC_MALLOC_UNCOLLECTABLE( size + sizeof( Descriptor ) );
        if (o != 0) return o;
        (*handler) ();}}

void* __builtin_vec_new( size )
    size_t size;
    /*
    For non-gc array types, the compiler generates calls to
    __builtin_vec_new. */
{
    return __builtin_new( size );}

void* __builtin_new_gc( size )
    size_t size;
    /*
    For gc non-array types, the compiler generates calls to
    __builtin_new_gc, which allocates collected storage via
    GC_MALLOC. */
{
    vfp handler = __new_handler ? __new_handler : __default_new_handler;

    while (1) {
        void* o = GC_MALLOC( size + sizeof( Descriptor ) );
        if (o != 0) return o;
        (*handler) ();}}

void* __builtin_new_gc_a( size )
    size_t size;
    /*
    For non-pointer-containing gc non-array types, the compiler
    generates calls to __builtin_new_gc_a, which allocates collected
    storage via GC_MALLOC_ATOMIC. */
{
    vfp handler = __new_handler ? __new_handler : __default_new_handler;

    while (1) {
        void* o = GC_MALLOC_ATOMIC( size + sizeof( Descriptor ) );
        if (o != 0) return o;
        (*handler) ();}}

void* __builtin_vec_new_gc( size )
    size_t size;
    /*
    For gc array types, the compiler generates calls to
    __builtin_vec_new_gc. */
{
    return __builtin_new_gc( size );}

void* __builtin_vec_new_gc_a( size )
    size_t size;
    /*
    For non-pointer-containing gc array types, the compiler generates
    calls to __builtin_vec_new_gc_a. */
{
    return __builtin_new_gc_a( size );}

static void call_destructor( o, data )
    void* o;
    void* data;
    /*
    call_destructor is the GC finalizer proc registered for non-array
    gc objects with destructors.  Its client data is the destructor
    proc, which it calls with the magic integer 2, a special flag
    obeying the compiler convention for destructors. */
{
    ((destructor_proc) data)( o, 2 );}

void* __builtin_new_gc_dtor( o, d )
    void* o;
    destructor_proc d;
    /*
    The compiler generates a call to __builtin_new_gc_dtor to register
    the destructor "d" of a non-array gc object "o" as a GC finalizer.
    The destructor is registered via
    GC_REGISTER_FINALIZER_IGNORE_SELF, which causes the collector to
    ignore pointers from the object to itself when determining when
    the object can be finalized.  This is necessary due to the self
    pointers used in the internal representation of multiply-inherited
    objects. */
{
    Descriptor* desc = DESCRIPTOR( o );

    GC_REGISTER_FINALIZER_IGNORE_SELF( o, call_destructor, d, 0, 0 );
    desc->has_finalizer = 1;}

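/* Illustrative only (an assumed compiler-generated sequence; T and
   T_destructor are hypothetical names): for "new (gc) T" where T has a
   destructor, the generated code amounts to roughly

       T* p = (T*) __builtin_new_gc( sizeof( T ) );
       ... run T's constructor on p ...
       __builtin_new_gc_dtor( p, T_destructor );

   i.e. the finalizer is registered through the entry point above once the
   object exists. */
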
static void call_array_destructor( o, data )
    void* o;
    void* data;
    /*
    call_array_destructor is the GC finalizer proc registered for gc
    array objects whose elements have destructors.  Its client data is
    the destructor proc.  It iterates through the elements of the
    array in reverse order, calling the destructor on each. */
{
    int num = NUM_ARRAY_ELEMENTS( o );
    Descriptor* desc = DESCRIPTOR( o );
    size_t size = desc->element_size;
    char* first_p = FIRST_ELEMENT_P( o );
    char* p = first_p + (num - 1) * size;

    if (num > 0) {
        while (1) {
            ((destructor_proc) data)( p, 2 );
            if (p == first_p) break;
            p -= size;}}}

void* __builtin_vec_new_gc_dtor( first_elem, d, element_size )
    void* first_elem;
    destructor_proc d;
    size_t element_size;
    /*
    The compiler generates a call to __builtin_vec_new_gc_dtor to
    register the destructor "d" of a gc array object as a GC
    finalizer.  "first_elem" points to the first element of the array,
    *not* the beginning of the object (this makes the generated call
    to this function smaller).  The elements of the array are of size
    "element_size".  The destructor is registered as in
    __builtin_new_gc_dtor. */
{
    void* o = (char*) first_elem - sizeof( BI_header );
    Descriptor* desc = DESCRIPTOR( o );

    GC_REGISTER_FINALIZER_IGNORE_SELF( o, call_array_destructor, d, 0, 0 );
    desc->element_size = element_size;
    desc->has_finalizer = 1;}

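/* Illustrative only (an assumed compiler-generated sequence; T, n and
   T_destructor are hypothetical names): for "new (gc) T[n]" where T has a
   destructor, the generated code amounts to roughly

       void* base = __builtin_vec_new_gc( sizeof( BI_header ) + n * sizeof( T ) );
       NUM_ARRAY_ELEMENTS( base ) = n;
       T* elems = (T*) FIRST_ELEMENT_P( base );
       ... construct elems[0] .. elems[n-1] ...
       __builtin_vec_new_gc_dtor( elems, T_destructor, sizeof( T ) );

   Only the entry points defined in this file are real; the surrounding code
   is generated by g++. */
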
void __builtin_delete( o )
    void* o;
    /*
    The compiler generates calls to __builtin_delete for operator
    delete().  The GC currently requires that any registered
    finalizers be unregistered before explicitly freeing an object.
    If the object has any weak pointers referencing it, we can't
    actually free it now. */
{
    if (o != 0) {
        Descriptor* desc = DESCRIPTOR( o );
        if (desc->has_finalizer) GC_REGISTER_FINALIZER( o, 0, 0, 0, 0 );
        if (! desc->has_weak_pointers) GC_FREE( o );}}

void __builtin_vec_delete( o )
    void* o;
    /*
    The compiler generates calls to __builtin_vec_delete for operator
    delete[](). */
{
    __builtin_delete( o );}

/**************************************************************************

Implementations of the template class WeakPointer from WeakPointer.h

***************************************************************************/

typedef struct WeakPointer {
    void* pointer;
} WeakPointer;

void* _WeakPointer_New( t )
    void* t;
{
    if (t == 0) {
        return 0;}
    else {
        void* base = GC_base( t );
        WeakPointer* wp =
            (WeakPointer*) GC_MALLOC_ATOMIC( sizeof( WeakPointer ) );
        Descriptor* desc = DESCRIPTOR( base );

        wp->pointer = t;
        desc->has_weak_pointers = 1;
        GC_general_register_disappearing_link( &wp->pointer, base );
        return wp;}}

static void* PointerWithLock( wp )
    WeakPointer* wp;
{
    if (wp == 0 || wp->pointer == 0) {
        return 0;}
    else {
        return (void*) wp->pointer;}}


void* _WeakPointer_Pointer( wp )
    WeakPointer* wp;
{
    return (void*) GC_call_with_alloc_lock( PointerWithLock, wp );}

typedef struct EqualClosure {
    WeakPointer* wp1;
    WeakPointer* wp2;
} EqualClosure;


static void* EqualWithLock( ec )
    EqualClosure* ec;
{
    if (ec->wp1 == 0 || ec->wp2 == 0) {
        return (void*) (ec->wp1 == ec->wp2);}
    else {
        return (void*) (ec->wp1->pointer == ec->wp2->pointer);}}


int _WeakPointer_Equal( wp1, wp2 )
    WeakPointer* wp1;
    WeakPointer* wp2;
{
    EqualClosure ec;

    ec.wp1 = wp1;
    ec.wp2 = wp2;
    return (int) GC_call_with_alloc_lock( EqualWithLock, &ec );}


int _WeakPointer_Hash( wp )
    WeakPointer* wp;
{
    return (int) _WeakPointer_Pointer( wp );}

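/* Illustrative usage only (a sketch of how the WeakPointer.h template is
   assumed to map onto these C entry points; obj is a hypothetical gc
   object):

       void* wp = _WeakPointer_New( obj );
       ...
       void* p = _WeakPointer_Pointer( wp );    (0 once obj has been collected)

   The pointer is read under the allocation lock (GC_call_with_alloc_lock),
   so the read cannot race with the collector clearing the disappearing
   link. */
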
/**************************************************************************

Implementations of the template class CleanUp from WeakPointer.h

***************************************************************************/

typedef struct Closure {
    void (*c) PROTO(( void* d, void* t ));
    ptrdiff_t t_offset;
    void* d;
} Closure;


static void _CleanUp_CallClosure( obj, data )
    void* obj;
    void* data;
{
    Closure* closure = (Closure*) data;
    closure->c( closure->d, (char*) obj + closure->t_offset );}

void _CleanUp_Set( t, c, d )
    void* t;
    void (*c) PROTO(( void* d, void* t ));
    void* d;
{
    void* base = GC_base( t );
    Descriptor* desc = DESCRIPTOR( t );

    if (c == 0) {
        GC_REGISTER_FINALIZER_IGNORE_SELF( base, 0, 0, 0, 0 );
        desc->has_finalizer = 0;}
    else {
        Closure* closure = (Closure*) GC_MALLOC( sizeof( Closure ) );
        closure->c = c;
        closure->t_offset = (char*) t - (char*) base;
        closure->d = d;
        GC_REGISTER_FINALIZER_IGNORE_SELF( base, _CleanUp_CallClosure,
                                           closure, 0, 0 );
        desc->has_finalizer = 1;}}

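/* Illustrative usage only (a hypothetical clean-up callback registered
   through the interface above; close_file, f and obj are made-up names,
   and close_file would need <stdio.h>):

       static void close_file( void* d, void* t ) { fclose( (FILE*) d ); }
       ...
       _CleanUp_Set( obj, close_file, f );      (registers close_file( f, obj ))
       _CleanUp_Set( obj, 0, 0 );               (cancels any registered clean-up)

   Note the callback receives the client datum d first and the object second,
   matching _CleanUp_CallClosure above. */
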
void _CleanUp_Call( t )
    void* t;
{
    /* ? Aren't we supposed to deactivate weak pointers to t too?
       Why? */
    void* base = GC_base( t );
    void* d;
    GC_finalization_proc f;

    GC_REGISTER_FINALIZER( base, 0, 0, &f, &d );
    f( base, d );}

typedef struct QueueElem {
    void* o;
    GC_finalization_proc f;
    void* d;
    struct QueueElem* next;
} QueueElem;


void* _CleanUp_Queue_NewHead()
{
    return GC_MALLOC( sizeof( QueueElem ) );}


static void _CleanUp_Queue_Enqueue( obj, data )
    void* obj;
    void* data;
{
    QueueElem* q = (QueueElem*) data;
    QueueElem* head = q->next;

    q->o = obj;
    q->next = head->next;
    head->next = q;}

void _CleanUp_Queue_Set( h, t )
    void* h;
    void* t;
{
    QueueElem* head = (QueueElem*) h;
    void* base = GC_base( t );
    void* d;
    GC_finalization_proc f;
    QueueElem* q = (QueueElem*) GC_MALLOC( sizeof( QueueElem ) );

    GC_REGISTER_FINALIZER( base, _CleanUp_Queue_Enqueue, q, &f, &d );
    q->f = f;
    q->d = d;
    q->next = head;}

int _CleanUp_Queue_Call( h )
    void* h;
{
    QueueElem* head = (QueueElem*) h;
    QueueElem* q = head->next;

    if (q == 0) {
        return 0;}
    else {
        head->next = q->next;
        q->next = 0;
        if (q->f != 0) q->f( q->o, q->d );
        return 1;}}

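/* Illustrative usage only (a sketch of the assumed clean-up queue protocol;
   obj is a hypothetical gc object):

       void* h = _CleanUp_Queue_NewHead();
       _CleanUp_Queue_Set( h, obj );
       ...
       while (_CleanUp_Queue_Call( h ))
           ;

   _CleanUp_Queue_Set arranges for obj's clean-up to be enqueued on h when
   the collector finalizes obj; each _CleanUp_Queue_Call then runs one queued
   clean-up, returning 0 once the queue is empty. */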