/*
 * Copyright 2008-2010 Apple, Inc. Permission is hereby granted, free of charge,
 * to any person obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to permit
 * persons to whom the Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#ifdef HAVE_AVAILABILITY_MACROS_H
#include <AvailabilityMacros.h>
#endif /* HAVE_AVAILABILITY_MACROS_H */

#ifdef HAVE_TARGET_CONDITIONALS_H
#include <TargetConditionals.h>
#endif /* HAVE_TARGET_CONDITIONALS_H */

#if defined(HAVE_OSATOMIC_COMPARE_AND_SWAP_INT) && defined(HAVE_OSATOMIC_COMPARE_AND_SWAP_LONG)

#ifdef HAVE_LIBKERN_OSATOMIC_H
#include <libkern/OSAtomic.h>
#endif /* HAVE_LIBKERN_OSATOMIC_H */
#elif defined(__WIN32__) || defined(_WIN32)
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>

static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
    /* fixme barrier is overkill -- see objc-os.h */
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}
static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
    /* fixme barrier is overkill -- see objc-os.h */
    /* int and LONG are both 32 bits wide on Win32, so this cast is safe */
    int original = InterlockedCompareExchange((long volatile *)dst, newi, oldi);
    return (original == oldi);
}
/*
 * Check to see if the GCC atomic built-ins are available. If we're on
 * a 64-bit system, make sure we have an 8-byte atomic function
 * available.
 */

#elif defined(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_INT) && defined(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_LONG)
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
    return __sync_bool_compare_and_swap(dst, oldl, newl);
}

static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
    return __sync_bool_compare_and_swap(dst, oldi, newi);
}
#else
#error unknown atomic compare-and-swap primitive
#endif /* HAVE_OSATOMIC_COMPARE_AND_SWAP_INT && HAVE_OSATOMIC_COMPARE_AND_SWAP_LONG */
static void *_Block_copy_class = _NSConcreteMallocBlock;
static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
static int _Block_copy_flag = BLOCK_NEEDS_FREE;
static int _Byref_flag_initial_value = BLOCK_NEEDS_FREE | 2;

static const int WANTS_ONE = (1 << 16);

static bool isGC = false;
static unsigned long int latching_incr_long(unsigned long int *where) {
    while (1) {
        unsigned long int old_value = *(volatile unsigned long int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK; // latched at the maximum; never changes again
        }
        if (OSAtomicCompareAndSwapLong(old_value, old_value+1, (volatile long int *)where)) {
            return old_value+1;
        }
        // CAS failed: another thread changed the flags word; retry
    }
}
static int latching_incr_int(int *where) {
    while (1) {
        int old_value = *(volatile int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+1, (volatile int *)where)) {
            return old_value+1;
        }
    }
}
static int latching_decr_long(unsigned long int *where) {
    while (1) {
        // read through the matching pointer width (the *(volatile int *) read here
        // was a truncating bug on 64-bit)
        unsigned long int old_value = *(volatile unsigned long int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK; // latched; leave it alone
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return 0; // underflow; caller reports the error
        }
        if (OSAtomicCompareAndSwapLong(old_value, old_value-1, (volatile long int *)where)) {
            return old_value-1;
        }
    }
}
static int latching_decr_int(int *where) {
    while (1) {
        int old_value = *(volatile int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return 0;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value-1, (volatile int *)where)) {
            return old_value-1;
        }
    }
}
/*
 * GC support stub routines:
 */

#pragma mark GC Support Routines
static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) {
    return malloc(size);
}

static void _Block_assign_default(void *value, void **destptr) {
    *destptr = value;
}

static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
}

static void _Block_do_nothing(const void *aBlock) { }

static void _Block_retain_object_default(const void *ptr) {
    if (!ptr) return;
}

static void _Block_release_object_default(const void *ptr) {
    if (!ptr) return;
}

static void _Block_assign_weak_default(const void *ptr, void *dest) {
    *(void **)dest = (void *)ptr;
}

static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
    memmove(dst, src, (size_t)size);
}

static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
    void **destp = (void **)dest;
    void **srcp = (void **)src;
    while (size) {
        _Block_assign_default(*srcp, destp);
        destp++;
        srcp++;
        size -= sizeof(void *);
    }
}
/*
 * GC support callout functions - initially set to stub routines:
 */

static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default;
static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_assign_weak)(const void *dest, void *ptr) = _Block_assign_weak_default;
static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;
/*
 * GC support SPI functions - called from ObjC runtime and CoreFoundation:
 */

/*
 * Called from objc-auto to turn on GC.
 * version 3, 4 arg, but changed 1st arg
 */
void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *),
                    void (*gc_memmove)(void *, void *, unsigned long)) {

    isGC = true;
    _Block_allocator = alloc;
    _Block_deallocator = _Block_do_nothing;
    _Block_assign = gc_assign;
    _Block_copy_flag = BLOCK_IS_GC;
    _Block_copy_class = _NSConcreteAutoBlock;
    /* blocks with ctors & dtors need to have the dtor run from a class with a finalizer */
    _Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
    _Block_setHasRefcount = setHasRefcount;
    _Byref_flag_initial_value = BLOCK_IS_GC; // no refcount
    _Block_retain_object = _Block_do_nothing;
    _Block_release_object = _Block_do_nothing;
    _Block_assign_weak = gc_assign_weak;
    _Block_memmove = gc_memmove;
}
void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                     void (*setHasRefcount)(const void *, const bool),
                     void (*gc_assign)(void *, void **),
                     void (*gc_assign_weak)(const void *, void *)) {
    /* until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then */
    _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
}
/*
 * Called from objc-auto to alternatively turn on retain/release.
 * Prior to this the only "object" support we can provide is for those
 * super special objects that live in libSystem, namely dispatch queues.
 * Blocks and Block_byrefs have their own special entry points.
 */
void _Block_use_RR( void (*retain)(const void *),
                    void (*release)(const void *)) {
    _Block_retain_object = retain;
    _Block_release_object = release;
}
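
// Illustrative sketch (not from the original source): a client layer that
// manages objects with plain retain/release counting could install its own
// callbacks as below. The names rr_retain, rr_release, and rr_install are
// hypothetical; only _Block_use_RR itself is part of this file.
#if 0
static void rr_retain(const void *obj) {
    // bump the client's own refcount for obj
}
static void rr_release(const void *obj) {
    // drop the client's refcount, freeing obj when it reaches zero
}

static void rr_install(void) {
    // After this call, _Block_object_assign and _Block_object_dispose route
    // captured-object retains/releases through the callbacks above instead
    // of the do-nothing defaults.
    _Block_use_RR(rr_retain, rr_release);
}
#endif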
/*
 * Internal Support routines for copying:
 */

#pragma mark Copy/Release support

/* Copy, or bump refcount, of a block. If really copying, call the copy helper if present. */
static void *_Block_copy_internal(const void *arg, const int flags) {
    struct Block_layout *aBlock;
    const bool wantsOne = (WANTS_ONE & flags) == WANTS_ONE;

    //printf("_Block_copy_internal(%p, %x)\n", arg, flags);
    if (!arg) return NULL;

    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // latches on high
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GC) {
        // GC refcounting is expensive so do most refcounting here.
        if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 1)) {
            // Tell collector to hang on this - it will bump the GC refcount version
            _Block_setHasRefcount(aBlock, true);
        }
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return aBlock;
    }

    // Its a stack block. Make a copy.
    if (!isGC) {
        struct Block_layout *result = malloc(aBlock->descriptor->size);
        if (!result) return (void *)0;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK); // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 1;
        result->isa = _NSConcreteMallocBlock;
        if (result->flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling block copy helper %p(%p, %p)...\n", aBlock->descriptor->copy, result, aBlock);
            (*aBlock->descriptor->copy)(result, aBlock); // do fixup
        }
        return result;
    }
    else {
        // Under GC want allocation with refcount 1 so we ask for "true" if wantsOne
        // This allows the copy helper routines to make non-refcounted block copies under GC
        unsigned long int flags = aBlock->flags;
        bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
        struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR);
        if (!result) return (void *)0;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
        flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK); // XXX not needed
        if (wantsOne)
            flags |= BLOCK_IS_GC | 1;
        else
            flags |= BLOCK_IS_GC;
        result->flags = flags;
        if (flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling block copy helper...\n");
            (*aBlock->descriptor->copy)(result, aBlock); // do fixup
        }
        if (hasCTOR) {
            result->isa = _NSConcreteFinalizingBlock;
        }
        else {
            result->isa = _NSConcreteAutoBlock;
        }
        return result;
    }
}
/*
 * Runtime entry points for maintaining the sharing knowledge of byref data blocks.
 *
 * A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data.
 * Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
 * We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it.
 * Otherwise we need to copy it and update the stack forwarding pointer.
 * XXX We need to account for weak/nonretained read-write barriers.
 */
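
// Illustrative sketch (not from the original source) of the forwarding dance
// described above. For a variable declared `__block int x;` the compiler emits
// a Block_byref-style structure on the stack whose forwarding field initially
// points at itself. The struct below is hypothetical; the real layout lives in
// Block_private.h.
#if 0
struct hypothetical_byref_x {
    void *isa;
    struct hypothetical_byref_x *forwarding;
    int flags;
    int size;
    int x;                          // the captured variable itself
};
// Every access to x dereferences the forwarding pointer first:
//     byref_x.forwarding->x = 10;
// so once _Block_byref_assign_copy moves the structure to the heap and patches
// the stack copy's forwarding pointer, the stack frame and every Block copy
// all see the same, single heap instance of x.
#endif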
static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
    struct Block_byref **destp = (struct Block_byref **)dest;
    struct Block_byref *src = (struct Block_byref *)arg;

    //printf("_Block_byref_assign_copy called, byref destp %p, src %p, flags %x\n", destp, src, flags);
    //printf("src dump: %s\n", _Block_byref_dump(src));
    if (src->forwarding->flags & BLOCK_IS_GC) {
        ;   // don't need to do any more work
    }
    else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        //printf("making copy\n");
        // src points to stack
        bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
        // if its weak ask for an object (only matters under GC)
        struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
        copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
        copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
        src->forwarding = copy;  // patch stack to point to heap copy
        copy->size = src->size;
        if (isWeak) {
            copy->isa = &_NSConcreteWeakBlockVariable; // mark isa field so it gets weak scanning
        }
        if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest
            // If more than one field shows up in a byref block this is wrong XXX
            copy->byref_keep = src->byref_keep;
            copy->byref_destroy = src->byref_destroy;
            (*src->byref_keep)(copy, src);
        }
        else {
            // just bits. Blast 'em using _Block_memmove in case they're __strong
            _Block_memmove(
                (void *)&copy->byref_keep,
                (void *)&src->byref_keep,
                src->size - sizeof(struct Block_byref_header));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_NEEDS_FREE) == BLOCK_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);
    }
    // assign byref data block pointer into new Block
    _Block_assign(src->forwarding, (void **)destp);
}
static void _Block_byref_release(const void *arg) {
    struct Block_byref *shared_struct = (struct Block_byref *)arg;
    int refcount;

    // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
    shared_struct = shared_struct->forwarding;

    //printf("_Block_byref_release %p called, flags are %x\n", shared_struct, shared_struct->flags);
    // To support C++ destructors under GC we arrange for there to be a finalizer for this
    // by using an isa that directs the code to a finalizer that calls the byref_destroy method.
    if ((shared_struct->flags & BLOCK_NEEDS_FREE) == 0) {
        return; // stack or GC or global
    }
    refcount = shared_struct->flags & BLOCK_REFCOUNT_MASK;
    if (refcount <= 0) {
        printf("_Block_byref_release: Block byref data structure at %p underflowed\n", arg);
    }
    else if ((latching_decr_int(&shared_struct->flags) & BLOCK_REFCOUNT_MASK) == 0) {
        //printf("disposing of heap based byref block\n");
        if (shared_struct->flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling out to helper\n");
            (*shared_struct->byref_destroy)(shared_struct);
        }
        _Block_deallocator((struct Block_layout *)shared_struct);
    }
}
/*
 * _Block_copy, _Block_release, and (old) _Block_destroy
 */

void *_Block_copy(const void *arg) {
    return _Block_copy_internal(arg, WANTS_ONE);
}
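
// Hedged usage sketch (not part of this file): the public Block_copy() and
// Block_release() macros in Block.h wrap these entry points. A caller that
// needs a stack Block to outlive its frame does, in effect, the following;
// stack_block and do_work are hypothetical names.
#if 0
void (^stack_block)(void) = ^{ do_work(); };
void *heap_block = _Block_copy(stack_block);   // copied to the heap, refcount 1
// ... heap_block stays valid after the enclosing frame unwinds ...
_Block_release(heap_block);                    // refcount hits 0: dispose helper runs, memory freed
#endif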
// API entry point to release a copied Block
void _Block_release(void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    int newCount;
    if (!aBlock) return;
    newCount = latching_decr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK;
    if (newCount > 0) return;
    // Reached zero
    if (aBlock->flags & BLOCK_IS_GC) {
        // Tell GC we no longer have our own refcounts. GC will decr its refcount
        // and unless someone has done a CFRetain or marked it uncollectable it will
        // now be subject to GC reclamation.
        _Block_setHasRefcount(aBlock, false);
    }
    else if (aBlock->flags & BLOCK_NEEDS_FREE) {
        if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) (*aBlock->descriptor->dispose)(aBlock);
        _Block_deallocator(aBlock);
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        ; // global Blocks are never released
    }
    else {
        printf("Block_release called upon a stack Block: %p, ignored\n", (void *)aBlock);
    }
}
// Old Compiler SPI point to release a copied Block used by the compiler in dispose helpers
static void _Block_destroy(const void *arg) {
    struct Block_layout *aBlock;
    if (!arg) return;
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_IS_GC) {
        // assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
        return; // ignore, we are being called because of a DTOR
    }
    _Block_release(aBlock);
}
/*
 * SPI used by other layers
 */

// SPI, also internal. Called from NSAutoBlock only under GC
void *_Block_copy_collectable(const void *aBlock) {
    return _Block_copy_internal(aBlock, 0);
}

unsigned long int Block_size(void *arg) {
    return ((struct Block_layout *)arg)->descriptor->size;
}
#pragma mark Compiler SPI entry points
/*******************************************************

Entry points used by the compiler - the real API!

A Block can reference four different kinds of things that require help when the Block is copied to the heap.
1) C++ stack based objects
2) References to Objective-C objects
3) Other Blocks
4) __block variables

In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.

The flags parameter of _Block_object_assign and _Block_object_dispose is set to
    * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
    * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
    * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
If the __block variable is marked weak the compiler also or's in BLOCK_FIELD_IS_WEAK (16).

So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.

When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.

So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) or'ed as appropriate and always 128 or'd in, for the following set of possibilities:
    __block id                   128+3
    __weak __block id            128+3+16
    __block (^Block)             128+7
    __weak __block (^Block)      128+7+16

The implementation of the two routines would be improved by switch statements enumerating the eight cases.

********************************************************/
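
// Hedged sketch (illustrative only, not actual compiler output): for a Block
// capturing one Objective-C object and one other Block, the synthesized
// copy/dispose helpers conceptually reduce to the calls below. The helper
// names and the layout struct are hypothetical; the flag constants and the
// two entry points are the real ones defined here and in Block_private.h.
#if 0
struct hypothetical_layout {
    void *isa; int flags; int reserved;
    void (*invoke)(void *);
    struct Block_descriptor *descriptor;
    void *captured_object;              // an ObjC object: flag value 3
    void *captured_block;               // another Block:  flag value 7
};

static void hypothetical_copy_helper(struct hypothetical_layout *dst,
                                     struct hypothetical_layout *src) {
    _Block_object_assign(&dst->captured_object, src->captured_object, BLOCK_FIELD_IS_OBJECT);
    _Block_object_assign(&dst->captured_block,  src->captured_block,  BLOCK_FIELD_IS_BLOCK);
}

static void hypothetical_dispose_helper(struct hypothetical_layout *src) {
    _Block_object_dispose(src->captured_object, BLOCK_FIELD_IS_OBJECT);
    _Block_object_dispose(src->captured_block,  BLOCK_FIELD_IS_BLOCK);
}
#endif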
/*
 * When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
 * to do the assignment.
 */
void _Block_object_assign(void *destAddr, const void *object, const int flags) {
    //printf("_Block_object_assign(*%p, %p, %x)\n", destAddr, object, flags);
    if ((flags & BLOCK_BYREF_CALLER) == BLOCK_BYREF_CALLER) {
        if ((flags & BLOCK_FIELD_IS_WEAK) == BLOCK_FIELD_IS_WEAK) {
            _Block_assign_weak(object, destAddr);
        }
        else {
            // do *not* retain or *copy* __block variables whatever they are
            _Block_assign((void *)object, destAddr);
        }
    }
    else if ((flags & BLOCK_FIELD_IS_BYREF) == BLOCK_FIELD_IS_BYREF) {
        // copying a __block reference from the stack Block to the heap
        // flags will indicate if it holds a __weak reference and needs a special isa
        _Block_byref_assign_copy(destAddr, object, flags);
    }
    // (this test must be before next one)
    else if ((flags & BLOCK_FIELD_IS_BLOCK) == BLOCK_FIELD_IS_BLOCK) {
        // copying a Block declared variable from the stack Block to the heap
        _Block_assign(_Block_copy_internal(object, flags), destAddr);
    }
    // (this test must be after previous one)
    else if ((flags & BLOCK_FIELD_IS_OBJECT) == BLOCK_FIELD_IS_OBJECT) {
        //printf("retaining object at %p\n", object);
        _Block_retain_object(object);
        //printf("done retaining object at %p\n", object);
        _Block_assign((void *)object, destAddr);
    }
}
// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents
// Used initially only for __attribute__((NSObject)) marked pointers.
void _Block_object_dispose(const void *object, const int flags) {
    //printf("_Block_object_dispose(%p, %x)\n", object, flags);
    if (flags & BLOCK_FIELD_IS_BYREF) {
        // get rid of the __block data structure held in a Block
        _Block_byref_release(object);
    }
    else if ((flags & (BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_BLOCK) {
        // get rid of a referenced Block held by this Block
        // (ignore __block Block variables, compiler doesn't need to call us)
        _Block_destroy(object);
    }
    else if ((flags & (BLOCK_FIELD_IS_WEAK|BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_OBJECT) {
        // get rid of a referenced object held by this Block
        // (ignore __block object variables, compiler doesn't need to call us)
        _Block_release_object(object);
    }
}
#pragma mark Debugging
const char *_Block_dump(const void *block) {
    struct Block_layout *closure = (struct Block_layout *)block;
    static char buffer[512];
    char *cp = buffer;
    if (closure == NULL) {
        sprintf(cp, "NULL passed to _Block_dump\n");
        return buffer;
    }
    if (! (closure->flags & BLOCK_HAS_DESCRIPTOR)) {
        printf("Block compiled by obsolete compiler, please recompile source for this Block\n");
        exit(1);
    }
    cp += sprintf(cp, "^%p (new layout) =\n", (void *)closure);
    if (closure->isa == NULL) {
        cp += sprintf(cp, "isa: NULL\n");
    }
    else if (closure->isa == _NSConcreteStackBlock) {
        cp += sprintf(cp, "isa: stack Block\n");
    }
    else if (closure->isa == _NSConcreteMallocBlock) {
        cp += sprintf(cp, "isa: malloc heap Block\n");
    }
    else if (closure->isa == _NSConcreteAutoBlock) {
        cp += sprintf(cp, "isa: GC heap Block\n");
    }
    else if (closure->isa == _NSConcreteGlobalBlock) {
        cp += sprintf(cp, "isa: global Block\n");
    }
    else if (closure->isa == _NSConcreteFinalizingBlock) {
        cp += sprintf(cp, "isa: finalizing Block\n");
    }
    else {
        cp += sprintf(cp, "isa?: %p\n", (void *)closure->isa);
    }
    cp += sprintf(cp, "flags:");
    if (closure->flags & BLOCK_HAS_DESCRIPTOR) {
        cp += sprintf(cp, " HASDESCRIPTOR");
    }
    if (closure->flags & BLOCK_NEEDS_FREE) {
        cp += sprintf(cp, " FREEME");
    }
    if (closure->flags & BLOCK_IS_GC) {
        cp += sprintf(cp, " ISGC");
    }
    if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
        cp += sprintf(cp, " HASHELP");
    }
    if (closure->flags & BLOCK_HAS_CTOR) {
        cp += sprintf(cp, " HASCTOR");
    }
    cp += sprintf(cp, "\nrefcount: %u\n", closure->flags & BLOCK_REFCOUNT_MASK);
    cp += sprintf(cp, "invoke: %p\n", (void *)(uintptr_t)closure->invoke);
    {
        struct Block_descriptor *dp = closure->descriptor;
        cp += sprintf(cp, "descriptor: %p\n", (void *)dp);
        cp += sprintf(cp, "descriptor->reserved: %lu\n", dp->reserved);
        cp += sprintf(cp, "descriptor->size: %lu\n", dp->size);

        if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
            cp += sprintf(cp, "descriptor->copy helper: %p\n", (void *)(uintptr_t)dp->copy);
            cp += sprintf(cp, "descriptor->dispose helper: %p\n", (void *)(uintptr_t)dp->dispose);
        }
    }
    return buffer;
}
const char *_Block_byref_dump(struct Block_byref *src) {
    static char buffer[256];
    char *cp = buffer;
    cp += sprintf(cp, "byref data block %p contents:\n", (void *)src);
    cp += sprintf(cp, "  forwarding: %p\n", (void *)src->forwarding);
    cp += sprintf(cp, "  flags: 0x%x\n", src->flags);
    cp += sprintf(cp, "  size: %d\n", src->size);
    if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
        cp += sprintf(cp, "  copy helper: %p\n", (void *)(uintptr_t)src->byref_keep);
        cp += sprintf(cp, "  dispose helper: %p\n", (void *)(uintptr_t)src->byref_destroy);
    }
    return buffer;
}