1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // See malloc.h for overview.
7 // TODO(rsc): double-check stats.
17 #include "interface.h"
21 // Map gccgo field names to gc field names.
22 // Eface aka __go_empty_interface.
23 #define type __type_descriptor
24 // Type aka __go_type_descriptor
26 #define string __reflection
27 #define KindPtr GO_PTR
28 #define KindNoPointers GO_NO_POINTERS
30 // GCCGO SPECIFIC CHANGE
32 // There is a long comment in runtime_mallocinit about where to put the heap
33 // on a 64-bit system. It makes assumptions that are not valid on linux/arm64
34 // -- it assumes user space can choose the lower 47 bits of a pointer, but on
35 // linux/arm64 we can only choose the lower 39 bits. This means the heap is
36 // roughly a quarter of the available address space and we cannot choose a bit
37 // pattern that all pointers will have -- luckily the GC is mostly precise
38 // these days so this doesn't matter all that much. The kernel (as of 3.13)
39 // will allocate address space starting either down from 0x7fffffffff or up
40 // from 0x2000000000, so we put the heap roughly in the middle of these two
41 // addresses to minimize the chance that a non-heap allocation will get in the
44 // This all means that there isn't much point in trying 256 different
45 // locations for the heap on such systems.
47 #define HeapBase(i) ((void*)(uintptr)(0x40ULL<<32))
48 #define HeapBaseOptions 1
50 #define HeapBase(i) ((void*)(uintptr)(i<<40|0x00c0ULL<<32))
51 #define HeapBaseOptions 0x80
53 // END GCCGO SPECIFIC CHANGE
55 // Mark mheap as 'no pointers', it does not contain interesting pointers but occupies ~45K.
58 int32 runtime_checking;
60 extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go
62 extern volatile intgo runtime_MemProfileRate
63 __asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
65 // Allocate an object of at least size bytes.
66 // Small objects are allocated from the per-thread cache's free lists.
67 // Large objects (> 32 kB) are allocated straight from the heap.
68 // If the block will be freed with runtime_free(), typ must be 0.
70 runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
84 // All 0-length allocations use this pointer.
85 // The language does not require the allocations to
86 // have distinct values.
87 return &runtime_zerobase;
94 if(m->mcache == nil && g->ncgo > 0) {
95 // For gccgo this case can occur when a cgo or SWIG function
96 // has an interface return type and the function
97 // returns a non-pointer, so memory allocation occurs
98 // after syscall.Cgocall but before syscall.CgocallDone.
99 // We treat it as a callback.
100 runtime_exitsyscall();
103 flag |= FlagNoInvokeGC;
106 if(runtime_gcwaiting() && g != m->g0 && m->locks == 0 && !(flag & FlagNoInvokeGC)) {
111 runtime_throw("malloc/free - deadlock");
112 // Disable preemption during settype_flush.
113 // We can not use m->mallocing for this, because settype_flush calls mallocgc.
117 if(DebugTypeAtBlockEnd)
118 size += sizeof(uintptr);
121 if(!runtime_debug.efence && size <= MaxSmallSize) {
122 // Allocate from mcache free lists.
123 // Inlined version of SizeToClass().
125 sizeclass = runtime_size_to_class8[(size+7)>>3];
127 sizeclass = runtime_size_to_class128[(size-1024+127) >> 7];
128 size = runtime_class_to_size[sizeclass];
129 l = &c->list[sizeclass];
131 runtime_MCache_Refill(c, sizeclass);
135 if(!(flag & FlagNoZero)) {
137 // block is zeroed iff second word is zero ...
138 if(size > sizeof(uintptr) && ((uintptr*)v)[1] != 0)
139 runtime_memclr((byte*)v, size);
141 c->local_cachealloc += size;
143 // TODO(rsc): Report tracebacks for very large allocations.
145 // Allocate directly from heap.
146 npages = size >> PageShift;
147 if((size & PageMask) != 0)
149 s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1, !(flag & FlagNoZero));
151 runtime_throw("out of memory");
152 s->limit = (byte*)(s->start<<PageShift) + size;
153 size = npages<<PageShift;
154 v = (void*)(s->start << PageShift);
156 // setup for mark sweep
157 runtime_markspan(v, 0, 0, true);
162 else if(!(flag & FlagNoScan))
165 if(DebugTypeAtBlockEnd)
166 *(uintptr*)((uintptr)v+size-sizeof(uintptr)) = typ;
168 // TODO: save type even if FlagNoScan? Potentially expensive but might help
169 // heap profiling/tracing.
170 if(UseSpanType && !(flag & FlagNoScan) && typ != 0) {
173 buf = m->settype_buf;
174 i = m->settype_bufsize;
175 buf[i++] = (uintptr)v;
177 m->settype_bufsize = i;
181 if(UseSpanType && !(flag & FlagNoScan) && typ != 0 && m->settype_bufsize == nelem(m->settype_buf))
182 runtime_settype_flush(m);
185 if(runtime_debug.allocfreetrace)
188 if(!(flag & FlagNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
189 if(size >= (uint32) rate)
191 if((uint32) m->mcache->next_sample > size)
192 m->mcache->next_sample -= size;
194 // pick next profile time
195 // If you change this, also change allocmcache.
196 if(rate > 0x3fffffff) // make 2*rate not overflow
198 m->mcache->next_sample = runtime_fastrand1() % (2*rate);
200 runtime_setblockspecial(v, true);
201 runtime_MProf_Malloc(v, size, typ);
205 if(!(flag & FlagNoInvokeGC) && mstats.heap_alloc >= mstats.next_gc)
209 runtime_racemalloc(v, size);
212 runtime_entersyscall();
218 __go_alloc(uintptr size)
220 return runtime_mallocgc(size, 0, FlagNoInvokeGC);
223 // Free the object whose base pointer is v.
237 // If you change this also change mgc0.c:/^sweep,
238 // which has a copy of the guts of free.
242 runtime_throw("malloc/free - deadlock");
245 if(!runtime_mlookup(v, nil, nil, &s)) {
246 runtime_printf("free %p: not an allocated block\n", v);
247 runtime_throw("free runtime_mlookup");
249 prof = runtime_blockspecial(v);
254 // Find size class for v.
255 sizeclass = s->sizeclass;
259 size = s->npages<<PageShift;
260 *(uintptr*)(s->start<<PageShift) = (uintptr)0xfeedfeedfeedfeedll; // mark as "needs to be zeroed"
261 // Must mark v freed before calling unmarkspan and MHeap_Free:
262 // they might coalesce v into other spans and change the bitmap further.
263 runtime_markfreed(v, size);
264 runtime_unmarkspan(v, 1<<PageShift);
265 if(runtime_debug.efence)
266 runtime_SysFree((void*)(s->start<<PageShift), size, &mstats.heap_sys);
268 runtime_MHeap_Free(&runtime_mheap, s, 1);
269 c->local_nlargefree++;
270 c->local_largefree += size;
273 size = runtime_class_to_size[sizeclass];
274 if(size > sizeof(uintptr))
275 ((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll; // mark as "needs to be zeroed"
276 // Must mark v freed before calling MCache_Free:
277 // it might coalesce v and other blocks into a bigger span
278 // and change the bitmap further.
279 runtime_markfreed(v, size);
280 c->local_nsmallfree[sizeclass]++;
281 runtime_MCache_Free(c, v, sizeclass, size);
284 runtime_MProf_Free(v, size);
289 runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
298 m->mcache->local_nlookup++;
299 if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
300 // purge cache stats to prevent overflow
301 runtime_lock(&runtime_mheap);
302 runtime_purgecachedstats(m->mcache);
303 runtime_unlock(&runtime_mheap);
306 s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
310 runtime_checkfreed(v, 1);
318 p = (byte*)((uintptr)s->start<<PageShift);
319 if(s->sizeclass == 0) {
324 *size = s->npages<<PageShift;
330 i = ((byte*)v - p)/n;
340 runtime_allocmcache(void)
345 runtime_lock(&runtime_mheap);
346 c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
347 runtime_unlock(&runtime_mheap);
348 runtime_memclr((byte*)c, sizeof(*c));
350 // Set first allocation sample size.
351 rate = runtime_MemProfileRate;
352 if(rate > 0x3fffffff) // make 2*rate not overflow
355 c->next_sample = runtime_fastrand1() % (2*rate);
361 runtime_freemcache(MCache *c)
363 runtime_MCache_ReleaseAll(c);
364 runtime_lock(&runtime_mheap);
365 runtime_purgecachedstats(c);
366 runtime_FixAlloc_Free(&runtime_mheap.cachealloc, c);
367 runtime_unlock(&runtime_mheap);
371 runtime_purgecachedstats(MCache *c)
376 // Protected by either heap or GC lock.
378 mstats.heap_alloc += c->local_cachealloc;
379 c->local_cachealloc = 0;
380 mstats.nlookup += c->local_nlookup;
381 c->local_nlookup = 0;
382 h->largefree += c->local_largefree;
383 c->local_largefree = 0;
384 h->nlargefree += c->local_nlargefree;
385 c->local_nlargefree = 0;
386 for(i=0; i<(int32)nelem(c->local_nsmallfree); i++) {
387 h->nsmallfree[i] += c->local_nsmallfree[i];
388 c->local_nsmallfree[i] = 0;
392 extern uintptr runtime_sizeof_C_MStats
393 __asm__ (GOSYM_PREFIX "runtime.Sizeof_C_MStats");
395 #define MaxArena32 (2U<<30)
398 runtime_mallocinit(void)
401 uintptr arena_size, bitmap_size, spans_size;
407 runtime_sizeof_C_MStats = sizeof(MStats);
422 // limit = runtime_memlimit();
423 // See https://code.google.com/p/go/issues/detail?id=5049
424 // TODO(rsc): Fix after 1.1.
427 // Set up the allocation arena, a contiguous area of memory where
428 // allocated data will be found. The arena begins with a bitmap large
429 // enough to hold 4 bits per allocated word.
430 if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
431 // On a 64-bit machine, allocate from a single contiguous reservation.
432 // 128 GB (MaxMem) should be big enough for now.
434 // The code will work with the reservation at any address, but ask
435 // SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
436 // Allocating a 128 GB region takes away 37 bits, and the amd64
437 // doesn't let us choose the top 17 bits, so that leaves the 11 bits
438 // in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
439 // that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
440 // In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
441 // UTF-8 sequences, and they are otherwise as far away from
442 // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
443 // addresses. An earlier attempt to use 0x11f8 caused out of memory errors
444 // on OS X during thread allocations. 0x00c0 causes conflicts with
445 // AddressSanitizer which reserves all memory up to 0x0100.
446 // These choices are both for debuggability and to reduce the
447 // odds of the conservative garbage collector not collecting memory
448 // because some non-pointer block of memory had a bit pattern
449 // that matched a memory address.
451 // Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
452 // but it hardly matters: e0 00 is not valid UTF-8 either.
454 // If this fails we fall back to the 32 bit memory mechanism
456 bitmap_size = arena_size / (sizeof(void*)*8/4);
457 spans_size = arena_size / PageSize * sizeof(runtime_mheap.spans[0]);
458 spans_size = ROUND(spans_size, PageSize);
459 for(i = 0; i < HeapBaseOptions; i++) {
460 p = runtime_SysReserve(HeapBase(i), bitmap_size + spans_size + arena_size);
466 // On a 32-bit machine, we can't typically get away
467 // with a giant virtual address space reservation.
468 // Instead we map the memory information bitmap
469 // immediately after the data segment, large enough
470 // to handle another 2GB of mappings (256 MB),
471 // along with a reservation for another 512 MB of memory.
472 // When that gets used up, we'll start asking the kernel
473 // for any memory anywhere and hope it's in the 2GB
474 // following the bitmap (presumably the executable begins
475 // near the bottom of memory, so we'll have to use up
476 // most of memory before the kernel resorts to giving out
477 // memory before the beginning of the text segment).
479 // Alternatively we could reserve 512 MB bitmap, enough
480 // for 4GB of mappings, and then accept any memory the
481 // kernel threw at us, but normally that's a waste of 512 MB
482 // of address space, which is probably too much in a 32-bit world.
483 bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
484 arena_size = 512<<20;
485 spans_size = MaxArena32 / PageSize * sizeof(runtime_mheap.spans[0]);
486 if(limit > 0 && arena_size+bitmap_size+spans_size > limit) {
487 bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
488 arena_size = bitmap_size * 8;
489 spans_size = arena_size / PageSize * sizeof(runtime_mheap.spans[0]);
491 spans_size = ROUND(spans_size, PageSize);
493 // SysReserve treats the address we ask for, end, as a hint,
494 // not as an absolute requirement. If we ask for the end
495 // of the data segment but the operating system requires
496 // a little more space before we can start allocating, it will
497 // give out a slightly higher pointer. Except QEMU, which
498 // is buggy, as usual: it won't adjust the pointer upward.
499 // So adjust it upward a little bit ourselves: 1/4 MB to get
500 // away from the running binary image and then round up
502 want = (byte*)ROUND((uintptr)_end + (1<<18), 1<<20);
503 if(0xffffffff - (uintptr)want <= bitmap_size + spans_size + arena_size)
505 p = runtime_SysReserve(want, bitmap_size + spans_size + arena_size);
507 runtime_throw("runtime: cannot reserve arena virtual address space");
508 if((uintptr)p & (((uintptr)1<<PageShift)-1))
509 runtime_printf("runtime: SysReserve returned unaligned address %p; asked for %p", p,
510 bitmap_size+spans_size+arena_size);
512 if((uintptr)p & (((uintptr)1<<PageShift)-1))
513 runtime_throw("runtime: SysReserve returned unaligned address");
515 runtime_mheap.spans = (MSpan**)p;
516 runtime_mheap.bitmap = p + spans_size;
517 runtime_mheap.arena_start = p + spans_size + bitmap_size;
518 runtime_mheap.arena_used = runtime_mheap.arena_start;
519 runtime_mheap.arena_end = runtime_mheap.arena_start + arena_size;
521 // Initialize the rest of the allocator.
522 runtime_MHeap_Init(&runtime_mheap);
523 runtime_m()->mcache = runtime_allocmcache();
526 runtime_free(runtime_malloc(1));
530 runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
535 if(n > (uintptr)(h->arena_end - h->arena_used)) {
536 // We are in 32-bit mode, maybe we didn't use all possible address space yet.
537 // Reserve some more space.
541 needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
542 needed = ROUND(needed, 256<<20);
543 new_end = h->arena_end + needed;
544 if(new_end <= h->arena_start + MaxArena32) {
545 p = runtime_SysReserve(h->arena_end, new_end - h->arena_end);
546 if(p == h->arena_end)
547 h->arena_end = new_end;
550 if(n <= (uintptr)(h->arena_end - h->arena_used)) {
551 // Keep taking from our reservation.
553 runtime_SysMap(p, n, &mstats.heap_sys);
555 runtime_MHeap_MapBits(h);
556 runtime_MHeap_MapSpans(h);
558 runtime_racemapshadow(p, n);
562 // If using 64-bit, our reservation is all we have.
563 if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
566 // On 32-bit, once the reservation is gone we can
567 // try to get memory at a location chosen by the OS
568 // and hope that it is in the range we allocated bitmap for.
569 p = runtime_SysAlloc(n, &mstats.heap_sys);
573 if(p < h->arena_start || (uintptr)(p+n - h->arena_start) >= MaxArena32) {
574 runtime_printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
575 p, h->arena_start, h->arena_start+MaxArena32);
576 runtime_SysFree(p, n, &mstats.heap_sys);
580 if(p+n > h->arena_used) {
582 if(h->arena_used > h->arena_end)
583 h->arena_end = h->arena_used;
584 runtime_MHeap_MapBits(h);
585 runtime_MHeap_MapSpans(h);
587 runtime_racemapshadow(p, n);
602 PersistentAllocChunk = 256<<10,
603 PersistentAllocMaxBlock = 64<<10, // VM reservation granularity is 64K on windows
606 // Wrapper around SysAlloc that can allocate small chunks.
607 // There is no associated free operation.
608 // Intended for things like function/type/debug-related persistent data.
609 // If align is 0, uses default align (currently 8).
611 runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
617 runtime_throw("persistentalloc: align is now a power of 2");
619 runtime_throw("persistentalloc: align is too large");
622 if(size >= PersistentAllocMaxBlock)
623 return runtime_SysAlloc(size, stat);
624 runtime_lock(&persistent);
625 persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
626 if(persistent.pos + size > persistent.end) {
627 persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats.other_sys);
628 if(persistent.pos == nil) {
629 runtime_unlock(&persistent);
630 runtime_throw("runtime: cannot allocate memory");
632 persistent.end = persistent.pos + PersistentAllocChunk;
635 persistent.pos += size;
636 runtime_unlock(&persistent);
637 if(stat != &mstats.other_sys) {
638 // reaccount the allocation against provided stat
639 runtime_xadd64(stat, size);
640 runtime_xadd64(&mstats.other_sys, -(uint64)size);
645 static Lock settype_lock;
648 runtime_settype_flush(M *mp)
650 uintptr *buf, *endbuf;
651 uintptr size, ofs, j, t;
652 uintptr ntypes, nbytes2, nbytes3;
659 buf = mp->settype_buf;
660 endbuf = buf + mp->settype_bufsize;
662 runtime_lock(&settype_lock);
663 while(buf < endbuf) {
670 // (Manually inlined copy of runtime_MHeap_Lookup)
671 p = (uintptr)v>>PageShift;
672 p -= (uintptr)runtime_mheap.arena_start >> PageShift;
673 s = runtime_mheap.spans[p];
675 if(s->sizeclass == 0) {
676 s->types.compression = MTypes_Single;
682 ofs = ((uintptr)v - (s->start<<PageShift)) / size;
684 switch(s->types.compression) {
686 ntypes = (s->npages << PageShift) / size;
687 nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
688 data3 = runtime_mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
689 s->types.compression = MTypes_Bytes;
690 s->types.data = (uintptr)data3;
691 ((uintptr*)data3)[1] = typ;
692 data3[8*sizeof(uintptr) + ofs] = 1;
696 ((uintptr*)s->types.data)[ofs] = typ;
700 data3 = (byte*)s->types.data;
702 if(((uintptr*)data3)[j] == typ) {
705 if(((uintptr*)data3)[j] == 0) {
706 ((uintptr*)data3)[j] = typ;
711 data3[8*sizeof(uintptr) + ofs] = j;
713 ntypes = (s->npages << PageShift) / size;
714 nbytes2 = ntypes * sizeof(uintptr);
715 data2 = runtime_mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
716 s->types.compression = MTypes_Words;
717 s->types.data = (uintptr)data2;
719 // Move the contents of data3 to data2. Then deallocate data3.
720 for(j=0; j<ntypes; j++) {
721 t = data3[8*sizeof(uintptr) + j];
722 t = ((uintptr*)data3)[t];
730 runtime_unlock(&settype_lock);
732 mp->settype_bufsize = 0;
736 runtime_gettype(void *v)
742 s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
745 switch(s->types.compression) {
752 ofs = (uintptr)v - (s->start<<PageShift);
753 t = ((uintptr*)s->types.data)[ofs/s->elemsize];
756 ofs = (uintptr)v - (s->start<<PageShift);
757 data = (byte*)s->types.data;
758 t = data[8*sizeof(uintptr) + ofs/s->elemsize];
759 t = ((uintptr*)data)[t];
762 runtime_throw("runtime_gettype: invalid compression kind");
765 runtime_lock(&settype_lock);
766 runtime_printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
767 runtime_unlock(&settype_lock);
777 runtime_mal(uintptr n)
779 return runtime_mallocgc(n, 0, 0);
783 runtime_new(const Type *typ)
785 return runtime_mallocgc(typ->__size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0);
789 cnew(const Type *typ, intgo n, int32 objtyp)
791 if((objtyp&(PtrSize-1)) != objtyp)
792 runtime_throw("runtime: invalid objtyp");
793 if(n < 0 || (typ->__size > 0 && (uintptr)n > (MaxMem/typ->__size)))
794 runtime_panicstring("runtime: allocation size out of range");
795 return runtime_mallocgc(typ->__size*n, (uintptr)typ | objtyp, typ->kind&KindNoPointers ? FlagNoScan : 0);
798 // same as runtime_new, but callable from C
800 runtime_cnew(const Type *typ)
802 return cnew(typ, 1, TypeInfo_SingleObject);
806 runtime_cnewarray(const Type *typ, intgo n)
808 return cnew(typ, n, TypeInfo_Array);
815 func SetFinalizer(obj Eface, finalizer Eface) {
822 if(obj.__type_descriptor == nil) {
823 runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
826 if(obj.__type_descriptor->__code != GO_PTR) {
827 runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.__type_descriptor->__reflection);
830 ot = (const PtrType*)obj.type;
831 if(ot->__element_type != nil && ot->__element_type->__size == 0) {
834 if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) {
835 runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
839 ot = (const PtrType*)obj.__type_descriptor;
841 if(finalizer.__type_descriptor != nil) {
842 if(finalizer.__type_descriptor->__code != GO_FUNC)
844 ft = (const FuncType*)finalizer.__type_descriptor;
845 if(ft->__dotdotdot || ft->__in.__count != 1)
847 fint = *(Type**)ft->__in.__values;
848 if(__go_type_descriptors_equal(fint, obj.__type_descriptor)) {
850 } else if(fint->__code == GO_PTR && (fint->__uncommon == nil || fint->__uncommon->__name == nil || obj.type->__uncommon == nil || obj.type->__uncommon->__name == nil) && __go_type_descriptors_equal(((const PtrType*)fint)->__element_type, ((const PtrType*)obj.type)->__element_type)) {
851 // ok - not same type, but both pointers,
852 // one or the other is unnamed, and same element type, so assignable.
853 } else if(fint->kind == GO_INTERFACE && ((const InterfaceType*)fint)->__methods.__count == 0) {
854 // ok - satisfies empty interface
855 } else if(fint->kind == GO_INTERFACE && __go_convert_interface_2(fint, obj.__type_descriptor, 1) != nil) {
856 // ok - satisfies non-empty interface
861 if(!runtime_addfinalizer(obj.__object, finalizer.__type_descriptor != nil ? *(void**)finalizer.__object : nil, ft, ot)) {
862 runtime_printf("runtime.SetFinalizer: finalizer already set\n");
868 runtime_printf("runtime.SetFinalizer: cannot pass %S to finalizer %S\n", *obj.__type_descriptor->__reflection, *finalizer.__type_descriptor->__reflection);
870 runtime_throw("runtime.SetFinalizer");